Kernel: Rewrite all kernel mutexes

SpinLock is now actually just a spin lock, and I added a Mutex that
does what the old "SpinLock" did. This is in preparation for starting
to support SMP and making the kernel SMP safe. This commit also removes
the obsolete PageTableScope and CriticalScope, which are replaced by
alternative APIs.
Bananymous 2024-02-25 21:29:43 +02:00
parent 6ebfe05fce
commit 40b626b0aa
83 changed files with 825 additions and 703 deletions
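
For context, a rough usage sketch (not part of the diff; the lock names are made up for illustration). SpinLock::lock() now disables interrupts and busy-waits, so it is meant for short critical sections shared with interrupt handlers, while Mutex::lock() reschedules the current thread and is meant for longer sections that may block:

	#include <kernel/Lock/LockGuard.h>
	#include <kernel/Lock/Mutex.h>
	#include <kernel/Lock/SpinLock.h>

	namespace Kernel
	{
		static SpinLock s_irq_queue_lock; // hypothetical: protects data touched by an IRQ handler
		static Mutex    s_state_mutex;    // hypothetical: protects longer, possibly blocking work

		void example_usage()
		{
			{
				LockGuard _(s_irq_queue_lock); // interrupts stay disabled until the guard is destroyed
				// push/pop an event, flip a flag, ...
			}
			{
				LockGuard _(s_state_mutex); // other threads keep being scheduled while we wait
				// do work that may take a while or block
			}
		}
	}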

View File

@ -4,7 +4,6 @@
#ifdef __is_kernel
#include <kernel/FS/VirtualFileSystem.h>
#include <kernel/Memory/PageTableScope.h>
#include <kernel/Process.h>
#endif
@ -26,7 +25,7 @@ namespace LibELF
BAN::Vector<uint8_t> buffer;
TRY(buffer.resize(inode->size()));
TRY(inode->read(0, buffer.data(), inode->size()));
TRY(inode->read(0, { buffer.data(), inode->size() }));
ELF* elf_ptr = new ELF(BAN::move(buffer));
if (elf_ptr == nullptr)

View File

@ -1,7 +1,6 @@
#include <BAN/ScopeGuard.h>
#include <kernel/CriticalScope.h>
#include <kernel/Memory/Heap.h>
#include <kernel/LockGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <LibELF/LoadableELF.h>
#include <LibELF/Values.h>
@ -315,12 +314,9 @@ namespace LibELF
if (paddr == 0)
return BAN::Error::from_errno(ENOMEM);
{
CriticalScope _;
PageTable::map_fast_page(paddr);
PageTable::with_fast_page(paddr, [&] {
memcpy(PageTable::fast_page_as_ptr(), (void*)(start + i * PAGE_SIZE), PAGE_SIZE);
PageTable::unmap_fast_page();
}
});
new_page_table.map_page_at(paddr, start + i * PAGE_SIZE, flags);
elf->m_physical_page_count++;

View File

@ -68,7 +68,8 @@ set(KERNEL_SOURCES
kernel/Random.cpp
kernel/Scheduler.cpp
kernel/Semaphore.cpp
kernel/SpinLock.cpp
kernel/Lock/Mutex.cpp
kernel/Lock/SpinLock.cpp
kernel/SSP.cpp
kernel/Storage/ATA/AHCI/Controller.cpp
kernel/Storage/ATA/AHCI/Device.cpp

View File

@ -1,7 +1,7 @@
#include <kernel/Arch.h>
#include <kernel/CPUID.h>
#include <kernel/InterruptController.h>
#include <kernel/LockGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/kmalloc.h>
#include <kernel/Memory/PageTable.h>
@ -22,6 +22,8 @@ namespace Kernel
static bool s_has_nxe = false;
static bool s_has_pge = false;
SpinLock PageTable::s_fast_page_lock;
// PML4 entry for kernel memory
static paddr_t s_global_pml4e = 0;

View File

@ -1,6 +1,4 @@
#include <kernel/LockGuard.h>
#include <kernel/Panic.h>
#include <kernel/SpinLock.h>
#define ATEXIT_MAX_FUNCS 128
@ -115,21 +113,17 @@ void __cxa_finalize(void *f)
namespace __cxxabiv1
{
/* guard variables */
static Kernel::SpinLock s_spin_lock;
/* The ABI requires a 64-bit type. */
__extension__ typedef int __guard __attribute__((mode(__DI__)));
int __cxa_guard_acquire (__guard* g)
{
Kernel::LockGuard lock_guard(s_spin_lock);
return !*(int*)g;
}
void __cxa_guard_release (__guard* g)
{
Kernel::LockGuard lock_guard(s_spin_lock);
*(int*)g = 1;
}

View File

@ -2,6 +2,7 @@
#include <BAN/Vector.h>
#include <kernel/InterruptController.h>
#include <kernel/Lock/SpinLock.h>
#include <kernel/Memory/Types.h>
namespace Kernel
@ -58,6 +59,7 @@ namespace Kernel
BAN::Vector<IOAPIC> m_io_apics;
uint8_t m_irq_overrides[0x100] {};
uint8_t m_reserved_gsis[0x100 / 8] {};
SpinLock m_lock;
};
}

View File

@ -1,30 +0,0 @@
#pragma once
#include <BAN/NoCopyMove.h>
#include <stddef.h>
namespace Kernel
{
class CriticalScope
{
BAN_NON_COPYABLE(CriticalScope);
BAN_NON_MOVABLE(CriticalScope);
public:
CriticalScope()
{
asm volatile("pushf; cli; pop %0" : "=r"(m_flags) :: "memory");
}
~CriticalScope()
{
asm volatile("push %0; popf" :: "rm"(m_flags) : "memory", "cc");
}
private:
size_t m_flags;
};
}

View File

@ -1,32 +1,33 @@
#pragma once
#include <BAN/Formatter.h>
#include <kernel/Lock/SpinLock.h>
#define dprintln(...) \
do { \
Debug::DebugLock::lock(); \
Debug::s_debug_lock.lock(); \
Debug::print_prefix(__FILE__, __LINE__); \
BAN::Formatter::print(Debug::putchar, __VA_ARGS__); \
BAN::Formatter::print(Debug::putchar, "\r\n"); \
Debug::DebugLock::unlock(); \
Debug::s_debug_lock.unlock(); \
} while(false)
#define dwarnln(...) \
do { \
Debug::DebugLock::lock(); \
Debug::s_debug_lock.lock(); \
BAN::Formatter::print(Debug::putchar, "\e[33m"); \
dprintln(__VA_ARGS__); \
BAN::Formatter::print(Debug::putchar, "\e[m"); \
Debug::DebugLock::unlock(); \
Debug::s_debug_lock.unlock(); \
} while(false)
#define derrorln(...) \
do { \
Debug::DebugLock::lock(); \
Debug::s_debug_lock.lock(); \
BAN::Formatter::print(Debug::putchar, "\e[31m"); \
dprintln(__VA_ARGS__); \
BAN::Formatter::print(Debug::putchar, "\e[m"); \
Debug::DebugLock::unlock(); \
Debug::s_debug_lock.unlock(); \
} while(false)
#define dprintln_if(cond, ...) \
@ -55,10 +56,5 @@ namespace Debug
void putchar(char);
void print_prefix(const char*, int);
class DebugLock
{
public:
static void lock();
static void unlock();
};
extern Kernel::RecursiveSpinLock s_debug_lock;
}

View File

@ -29,7 +29,7 @@ namespace Kernel
{ }
private:
mutable SpinLock m_device_lock;
mutable Mutex m_device_lock;
BAN::Vector<BAN::RefPtr<Device>> m_devices;

View File

@ -106,7 +106,7 @@ namespace Kernel
};
private:
RecursiveSpinLock m_lock;
Mutex m_mutex;
BAN::RefPtr<BlockDevice> m_block_device;

View File

@ -9,7 +9,7 @@
#include <kernel/API/DirectoryEntry.h>
#include <kernel/Credentials.h>
#include <kernel/SpinLock.h>
#include <kernel/Lock/Mutex.h>
#include <sys/socket.h>
#include <sys/types.h>
@ -157,7 +157,7 @@ namespace Kernel
virtual BAN::ErrorOr<long> ioctl_impl(int request, void* arg) { return BAN::Error::from_errno(ENOTSUP); }
protected:
mutable RecursivePrioritySpinLock m_lock;
mutable Mutex m_mutex;
private:
BAN::WeakPtr<SharedFileData> m_shared_region;

View File

@ -2,7 +2,6 @@
#include <kernel/FS/Inode.h>
#include <kernel/Semaphore.h>
#include <kernel/SpinLock.h>
namespace Kernel
{

View File

@ -4,9 +4,8 @@
#include <BAN/Iteration.h>
#include <kernel/FS/FileSystem.h>
#include <kernel/FS/TmpFS/Inode.h>
#include <kernel/LockGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/PageTable.h>
#include <kernel/SpinLock.h>
namespace Kernel
{
@ -119,7 +118,7 @@ namespace Kernel
private:
const dev_t m_rdev;
RecursiveSpinLock m_lock;
Mutex m_mutex;
BAN::HashMap<ino_t, BAN::RefPtr<TmpInode>> m_inode_cache;
BAN::RefPtr<TmpDirectoryInode> m_root_inode;
@ -155,7 +154,7 @@ namespace Kernel
template<TmpFuncs::with_block_buffer_callback F>
void TmpFileSystem::with_block_buffer(size_t index, F callback)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
paddr_t block_paddr = find_block(index);
PageTable::with_fast_page(block_paddr, [&] {
BAN::ByteSpan buffer(reinterpret_cast<uint8_t*>(PageTable::fast_page()), PAGE_SIZE);
@ -166,7 +165,7 @@ namespace Kernel
template<TmpFuncs::for_each_inode_callback F>
void TmpFileSystem::for_each_inode(F callback)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
for (auto& [_, inode] : m_inode_cache)
{
switch (callback(inode))

View File

@ -3,7 +3,7 @@
#include <BAN/String.h>
#include <BAN/Vector.h>
#include <kernel/FS/FileSystem.h>
#include <kernel/SpinLock.h>
#include <kernel/Lock/Mutex.h>
namespace Kernel
{
@ -42,7 +42,7 @@ namespace Kernel
MountPoint* mount_from_root_inode(BAN::RefPtr<Inode>);
private:
SpinLock m_lock;
Mutex m_mutex;
FileSystem* m_root_fs = nullptr;
BAN::Vector<MountPoint> m_mount_points;
};

View File

@ -20,6 +20,8 @@ namespace Kernel::Input
KeyboardLayout();
private:
SpinLock m_lock;
BAN::Array<Key, 0xFF> m_keycode_to_key_normal;
BAN::Array<Key, 0xFF> m_keycode_to_key_shift;
BAN::Array<Key, 0xFF> m_keycode_to_key_altgr;

View File

@ -5,7 +5,6 @@
#include <kernel/Device/Device.h>
#include <kernel/Input/PS2/Config.h>
#include <kernel/InterruptController.h>
#include <kernel/SpinLock.h>
namespace Kernel::Input
{
@ -60,7 +59,8 @@ namespace Kernel::Input
private:
BAN::RefPtr<PS2Device> m_devices[2];
RecursiveSpinLock m_lock;
Mutex m_mutex;
RecursiveSpinLock m_cmd_lock;
BAN::CircularQueue<Command, 128> m_command_queue;
uint64_t m_command_send_time { 0 };

View File

@ -45,6 +45,7 @@ namespace Kernel::Input
PS2Keymap m_keymap;
Semaphore m_semaphore;
SpinLock m_event_lock;
protected:
virtual BAN::ErrorOr<size_t> read_impl(off_t, BAN::ByteSpan) override;

View File

@ -38,6 +38,7 @@ namespace Kernel::Input
BAN::CircularQueue<MouseEvent, 128> m_event_queue;
SpinLock m_event_lock;
Semaphore m_semaphore;
protected:

View File

@ -0,0 +1,54 @@
#pragma once
#include <BAN/Atomic.h>
#include <BAN/NoCopyMove.h>
#include <sys/types.h>
namespace Kernel
{
class Mutex
{
BAN_NON_COPYABLE(Mutex);
BAN_NON_MOVABLE(Mutex);
public:
Mutex() = default;
void lock();
bool try_lock();
void unlock();
pid_t locker() const { return m_locker; }
bool is_locked() const { return m_locker != -1; }
uint32_t lock_depth() const { return m_lock_depth; }
private:
BAN::Atomic<pid_t> m_locker { -1 };
uint32_t m_lock_depth { 0 };
};
class PriorityMutex
{
BAN_NON_COPYABLE(PriorityMutex);
BAN_NON_MOVABLE(PriorityMutex);
public:
PriorityMutex() = default;
void lock();
bool try_lock();
void unlock();
pid_t locker() const { return m_locker; }
bool is_locked() const { return m_locker != -1; }
uint32_t lock_depth() const { return m_lock_depth; }
private:
BAN::Atomic<pid_t> m_locker { -1 };
uint32_t m_lock_depth { 0 };
BAN::Atomic<uint32_t> m_queue_depth { 0 };
};
}

View File

@ -0,0 +1,54 @@
#pragma once
#include <BAN/Atomic.h>
#include <BAN/NoCopyMove.h>
#include <sys/types.h>
namespace Kernel
{
class SpinLock
{
BAN_NON_COPYABLE(SpinLock);
BAN_NON_MOVABLE(SpinLock);
public:
SpinLock() = default;
void lock();
bool try_lock();
void unlock();
pid_t locker() const { return m_locker; }
bool is_locked() const { return m_locker != -1; }
uint32_t lock_depth() const { return is_locked(); }
private:
BAN::Atomic<pid_t> m_locker { -1 };
uintptr_t m_flags { 0 };
};
class RecursiveSpinLock
{
BAN_NON_COPYABLE(RecursiveSpinLock);
BAN_NON_MOVABLE(RecursiveSpinLock);
public:
RecursiveSpinLock() = default;
void lock();
bool try_lock();
void unlock();
pid_t locker() const { return m_locker; }
bool is_locked() const { return m_locker != -1; }
uint32_t lock_depth() const { return m_lock_depth; }
private:
BAN::Atomic<pid_t> m_locker { -1 };
uint32_t m_lock_depth { 0 };
uintptr_t m_flags { 0 };
};
}

View File

@ -3,8 +3,8 @@
#include <BAN/NoCopyMove.h>
#include <BAN/Vector.h>
#include <kernel/Lock/SpinLock.h>
#include <kernel/Memory/PhysicalRange.h>
#include <kernel/SpinLock.h>
namespace Kernel
{

View File

@ -2,9 +2,8 @@
#include <BAN/Errors.h>
#include <BAN/Traits.h>
#include <kernel/CriticalScope.h>
#include <kernel/Lock/SpinLock.h>
#include <kernel/Memory/Types.h>
#include <kernel/SpinLock.h>
namespace Kernel
{
@ -15,6 +14,12 @@ namespace Kernel
requires BAN::is_same_v<decltype(func()), void>;
};
template<typename F>
concept with_fast_page_callback_error = requires(F func)
{
requires BAN::is_same_v<decltype(func()), BAN::ErrorOr<void>>;
};
class PageTable
{
public:
@ -37,19 +42,30 @@ namespace Kernel
static PageTable& kernel();
static PageTable& current();
static void map_fast_page(paddr_t);
static void unmap_fast_page();
static constexpr vaddr_t fast_page() { return KERNEL_OFFSET; }
public:
template<with_fast_page_callback F>
static void with_fast_page(paddr_t paddr, F callback)
{
CriticalScope _;
s_fast_page_lock.lock();
map_fast_page(paddr);
callback();
unmap_fast_page();
s_fast_page_lock.unlock();
}
template<with_fast_page_callback_error F>
static BAN::ErrorOr<void> with_fast_page(paddr_t paddr, F callback)
{
s_fast_page_lock.lock();
map_fast_page(paddr);
auto ret = callback();
unmap_fast_page();
s_fast_page_lock.unlock();
return ret;
}
static constexpr vaddr_t fast_page() { return KERNEL_OFFSET; }
// FIXME: implement sized checks, return span, etc
static void* fast_page_as_ptr(size_t offset = 0)
{
@ -110,9 +126,13 @@ namespace Kernel
void prepare_fast_page();
static void invalidate(vaddr_t);
static void map_fast_page(paddr_t);
static void unmap_fast_page();
private:
paddr_t m_highest_paging_struct { 0 };
mutable RecursiveSpinLock m_lock;
static SpinLock s_fast_page_lock;
};
static constexpr size_t range_page_count(vaddr_t start, size_t bytes)
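
As an aside (not part of the diff), the two with_fast_page() overloads above replace the old map_fast_page()/unmap_fast_page() pairs at the call sites. The void form is used like this (illustrative helper, mirroring LoadableELF and the memory regions):

	// illustration only: copy one physical page into a kernel buffer
	void copy_page_from_physical(paddr_t paddr, uint8_t* buffer)
	{
		PageTable::with_fast_page(paddr, [&] {
			memcpy(buffer, PageTable::fast_page_as_ptr(), PAGE_SIZE);
		});
	}

and the BAN::ErrorOr<void> form lets the callback fail, as in the ACPI code later in this commit:

	TRY(PageTable::with_fast_page(rsdp->xsdt_address & PAGE_ADDR_MASK,
		[&]() -> BAN::ErrorOr<void>
		{
			auto& xsdt = PageTable::fast_page_as<const XSDT>(rsdp->xsdt_address % PAGE_SIZE);
			if (memcmp(xsdt.signature, "XSDT", 4) != 0)
				return BAN::Error::from_error_code(ErrorCode::ACPI_RootInvalid);
			return {};
		}
	));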

View File

@ -1,33 +0,0 @@
#pragma once
#include <kernel/CriticalScope.h>
#include <kernel/LockGuard.h>
#include <kernel/Memory/PageTable.h>
namespace Kernel
{
class PageTableScope
{
public:
PageTableScope(PageTable& page_table)
: m_guard(page_table)
, m_old(PageTable::current())
, m_temp(page_table)
{
if (&m_old != &m_temp)
m_temp.load();
}
~PageTableScope()
{
if (&m_old != &m_temp)
m_old.load();
}
private:
LockGuard<PageTable> m_guard;
CriticalScope m_scope;
PageTable& m_old;
PageTable& m_temp;
};
}

View File

@ -51,7 +51,8 @@ namespace Kernel
};
private:
SpinLock m_lock;
SpinLock m_pending_lock;
SpinLock m_table_lock;
BAN::HashMap<BAN::IPv4Address, BAN::MACAddress> m_arp_table;

View File

@ -67,6 +67,8 @@ namespace Kernel
bool m_has_eerprom { false };
private:
SpinLock m_lock;
BAN::UniqPtr<DMARegion> m_rx_buffer_region;
BAN::UniqPtr<DMARegion> m_tx_buffer_region;
BAN::UniqPtr<DMARegion> m_rx_descriptor_region;

View File

@ -12,7 +12,6 @@
#include <kernel/Networking/NetworkLayer.h>
#include <kernel/Networking/NetworkSocket.h>
#include <kernel/Process.h>
#include <kernel/SpinLock.h>
namespace Kernel
{
@ -68,7 +67,8 @@ namespace Kernel
};
private:
RecursiveSpinLock m_lock;
RecursiveSpinLock m_packet_lock;
RecursiveSpinLock m_socket_lock;
BAN::UniqPtr<ARPTable> m_arp_table;
Process* m_process { nullptr };

View File

@ -119,7 +119,6 @@ namespace Kernel
uint64_t m_time_wait_start_ms { 0 };
RecursiveSpinLock m_lock;
Semaphore m_semaphore;
BAN::Atomic<bool> m_should_ack { false };

View File

@ -20,8 +20,11 @@ namespace Kernel
private:
static PIC* create();
friend class InterruptController;
private:
SpinLock m_lock;
uint16_t m_reserved_irqs { 0 };
friend class InterruptController;
};
}

View File

@ -10,7 +10,6 @@
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/MemoryRegion.h>
#include <kernel/OpenFileDescriptorSet.h>
#include <kernel/SpinLock.h>
#include <kernel/Terminal/TTY.h>
#include <kernel/Thread.h>
@ -222,7 +221,8 @@ namespace Kernel
const pid_t m_pid;
const pid_t m_parent;
mutable RecursiveSpinLock m_lock;
mutable Mutex m_big_mutex;
SpinLock m_signal_lock;
BAN::String m_working_directory;
BAN::Vector<Thread*> m_threads;

View File

@ -7,6 +7,21 @@
namespace Kernel
{
class SchedulerLock
{
public:
void lock();
void unlock();
void unlock_all();
pid_t locker() const;
private:
BAN::Atomic<pid_t> m_locker { -1 };
uint32_t m_lock_depth { 0 };
friend class Scheduler;
};
class Scheduler
{
public:
@ -19,6 +34,8 @@ namespace Kernel
void reschedule();
void reschedule_if_idling();
void reschedule_current_no_save();
void set_current_thread_sleeping(uint64_t wake_time);
void block_current_thread(Semaphore*, uint64_t wake_time);
@ -29,8 +46,8 @@ namespace Kernel
Thread& current_thread();
static pid_t current_tid();
[[noreturn]] void execute_current_thread();
[[noreturn]] void _execute_current_thread();
BAN::ErrorOr<void> add_thread(Thread*);
[[noreturn]] void delete_current_process_and_thread();
private:
@ -43,7 +60,8 @@ namespace Kernel
void remove_and_advance_current_thread();
void advance_current_thread();
BAN::ErrorOr<void> add_thread(Thread*);
[[noreturn]] void execute_current_thread();
[[noreturn]] void _execute_current_thread();
private:
struct SchedulerThread
@ -57,13 +75,13 @@ namespace Kernel
Semaphore* semaphore;
};
SchedulerLock m_lock;
Thread* m_idle_thread { nullptr };
BAN::LinkedList<SchedulerThread> m_active_threads;
BAN::LinkedList<SchedulerThread> m_sleeping_threads;
BAN::LinkedList<SchedulerThread>::iterator m_current_thread;
friend class Process;
};
}

View File

@ -1,65 +0,0 @@
#pragma once
#include <BAN/Atomic.h>
#include <BAN/NoCopyMove.h>
#include <sys/types.h>
namespace Kernel
{
class SpinLock
{
BAN_NON_COPYABLE(SpinLock);
BAN_NON_MOVABLE(SpinLock);
public:
SpinLock() = default;
void lock();
void unlock();
bool is_locked() const;
uint32_t lock_depth() const { return m_locker != -1; }
private:
BAN::Atomic<pid_t> m_locker = -1;
};
class RecursiveSpinLock
{
BAN_NON_COPYABLE(RecursiveSpinLock);
BAN_NON_MOVABLE(RecursiveSpinLock);
public:
RecursiveSpinLock() = default;
void lock();
void unlock();
bool is_locked() const;
uint32_t lock_depth() const { return m_lock_depth; }
private:
BAN::Atomic<pid_t> m_locker = -1;
BAN::Atomic<uint32_t> m_lock_depth = 0;
};
class RecursivePrioritySpinLock
{
BAN_NON_COPYABLE(RecursivePrioritySpinLock);
BAN_NON_MOVABLE(RecursivePrioritySpinLock);
public:
RecursivePrioritySpinLock() = default;
void lock();
void unlock();
bool is_locked() const;
uint32_t lock_depth() const { return m_lock_depth; }
private:
BAN::Atomic<pid_t> m_locker = -1;
BAN::Atomic<uint32_t> m_lock_depth = 0;
BAN::Atomic<uint32_t> m_queue_length = 0;
};
}

View File

@ -4,7 +4,7 @@
#include <BAN/RefPtr.h>
#include <BAN/Vector.h>
#include <kernel/InterruptController.h>
#include <kernel/SpinLock.h>
#include <kernel/Lock/Mutex.h>
namespace Kernel
{
@ -51,7 +51,7 @@ namespace Kernel
private:
const uint16_t m_base;
const uint16_t m_ctrl;
SpinLock m_lock;
Mutex m_mutex;
volatile bool m_has_got_irq { false };

View File

@ -3,7 +3,6 @@
#include <BAN/Array.h>
#include <BAN/ByteSpan.h>
#include <kernel/Memory/Types.h>
#include <kernel/SpinLock.h>
namespace Kernel
{

View File

@ -3,6 +3,7 @@
#include <BAN/UniqPtr.h>
#include <BAN/Vector.h>
#include <kernel/InterruptController.h>
#include <kernel/Lock/Mutex.h>
#include <kernel/Memory/DMARegion.h>
#include <kernel/Semaphore.h>
#include <kernel/Storage/NVMe/Definitions.h>
@ -20,7 +21,7 @@ namespace Kernel
virtual void handle_irq() final override;
private:
SpinLock m_lock;
Mutex m_mutex;
BAN::UniqPtr<Kernel::DMARegion> m_completion_queue;
BAN::UniqPtr<Kernel::DMARegion> m_submission_queue;
volatile NVMe::DoorbellRegisters& m_doorbell;

View File

@ -44,7 +44,7 @@ namespace Kernel
virtual bool has_error_impl() const override { return false; }
private:
SpinLock m_lock;
Mutex m_mutex;
BAN::Optional<DiskCache> m_disk_cache;
BAN::Vector<BAN::RefPtr<Partition>> m_partitions;

View File

@ -59,6 +59,7 @@ namespace Kernel
bool initialize();
private:
SpinLock m_lock;
BAN::String m_name;
Serial m_serial;
BAN::CircularQueue<uint8_t, 128> m_input;

View File

@ -3,7 +3,6 @@
#include <BAN/Array.h>
#include <kernel/Device/Device.h>
#include <kernel/Input/KeyEvent.h>
#include <kernel/SpinLock.h>
#include <kernel/Terminal/TerminalDriver.h>
#include <kernel/Terminal/termios.h>
#include <kernel/Semaphore.h>

View File

@ -3,7 +3,6 @@
#include <BAN/Array.h>
#include <kernel/Device/Device.h>
#include <kernel/Input/KeyEvent.h>
#include <kernel/SpinLock.h>
#include <kernel/Terminal/TerminalDriver.h>
#include <kernel/Terminal/termios.h>
#include <kernel/Terminal/TTY.h>
@ -70,6 +69,8 @@ namespace Kernel
private:
BAN::String m_name;
RecursiveSpinLock m_write_lock;
State m_state { State::Normal };
AnsiState m_ansi_state { };
UTF8State m_utf8_state { };

View File

@ -113,6 +113,8 @@ namespace Kernel
Process* m_process { nullptr };
bool m_is_userspace { false };
mutable RecursiveSpinLock m_lock;
uintptr_t* m_return_rsp { nullptr };
uintptr_t* m_return_rip { nullptr };

View File

@ -30,6 +30,8 @@ namespace Kernel
uint64_t read_main_counter() const;
private:
mutable SpinLock m_lock;
bool m_is_64bit { false };
uint64_t m_last_ticks { 0 };

View File

@ -99,10 +99,10 @@ namespace Kernel
if (rsdp->revision >= 2)
{
PageTable::map_fast_page(rsdp->xsdt_address & PAGE_ADDR_MASK);
TRY(PageTable::with_fast_page(rsdp->xsdt_address & PAGE_ADDR_MASK,
[&]() -> BAN::ErrorOr<void>
{
auto& xsdt = PageTable::fast_page_as<const XSDT>(rsdp->xsdt_address % PAGE_SIZE);
BAN::ScopeGuard _([] { PageTable::unmap_fast_page(); });
if (memcmp(xsdt.signature, "XSDT", 4) != 0)
return BAN::Error::from_error_code(ErrorCode::ACPI_RootInvalid);
if (!is_valid_std_header(&xsdt))
@ -111,13 +111,16 @@ namespace Kernel
m_header_table_paddr = rsdp->xsdt_address + offsetof(XSDT, entries);
m_entry_size = 8;
root_entry_count = (xsdt.length - sizeof(SDTHeader)) / 8;
return {};
}
));
}
else
{
PageTable::map_fast_page(rsdp->rsdt_address & PAGE_ADDR_MASK);
TRY(PageTable::with_fast_page(rsdp->rsdt_address & PAGE_ADDR_MASK,
[&]() -> BAN::ErrorOr<void>
{
auto& rsdt = PageTable::fast_page_as<const RSDT>(rsdp->rsdt_address % PAGE_SIZE);
BAN::ScopeGuard _([] { PageTable::unmap_fast_page(); });
if (memcmp(rsdt.signature, "RSDT", 4) != 0)
return BAN::Error::from_error_code(ErrorCode::ACPI_RootInvalid);
if (!is_valid_std_header(&rsdt))
@ -126,6 +129,9 @@ namespace Kernel
m_header_table_paddr = rsdp->rsdt_address + offsetof(RSDT, entries);
m_entry_size = 4;
root_entry_count = (rsdt.length - sizeof(SDTHeader)) / 4;
return {};
}
));
}
size_t needed_pages = range_page_count(m_header_table_paddr, root_entry_count * m_entry_size);
@ -144,9 +150,10 @@ namespace Kernel
auto map_header =
[](paddr_t header_paddr) -> vaddr_t
{
PageTable::map_fast_page(header_paddr & PAGE_ADDR_MASK);
size_t header_length = PageTable::fast_page_as<SDTHeader>(header_paddr % PAGE_SIZE).length;
PageTable::unmap_fast_page();
size_t header_length;
PageTable::with_fast_page(header_paddr & PAGE_ADDR_MASK, [&] {
header_length = PageTable::fast_page_as<SDTHeader>(header_paddr % PAGE_SIZE).length;
});
size_t needed_pages = range_page_count(header_paddr, header_length);
vaddr_t page_vaddr = PageTable::kernel().reserve_free_contiguous_pages(needed_pages, KERNEL_OFFSET);

View File

@ -1,9 +1,10 @@
#include <BAN/ScopeGuard.h>
#include <kernel/Debug.h>
#include <kernel/ACPI.h>
#include <kernel/APIC.h>
#include <kernel/CPUID.h>
#include <kernel/Debug.h>
#include <kernel/IDT.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/PageTable.h>
#include <kernel/MMIO.h>
@ -223,7 +224,7 @@ namespace Kernel
void APIC::enable_irq(uint8_t irq)
{
CriticalScope _;
LockGuard _(m_lock);
uint32_t gsi = m_irq_overrides[irq];
@ -268,7 +269,7 @@ namespace Kernel
BAN::ErrorOr<void> APIC::reserve_irq(uint8_t irq)
{
CriticalScope _;
LockGuard _(m_lock);
uint32_t gsi = m_irq_overrides[irq];
@ -301,7 +302,7 @@ namespace Kernel
BAN::Optional<uint8_t> APIC::get_free_irq()
{
CriticalScope _;
LockGuard _(m_lock);
for (int irq = 0; irq <= 0xFF; irq++)
{
uint32_t gsi = m_irq_overrides[irq];

View File

@ -1,7 +1,6 @@
#include <kernel/Debug.h>
#include <kernel/InterruptController.h>
#include <kernel/Memory/PageTable.h>
#include <kernel/SpinLock.h>
#include <kernel/Terminal/Serial.h>
#include <kernel/Terminal/TTY.h>
#include <kernel/Timer/Timer.h>
@ -13,6 +12,8 @@ extern TerminalDriver* g_terminal_driver;
namespace Debug
{
Kernel::RecursiveSpinLock s_debug_lock;
void dump_stack_trace()
{
using namespace Kernel;
@ -120,18 +121,4 @@ namespace Debug
BAN::Formatter::print(Debug::putchar, "[{5}.{3}] {}:{}: ", ms_since_boot / 1000, ms_since_boot % 1000, file, line);
}
static Kernel::RecursiveSpinLock s_debug_lock;
void DebugLock::lock()
{
if (Kernel::interrupts_enabled())
s_debug_lock.lock();
}
void DebugLock::unlock()
{
if (Kernel::interrupts_enabled())
s_debug_lock.unlock();
}
}

View File

@ -20,7 +20,7 @@ namespace Kernel
BAN::ErrorOr<size_t> DebugDevice::write_impl(off_t, BAN::ConstByteSpan buffer)
{
auto ms_since_boot = SystemTimer::get().ms_since_boot();
Debug::DebugLock::lock();
Debug::s_debug_lock.lock();
BAN::Formatter::print(Debug::putchar, "[{5}.{3}] {}: ",
ms_since_boot / 1000,
ms_since_boot % 1000,
@ -28,7 +28,7 @@ namespace Kernel
);
for (size_t i = 0; i < buffer.size(); i++)
Debug::putchar(buffer[i]);
Debug::DebugLock::unlock();
Debug::s_debug_lock.unlock();
return buffer.size();
}

View File

@ -5,7 +5,7 @@
#include <kernel/Device/ZeroDevice.h>
#include <kernel/FS/DevFS/FileSystem.h>
#include <kernel/FS/TmpFS/Inode.h>
#include <kernel/LockGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Process.h>
#include <kernel/Scheduler.h>
#include <kernel/Storage/StorageDevice.h>

View File

@ -1,6 +1,6 @@
#include <BAN/ScopeGuard.h>
#include <kernel/FS/Ext2/FileSystem.h>
#include <kernel/LockGuard.h>
#include <kernel/Lock/LockGuard.h>
#define EXT2_DEBUG_PRINT 0
#define EXT2_VERIFY_INODE 0
@ -139,7 +139,7 @@ namespace Kernel
BAN::ErrorOr<uint32_t> Ext2FS::create_inode(const Ext2::Inode& ext2_inode)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
ASSERT(ext2_inode.size == 0);
@ -218,7 +218,7 @@ namespace Kernel
void Ext2FS::delete_inode(uint32_t ino)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
ASSERT(ino >= superblock().first_ino);
ASSERT(ino <= superblock().inodes_count);
@ -271,7 +271,7 @@ namespace Kernel
void Ext2FS::read_block(uint32_t block, BlockBufferWrapper& buffer)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
const uint32_t sector_size = m_block_device->blksize();
const uint32_t block_size = this->block_size();
@ -284,7 +284,7 @@ namespace Kernel
void Ext2FS::write_block(uint32_t block, const BlockBufferWrapper& buffer)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
const uint32_t sector_size = m_block_device->blksize();
const uint32_t block_size = this->block_size();
@ -297,7 +297,7 @@ namespace Kernel
void Ext2FS::sync_superblock()
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
const uint32_t sector_size = m_block_device->blksize();
ASSERT(1024 % sector_size == 0);
@ -322,13 +322,13 @@ namespace Kernel
Ext2FS::BlockBufferWrapper Ext2FS::get_block_buffer()
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
return m_buffer_manager.get_buffer();
}
BAN::ErrorOr<uint32_t> Ext2FS::reserve_free_block(uint32_t primary_bgd)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (m_superblock.r_blocks_count >= m_superblock.free_blocks_count)
return BAN::Error::from_errno(ENOSPC);
@ -389,7 +389,7 @@ namespace Kernel
void Ext2FS::release_block(uint32_t block)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
ASSERT(block >= m_superblock.first_data_block);
ASSERT(block < m_superblock.blocks_count);
@ -422,7 +422,7 @@ namespace Kernel
Ext2FS::BlockLocation Ext2FS::locate_inode(uint32_t ino)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
ASSERT(ino <= superblock().inodes_count);
@ -464,7 +464,7 @@ namespace Kernel
Ext2FS::BlockLocation Ext2FS::locate_block_group_descriptior(uint32_t group_index)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
const uint32_t block_size = this->block_size();

View File

@ -1,5 +1,5 @@
#include <kernel/FS/Inode.h>
#include <kernel/LockGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <fcntl.h>
@ -58,13 +58,13 @@ namespace Kernel
void Inode::on_close()
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
on_close_impl();
}
BAN::ErrorOr<BAN::RefPtr<Inode>> Inode::find_inode(BAN::StringView name)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (!mode().ifdir())
return BAN::Error::from_errno(ENOTDIR);
return find_inode_impl(name);
@ -72,7 +72,7 @@ namespace Kernel
BAN::ErrorOr<void> Inode::list_next_inodes(off_t offset, DirectoryEntryList* list, size_t list_len)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (!mode().ifdir())
return BAN::Error::from_errno(ENOTDIR);
return list_next_inodes_impl(offset, list, list_len);
@ -80,7 +80,7 @@ namespace Kernel
BAN::ErrorOr<void> Inode::create_file(BAN::StringView name, mode_t mode, uid_t uid, gid_t gid)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (!this->mode().ifdir())
return BAN::Error::from_errno(ENOTDIR);
if (Mode(mode).ifdir())
@ -90,7 +90,7 @@ namespace Kernel
BAN::ErrorOr<void> Inode::create_directory(BAN::StringView name, mode_t mode, uid_t uid, gid_t gid)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (!this->mode().ifdir())
return BAN::Error::from_errno(ENOTDIR);
if (!Mode(mode).ifdir())
@ -100,7 +100,7 @@ namespace Kernel
BAN::ErrorOr<void> Inode::unlink(BAN::StringView name)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (!mode().ifdir())
return BAN::Error::from_errno(ENOTDIR);
if (name == "."sv || name == ".."sv)
@ -110,7 +110,7 @@ namespace Kernel
BAN::ErrorOr<BAN::String> Inode::link_target()
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (!mode().iflnk())
return BAN::Error::from_errno(EINVAL);
return link_target_impl();
@ -118,7 +118,7 @@ namespace Kernel
BAN::ErrorOr<long> Inode::accept(sockaddr* address, socklen_t* address_len)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (!mode().ifsock())
return BAN::Error::from_errno(ENOTSOCK);
return accept_impl(address, address_len);
@ -126,7 +126,7 @@ namespace Kernel
BAN::ErrorOr<void> Inode::bind(const sockaddr* address, socklen_t address_len)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (!mode().ifsock())
return BAN::Error::from_errno(ENOTSOCK);
return bind_impl(address, address_len);
@ -134,7 +134,7 @@ namespace Kernel
BAN::ErrorOr<void> Inode::connect(const sockaddr* address, socklen_t address_len)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (!mode().ifsock())
return BAN::Error::from_errno(ENOTSOCK);
return connect_impl(address, address_len);
@ -142,7 +142,7 @@ namespace Kernel
BAN::ErrorOr<void> Inode::listen(int backlog)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (!mode().ifsock())
return BAN::Error::from_errno(ENOTSOCK);
return listen_impl(backlog);
@ -150,7 +150,7 @@ namespace Kernel
BAN::ErrorOr<size_t> Inode::sendto(BAN::ConstByteSpan message, const sockaddr* address, socklen_t address_len)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (!mode().ifsock())
return BAN::Error::from_errno(ENOTSOCK);
return sendto_impl(message, address, address_len);
@ -158,7 +158,7 @@ namespace Kernel
BAN::ErrorOr<size_t> Inode::recvfrom(BAN::ByteSpan buffer, sockaddr* address, socklen_t* address_len)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (!mode().ifsock())
return BAN::Error::from_errno(ENOTSOCK);
return recvfrom_impl(buffer, address, address_len);
@ -166,7 +166,7 @@ namespace Kernel
BAN::ErrorOr<size_t> Inode::read(off_t offset, BAN::ByteSpan buffer)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (mode().ifdir())
return BAN::Error::from_errno(EISDIR);
return read_impl(offset, buffer);
@ -174,7 +174,7 @@ namespace Kernel
BAN::ErrorOr<size_t> Inode::write(off_t offset, BAN::ConstByteSpan buffer)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (mode().ifdir())
return BAN::Error::from_errno(EISDIR);
return write_impl(offset, buffer);
@ -182,7 +182,7 @@ namespace Kernel
BAN::ErrorOr<void> Inode::truncate(size_t size)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (mode().ifdir())
return BAN::Error::from_errno(EISDIR);
return truncate_impl(size);
@ -191,37 +191,37 @@ namespace Kernel
BAN::ErrorOr<void> Inode::chmod(mode_t mode)
{
ASSERT((mode & Inode::Mode::TYPE_MASK) == 0);
LockGuard _(m_lock);
LockGuard _(m_mutex);
return chmod_impl(mode);
}
BAN::ErrorOr<void> Inode::chown(uid_t uid, gid_t gid)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
return chown_impl(uid, gid);
}
bool Inode::can_read() const
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
return can_read_impl();
}
bool Inode::can_write() const
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
return can_write_impl();
}
bool Inode::has_error() const
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
return has_error_impl();
}
BAN::ErrorOr<long> Inode::ioctl(int request, void* arg)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
return ioctl_impl(request, arg);
}

View File

@ -1,5 +1,5 @@
#include <kernel/FS/Pipe.h>
#include <kernel/LockGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Thread.h>
#include <kernel/Timer/Timer.h>
@ -26,14 +26,14 @@ namespace Kernel
void Pipe::clone_writing()
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
ASSERT(m_writing_count > 0);
m_writing_count++;
}
void Pipe::close_writing()
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
ASSERT(m_writing_count > 0);
m_writing_count--;
if (m_writing_count == 0)
@ -42,12 +42,11 @@ namespace Kernel
BAN::ErrorOr<size_t> Pipe::read_impl(off_t, BAN::ByteSpan buffer)
{
LockGuard _(m_lock);
while (m_buffer.empty())
{
if (m_writing_count == 0)
return 0;
LockFreeGuard lock_free(m_lock);
LockFreeGuard lock_free(m_mutex);
TRY(Thread::current().block_or_eintr_indefinite(m_semaphore));
}
@ -66,8 +65,6 @@ namespace Kernel
BAN::ErrorOr<size_t> Pipe::write_impl(off_t, BAN::ConstByteSpan buffer)
{
LockGuard _(m_lock);
size_t old_size = m_buffer.size();
TRY(m_buffer.resize(old_size + buffer.size()));
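
The LockFreeGuard pattern above (also used by TCPSocket and UnixDomainSocket further down) releases the mutex only around the blocking wait; condensed, the read path now looks roughly like this (sketch, not part of the diff):

	// m_mutex is already held here, taken by Inode::read() before calling read_impl()
	while (m_buffer.empty())
	{
		if (m_writing_count == 0)
			return 0;
		LockFreeGuard lock_free(m_mutex); // drop the mutex while sleeping on the semaphore
		TRY(Thread::current().block_or_eintr_indefinite(m_semaphore));
	} // the mutex is re-acquired here before m_buffer is touched again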

View File

@ -1,6 +1,5 @@
#include <kernel/FS/ProcFS/FileSystem.h>
#include <kernel/FS/ProcFS/Inode.h>
#include <kernel/LockGuard.h>
namespace Kernel
{

View File

@ -66,7 +66,7 @@ namespace Kernel
BAN::ErrorOr<BAN::RefPtr<TmpInode>> TmpFileSystem::open_inode(ino_t ino)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (m_inode_cache.contains(ino))
return m_inode_cache[ino];
@ -85,7 +85,7 @@ namespace Kernel
BAN::ErrorOr<void> TmpFileSystem::add_to_cache(BAN::RefPtr<TmpInode> inode)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (!m_inode_cache.contains(inode->ino()))
TRY(m_inode_cache.insert(inode->ino(), inode));
@ -94,7 +94,7 @@ namespace Kernel
void TmpFileSystem::remove_from_cache(BAN::RefPtr<TmpInode> inode)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
ASSERT(m_inode_cache.contains(inode->ino()));
m_inode_cache.remove(inode->ino());
@ -102,7 +102,7 @@ namespace Kernel
void TmpFileSystem::read_inode(ino_t ino, TmpInodeInfo& out)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
auto inode_location = find_inode(ino);
PageTable::with_fast_page(inode_location.paddr, [&] {
@ -112,7 +112,7 @@ namespace Kernel
void TmpFileSystem::write_inode(ino_t ino, const TmpInodeInfo& info)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
auto inode_location = find_inode(ino);
PageTable::with_fast_page(inode_location.paddr, [&] {
@ -123,7 +123,7 @@ namespace Kernel
void TmpFileSystem::delete_inode(ino_t ino)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
auto inode_location = find_inode(ino);
PageTable::with_fast_page(inode_location.paddr, [&] {
@ -138,7 +138,7 @@ namespace Kernel
BAN::ErrorOr<ino_t> TmpFileSystem::allocate_inode(const TmpInodeInfo& info)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
constexpr size_t inodes_per_page = PAGE_SIZE / sizeof(TmpInodeInfo);
@ -164,7 +164,7 @@ namespace Kernel
TmpFileSystem::InodeLocation TmpFileSystem::find_inode(ino_t ino)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
ASSERT_GTE(ino, first_inode);
ASSERT_LT(ino, max_inodes);
@ -182,7 +182,7 @@ namespace Kernel
void TmpFileSystem::free_block(size_t index)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
constexpr size_t addresses_per_page = PAGE_SIZE / sizeof(PageInfo);
@ -204,7 +204,7 @@ namespace Kernel
BAN::ErrorOr<size_t> TmpFileSystem::allocate_block()
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
size_t result = first_data_page;
TRY(for_each_indirect_paddr_allocating(m_data_pages, [&] (paddr_t, bool allocated) {
@ -218,7 +218,7 @@ namespace Kernel
paddr_t TmpFileSystem::find_block(size_t index)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
ASSERT_GT(index, 0);
return find_indirect(m_data_pages, index - first_data_page, 3);
@ -226,7 +226,7 @@ namespace Kernel
paddr_t TmpFileSystem::find_indirect(PageInfo root, size_t index, size_t depth)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
ASSERT(root.flags() & PageInfo::Flags::Present);
if (depth == 0)
@ -257,7 +257,7 @@ namespace Kernel
template<TmpFuncs::for_each_indirect_paddr_allocating_callback F>
BAN::ErrorOr<BAN::Iteration> TmpFileSystem::for_each_indirect_paddr_allocating_internal(PageInfo page_info, F callback, size_t depth)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
ASSERT(page_info.flags() & PageInfo::Flags::Present);
if (depth == 0)
@ -316,7 +316,7 @@ namespace Kernel
template<TmpFuncs::for_each_indirect_paddr_allocating_callback F>
BAN::ErrorOr<void> TmpFileSystem::for_each_indirect_paddr_allocating(PageInfo page_info, F callback, size_t depth)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
BAN::Iteration result = TRY(for_each_indirect_paddr_allocating_internal(page_info, callback, depth));
ASSERT(result == BAN::Iteration::Break);

View File

@ -5,7 +5,7 @@
#include <kernel/FS/ProcFS/FileSystem.h>
#include <kernel/FS/TmpFS/FileSystem.h>
#include <kernel/FS/VirtualFileSystem.h>
#include <kernel/LockGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <fcntl.h>
namespace Kernel
@ -44,6 +44,8 @@ namespace Kernel
BAN::ErrorOr<void> VirtualFileSystem::mount(const Credentials& credentials, BAN::StringView block_device_path, BAN::StringView target)
{
LockGuard _(m_mutex);
auto block_device_file = TRY(file_from_absolute_path(credentials, block_device_path, true));
if (!block_device_file.inode->is_device())
return BAN::Error::from_errno(ENOTBLK);
@ -63,15 +65,14 @@ namespace Kernel
if (!file.inode->mode().ifdir())
return BAN::Error::from_errno(ENOTDIR);
LockGuard _(m_lock);
LockGuard _(m_mutex);
TRY(m_mount_points.push_back({ file, file_system }));
return {};
}
VirtualFileSystem::MountPoint* VirtualFileSystem::mount_from_host_inode(BAN::RefPtr<Inode> inode)
{
ASSERT(m_lock.is_locked());
LockGuard _(m_mutex);
for (MountPoint& mount : m_mount_points)
if (*mount.host.inode == *inode)
return &mount;
@ -80,7 +81,7 @@ namespace Kernel
VirtualFileSystem::MountPoint* VirtualFileSystem::mount_from_root_inode(BAN::RefPtr<Inode> inode)
{
ASSERT(m_lock.is_locked());
LockGuard _(m_mutex);
for (MountPoint& mount : m_mount_points)
if (*mount.target->root_inode() == *inode)
return &mount;
@ -89,7 +90,7 @@ namespace Kernel
BAN::ErrorOr<VirtualFileSystem::File> VirtualFileSystem::file_from_absolute_path(const Credentials& credentials, BAN::StringView path, int flags)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
ASSERT(path.front() == '/');

View File

@ -1,7 +1,7 @@
#include <BAN/HashMap.h>
#include <kernel/CriticalScope.h>
#include <kernel/FS/VirtualFileSystem.h>
#include <kernel/Input/KeyboardLayout.h>
#include <kernel/Lock/LockGuard.h>
#include <ctype.h>
@ -74,6 +74,7 @@ namespace Kernel::Input
Key KeyboardLayout::get_key_from_event(KeyEvent event)
{
LockGuard _(m_lock);
if (event.shift())
return m_keycode_to_key_shift[event.keycode];
if (event.ralt())
@ -256,7 +257,7 @@ namespace Kernel::Input
}
}
CriticalScope _;
LockGuard _(m_lock);
for (size_t i = 0; i < new_layout->m_keycode_to_key_normal.size(); i++)
if (new_layout->m_keycode_to_key_normal[i] != Key::None)

View File

@ -22,7 +22,7 @@ namespace Kernel::Input
BAN::ErrorOr<void> PS2Controller::send_byte(uint16_t port, uint8_t byte)
{
ASSERT(interrupts_enabled());
LockGuard _(m_lock);
LockGuard _(m_mutex);
uint64_t timeout = SystemTimer::get().ms_since_boot() + s_ps2_timeout_ms;
while (SystemTimer::get().ms_since_boot() < timeout)
{
@ -37,7 +37,7 @@ namespace Kernel::Input
BAN::ErrorOr<uint8_t> PS2Controller::read_byte()
{
ASSERT(interrupts_enabled());
LockGuard _(m_lock);
LockGuard _(m_mutex);
uint64_t timeout = SystemTimer::get().ms_since_boot() + s_ps2_timeout_ms;
while (SystemTimer::get().ms_since_boot() < timeout)
{
@ -50,14 +50,14 @@ namespace Kernel::Input
BAN::ErrorOr<void> PS2Controller::send_command(PS2::Command command)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
TRY(send_byte(PS2::IOPort::COMMAND, command));
return {};
}
BAN::ErrorOr<void> PS2Controller::send_command(PS2::Command command, uint8_t data)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
TRY(send_byte(PS2::IOPort::COMMAND, command));
TRY(send_byte(PS2::IOPort::DATA, data));
return {};
@ -65,7 +65,7 @@ namespace Kernel::Input
BAN::ErrorOr<void> PS2Controller::device_send_byte(uint8_t device_index, uint8_t byte)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (device_index == 1)
TRY(send_byte(PS2::IOPort::COMMAND, PS2::Command::WRITE_TO_SECOND_PORT));
TRY(send_byte(PS2::IOPort::DATA, byte));
@ -74,7 +74,7 @@ namespace Kernel::Input
BAN::ErrorOr<void> PS2Controller::device_send_byte_and_wait_ack(uint8_t device_index, uint8_t byte)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
for (;;)
{
TRY(device_send_byte(device_index, byte));
@ -101,8 +101,7 @@ namespace Kernel::Input
bool PS2Controller::append_command_queue(PS2Device* device, uint8_t command, uint8_t response_size)
{
// NOTE: command queue push/pop must be done without interrupts
CriticalScope _;
LockGuard _(m_cmd_lock);
if (m_command_queue.size() + 1 >= m_command_queue.capacity())
{
dprintln("PS/2 command queue full");
@ -121,8 +120,7 @@ namespace Kernel::Input
bool PS2Controller::append_command_queue(PS2Device* device, uint8_t command, uint8_t data, uint8_t response_size)
{
// NOTE: command queue push/pop must be done without interrupts
CriticalScope _;
LockGuard _(m_cmd_lock);
if (m_command_queue.size() + 1 >= m_command_queue.capacity())
{
dprintln("PS/2 command queue full");
@ -143,6 +141,9 @@ namespace Kernel::Input
{
ASSERT(interrupts_enabled());
// NOTE: CircularQueue reads don't need locking, as long as
// we can guarantee that read element is not popped
if (m_command_queue.empty())
return;
auto& command = m_command_queue.front();
@ -152,6 +153,8 @@ namespace Kernel::Input
{
dwarnln_if(DEBUG_PS2, "Command timedout");
m_devices[command.device_index]->command_timedout(command.out_data, command.out_count);
LockGuard _(m_cmd_lock);
m_command_queue.pop();
}
return;

View File

@ -1,5 +1,4 @@
#include <BAN/ScopeGuard.h>
#include <kernel/CriticalScope.h>
#include <kernel/FS/DevFS/FileSystem.h>
#include <kernel/Input/KeyboardLayout.h>
#include <kernel/Input/PS2/Config.h>
@ -165,6 +164,7 @@ namespace Kernel::Input
event.modifier = m_modifiers | (released ? 0 : KeyEvent::Modifier::Pressed);
event.keycode = keycode.value();
LockGuard _(m_event_lock);
if (m_event_queue.full())
{
dwarnln("PS/2 event queue full");
@ -197,7 +197,7 @@ namespace Kernel::Input
if (m_event_queue.empty())
TRY(Thread::current().block_or_eintr_indefinite(m_semaphore));
CriticalScope _;
LockGuard _(m_event_lock);
if (m_event_queue.empty())
continue;

View File

@ -1,5 +1,4 @@
#include <BAN/ScopeGuard.h>
#include <kernel/CriticalScope.h>
#include <kernel/FS/DevFS/FileSystem.h>
#include <kernel/Input/PS2/Config.h>
#include <kernel/Input/PS2/Mouse.h>
@ -158,6 +157,7 @@ namespace Kernel::Input
event.scroll_event.scroll = rel_z;
}
LockGuard _(m_event_lock);
for (int i = 0; i < event_count; i++)
{
if (m_event_queue.full())
@ -181,7 +181,7 @@ namespace Kernel::Input
if (m_event_queue.empty())
TRY(Thread::current().block_or_eintr_indefinite(m_semaphore));
CriticalScope _;
LockGuard _(m_event_lock);
if (m_event_queue.empty())
continue;

View File

@ -0,0 +1,68 @@
#include <kernel/Lock/Mutex.h>
#include <kernel/Scheduler.h>
namespace Kernel
{
void Mutex::lock()
{
auto tid = Scheduler::current_tid();
if (tid != m_locker)
while (!m_locker.compare_exchange(-1, tid))
Scheduler::get().reschedule();
m_lock_depth++;
}
bool Mutex::try_lock()
{
auto tid = Scheduler::current_tid();
if (tid != m_locker)
if (!m_locker.compare_exchange(-1, tid))
return false;
m_lock_depth++;
return true;
}
void Mutex::unlock()
{
ASSERT_EQ(m_locker.load(), Scheduler::current_tid());
if (--m_lock_depth == 0)
m_locker = -1;
}
void PriorityMutex::lock()
{
const auto tid = Scheduler::current_tid();
const bool has_priority = tid ? !Thread::current().is_userspace() : true;
if (has_priority)
m_queue_depth++;
if (tid != m_locker)
while ((!has_priority && m_queue_depth > 0) || !m_locker.compare_exchange(-1, tid))
asm volatile("pause");
m_lock_depth++;
}
bool PriorityMutex::try_lock()
{
const auto tid = Scheduler::current_tid();
const bool has_priority = tid ? !Thread::current().is_userspace() : true;
if (tid != m_locker)
while ((!has_priority && m_queue_depth > 0) || !m_locker.compare_exchange(-1, tid))
return false;
if (has_priority)
m_queue_depth++;
m_lock_depth++;
return true;
}
void PriorityMutex::unlock()
{
const auto tid = Scheduler::current_tid();
const bool has_priority = tid ? !Thread::current().is_userspace() : true;
if (has_priority)
m_queue_depth--;
if (--m_lock_depth)
m_locker = -1;
}
}
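
Worth noting (not spelled out in the diff itself): Mutex is recursive per thread. If the current thread already owns the lock, lock() skips the compare_exchange loop and only bumps m_lock_depth, so nested locking on the same object keeps working. Roughly:

	Mutex m;    // illustration only
	m.lock();   // m_locker = current tid, m_lock_depth = 1
	m.lock();   // same tid: no spinning, m_lock_depth = 2
	m.unlock(); // m_lock_depth = 1, still locked
	m.unlock(); // m_lock_depth = 0, m_locker reset to -1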

View File

@ -0,0 +1,80 @@
#include <kernel/Lock/SpinLock.h>
#include <kernel/Scheduler.h>
namespace Kernel
{
static inline uintptr_t get_flags_and_disable_interrupts()
{
uintptr_t flags;
asm volatile("pushf; cli; pop %0" : "=r"(flags) :: "memory");
return flags;
}
static inline void restore_flags(uintptr_t flags)
{
asm volatile("push %0; popf" :: "rm"(flags) : "memory", "cc");
}
void SpinLock::lock()
{
const auto tid = Scheduler::current_tid();
ASSERT_NEQ(m_locker.load(), tid);
while (!m_locker.compare_exchange(-1, tid))
__builtin_ia32_pause();
m_flags = get_flags_and_disable_interrupts();
}
bool SpinLock::try_lock()
{
const auto tid = Scheduler::current_tid();
ASSERT_NEQ(m_locker.load(), tid);
if (!m_locker.compare_exchange(-1, tid))
return false;
m_flags = get_flags_and_disable_interrupts();
return true;
}
void SpinLock::unlock()
{
ASSERT_EQ(m_locker.load(), Scheduler::current_tid());
restore_flags(m_flags);
m_locker = -1;
}
void RecursiveSpinLock::lock()
{
auto tid = Scheduler::current_tid();
if (m_locker != tid)
{
while (!m_locker.compare_exchange(-1, tid))
__builtin_ia32_pause();
m_flags = get_flags_and_disable_interrupts();
}
m_lock_depth++;
}
bool RecursiveSpinLock::try_lock()
{
auto tid = Scheduler::current_tid();
if (m_locker != tid)
{
if (!m_locker.compare_exchange(-1, tid))
return false;
m_flags = get_flags_and_disable_interrupts();
}
m_lock_depth++;
return true;
}
void RecursiveSpinLock::unlock()
{
ASSERT_EQ(m_locker.load(), Scheduler::current_tid());
if (--m_lock_depth == 0)
{
restore_flags(m_flags);
m_locker = -1;
}
}
}
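
A short illustration of why SpinLock::lock() also saves RFLAGS and disables interrupts (hypothetical driver, not from the commit): the same lock can now be shared between thread context and an interrupt handler, which is how the PS/2 drivers use m_event_lock above.

	void ExampleDevice::handle_irq()
	{
		// interrupt context: IF is already clear; the spin part only
		// matters against other CPUs once SMP support lands
		LockGuard _(m_event_lock);
		m_event_queue.push(read_hardware_event());
	}

	size_t ExampleDevice::pop_events(Event* out, size_t max_count)
	{
		// thread context: lock() saves RFLAGS and executes cli, so
		// handle_irq() cannot preempt us while we drain the queue
		LockGuard _(m_event_lock);
		size_t count = 0;
		while (count < max_count && !m_event_queue.empty())
		{
			out[count++] = m_event_queue.front();
			m_event_queue.pop();
		}
		return count;
	}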

kernel/kernel/Lock/spin.o (new binary file, contents not shown)

View File

@ -1,5 +1,4 @@
#include <kernel/CriticalScope.h>
#include <kernel/LockGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/FileBackedRegion.h>
#include <kernel/Memory/Heap.h>
@ -26,7 +25,7 @@ namespace Kernel
if (type == Type::SHARED)
{
LockGuard _(inode->m_lock);
LockGuard _(inode->m_mutex);
if (inode->m_shared_region.valid())
region->m_shared_data = inode->m_shared_region.lock();
else
@ -83,12 +82,9 @@ namespace Kernel
if (pages[page_index] == 0)
return;
{
CriticalScope _;
PageTable::with_fast_page(pages[page_index], [&] {
memcpy(page_buffer, PageTable::fast_page_as_ptr(), PAGE_SIZE);
});
}
if (auto ret = inode->write(page_index * PAGE_SIZE, BAN::ConstByteSpan::from(page_buffer)); ret.is_error())
dwarnln("{}", ret.error());
@ -157,7 +153,7 @@ namespace Kernel
}
else if (m_type == Type::SHARED)
{
LockGuard _(m_inode->m_lock);
LockGuard _(m_inode->m_mutex);
ASSERT(m_inode->m_shared_region.valid());
ASSERT(m_shared_data->pages.size() == BAN::Math::div_round_up<size_t>(m_inode->size(), PAGE_SIZE));
@ -175,10 +171,9 @@ namespace Kernel
TRY(m_inode->read(offset, BAN::ByteSpan(m_shared_data->page_buffer, bytes)));
CriticalScope _;
PageTable::map_fast_page(pages[page_index]);
PageTable::with_fast_page(pages[page_index], [&] {
memcpy(PageTable::fast_page_as_ptr(), m_shared_data->page_buffer, PAGE_SIZE);
PageTable::unmap_fast_page();
});
}
paddr_t paddr = pages[page_index];

View File

@ -1,5 +1,5 @@
#include <kernel/BootInfo.h>
#include <kernel/LockGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/PageTable.h>

View File

@ -1,5 +1,4 @@
#include <kernel/CriticalScope.h>
#include <kernel/LockGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/MemoryBackedRegion.h>
@ -60,12 +59,7 @@ namespace Kernel
if (&PageTable::current() == &m_page_table)
memset((void*)vaddr, 0x00, PAGE_SIZE);
else
{
CriticalScope _;
PageTable::map_fast_page(paddr);
memset(PageTable::fast_page_as_ptr(), 0x00, PAGE_SIZE);
PageTable::unmap_fast_page();
}
PageTable::with_fast_page(paddr, [] { memset(PageTable::fast_page_as_ptr(), 0x00, PAGE_SIZE); });
return true;
}
@ -103,12 +97,10 @@ namespace Kernel
if (&PageTable::current() == &m_page_table)
memcpy((void*)write_vaddr, (void*)(buffer + written), bytes);
else
{
CriticalScope _;
PageTable::map_fast_page(m_page_table.physical_address_of(write_vaddr & PAGE_ADDR_MASK));
memcpy(PageTable::fast_page_as_ptr(page_offset), (void*)(buffer + written), bytes);
PageTable::unmap_fast_page();
}
PageTable::with_fast_page(
m_page_table.physical_address_of(write_vaddr & PAGE_ADDR_MASK),
[&] { memcpy(PageTable::fast_page_as_ptr(page_offset), (void*)(buffer + written), bytes); }
);
written += bytes;
}

View File

@ -1,5 +1,4 @@
#include <kernel/CriticalScope.h>
#include <kernel/LockGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/VirtualRange.h>
@ -135,10 +134,9 @@ namespace Kernel
result->m_page_table.map_page_at(paddr, vaddr() + offset, m_flags);
}
CriticalScope _;
PageTable::map_fast_page(result->m_page_table.physical_address_of(vaddr() + offset));
PageTable::with_fast_page(result->m_page_table.physical_address_of(vaddr() + offset), [&] {
memcpy(PageTable::fast_page_as_ptr(), (void*)(vaddr() + offset), PAGE_SIZE);
PageTable::unmap_fast_page();
});
}
return result;
@ -176,10 +174,9 @@ namespace Kernel
for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
{
CriticalScope _;
PageTable::map_fast_page(m_page_table.physical_address_of(vaddr() + offset));
PageTable::with_fast_page(m_page_table.physical_address_of(vaddr() + offset), [&] {
memset(PageTable::fast_page_as_ptr(), 0x00, PAGE_SIZE);
PageTable::unmap_fast_page();
});
}
}
@ -204,12 +201,9 @@ namespace Kernel
while (bytes > 0)
{
{
CriticalScope _;
PageTable::map_fast_page(m_page_table.physical_address_of(vaddr() + page_index * PAGE_SIZE));
PageTable::with_fast_page(m_page_table.physical_address_of(vaddr() + page_index * PAGE_SIZE), [&] {
memcpy(PageTable::fast_page_as_ptr(page_offset), buffer, PAGE_SIZE - page_offset);
PageTable::unmap_fast_page();
}
});
buffer += PAGE_SIZE - page_offset;
bytes -= PAGE_SIZE - page_offset;

View File

@ -1,10 +1,13 @@
#include <BAN/Errors.h>
#include <kernel/CriticalScope.h>
#include <kernel/kprint.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/kmalloc.h>
#include <kernel/Thread.h>
using Kernel::LockGuard;
using Kernel::SpinLock;
#define MB (1 << 20)
extern uint8_t g_kernel_end[];
@ -81,6 +84,8 @@ struct kmalloc_info
};
static kmalloc_info s_kmalloc_info;
static SpinLock s_kmalloc_lock;
template<size_t SIZE>
struct kmalloc_fixed_node
{
@ -144,6 +149,8 @@ void kmalloc_initialize()
void kmalloc_dump_info()
{
LockGuard _(s_kmalloc_lock);
kprintln("kmalloc: 0x{8H}->0x{8H}", s_kmalloc_info.base, s_kmalloc_info.end);
kprintln(" used: 0x{8H}", s_kmalloc_info.used);
kprintln(" free: 0x{8H}", s_kmalloc_info.free);
@ -155,14 +162,18 @@ void kmalloc_dump_info()
static bool is_corrupted()
{
LockGuard _(s_kmalloc_lock);
auto& info = s_kmalloc_info;
auto* temp = info.first();
for (; temp->end() <= info.end; temp = temp->after());
for (; temp->end() <= info.end; temp = temp->after())
continue;
return (uintptr_t)temp != info.end;
}
[[maybe_unused]] static void debug_dump()
{
LockGuard _(s_kmalloc_lock);
auto& info = s_kmalloc_info;
uint32_t used = 0;
@ -183,6 +194,8 @@ static void* kmalloc_fixed()
{
auto& info = s_kmalloc_fixed_info;
LockGuard _(s_kmalloc_lock);
if (!info.free_list_head)
return nullptr;
@ -225,6 +238,8 @@ static void* kmalloc_impl(size_t size, size_t align)
auto& info = s_kmalloc_info;
LockGuard _(s_kmalloc_lock);
for (auto* node = info.first(); node->end() <= info.end; node = node->after())
{
if (node->used())
@ -304,8 +319,6 @@ void* kmalloc(size_t size, size_t align, bool force_identity_map)
align = s_kmalloc_min_align;
ASSERT(align <= PAGE_SIZE);
Kernel::CriticalScope critical;
if (size == 0 || size >= info.size)
goto no_memory;
@ -338,7 +351,7 @@ void kfree(void* address)
uintptr_t address_uint = (uintptr_t)address;
ASSERT(address_uint % s_kmalloc_min_align == 0);
Kernel::CriticalScope critical;
LockGuard _(s_kmalloc_lock);
if (s_kmalloc_fixed_info.base <= address_uint && address_uint < s_kmalloc_fixed_info.end)
{
@ -399,10 +412,9 @@ void kfree(void* address)
BAN::Optional<Kernel::paddr_t> kmalloc_paddr_of(Kernel::vaddr_t vaddr)
{
using namespace Kernel;
using Kernel::vaddr_t;
LockGuard _(s_kmalloc_lock);
if ((vaddr_t)s_kmalloc_storage <= vaddr && vaddr < (vaddr_t)s_kmalloc_storage + sizeof(s_kmalloc_storage))
return V2P(vaddr);
return {};
}

View File

@ -1,4 +1,4 @@
#include <kernel/LockGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Networking/ARPTable.h>
#include <kernel/Scheduler.h>
#include <kernel/Timer/Timer.h>
@ -52,7 +52,7 @@ namespace Kernel
ipv4_address = interface.get_gateway();
{
LockGuard _(m_lock);
LockGuard _(m_table_lock);
if (m_arp_table.contains(ipv4_address))
return m_arp_table[ipv4_address];
}
@ -74,7 +74,7 @@ namespace Kernel
while (SystemTimer::get().ms_since_boot() < timeout)
{
{
LockGuard _(m_lock);
LockGuard _(m_table_lock);
if (m_arp_table.contains(ipv4_address))
return m_arp_table[ipv4_address];
}
@ -114,7 +114,7 @@ namespace Kernel
}
case ARPOperation::Reply:
{
LockGuard _(m_lock);
LockGuard _(m_table_lock);
if (m_arp_table.contains(packet.spa))
{
if (m_arp_table[packet.spa] != packet.sha)
@ -145,7 +145,7 @@ namespace Kernel
BAN::Optional<PendingArpPacket> pending;
{
CriticalScope _;
LockGuard _(m_pending_lock);
if (!m_pending_packets.empty())
{
pending = m_pending_packets.front();
@ -168,12 +168,12 @@ namespace Kernel
{
auto& arp_packet = buffer.as<const ARPPacket>();
LockGuard _(m_pending_lock);
if (m_pending_packets.full())
{
dprintln("arp packet queue full");
return;
}
m_pending_packets.push({ .interface = interface, .packet = arp_packet });
m_pending_semaphore.unblock();
}

View File

@ -261,7 +261,7 @@ namespace Kernel
{
ASSERT_LTE(buffer.size() + sizeof(EthernetHeader), E1000_TX_BUFFER_SIZE);
CriticalScope _;
LockGuard _(m_lock);
size_t tx_current = read32(REG_TDT) % E1000_TX_DESCRIPTOR_COUNT;

View File

@ -70,7 +70,7 @@ namespace Kernel
void IPv4Layer::unbind_socket(BAN::RefPtr<NetworkSocket> socket, uint16_t port)
{
LockGuard _(m_lock);
LockGuard _(m_socket_lock);
if (m_bound_sockets.contains(port))
{
ASSERT(m_bound_sockets[port].valid());
@ -88,11 +88,11 @@ namespace Kernel
return BAN::Error::from_errno(EAFNOSUPPORT);
auto& sockaddr_in = *reinterpret_cast<const struct sockaddr_in*>(address);
LockGuard _(m_lock);
LockGuard _(m_socket_lock);
uint16_t port = NetworkSocket::PORT_NONE;
for (uint32_t i = 0; i < 100 && port == NetworkSocket::PORT_NONE; i++)
if (uint32_t temp = 0xC000 | (Random::get_u32() & 0x3FFF); !m_bound_sockets.contains(temp))
if (uint32_t temp = 0xC000 | (Random::get_u32() & 0x3FFF); !m_bound_sockets.contains(temp) || !m_bound_sockets[temp].valid())
port = temp;
for (uint32_t temp = 0xC000; temp < 0xFFFF && port == NetworkSocket::PORT_NONE; temp++)
if (!m_bound_sockets.contains(temp))
@ -124,11 +124,17 @@ namespace Kernel
auto& sockaddr_in = *reinterpret_cast<const struct sockaddr_in*>(address);
uint16_t port = BAN::host_to_network_endian(sockaddr_in.sin_port);
LockGuard _(m_lock);
LockGuard _(m_socket_lock);
if (m_bound_sockets.contains(port))
return BAN::Error::from_errno(EADDRINUSE);
if (!m_bound_sockets.contains(port))
TRY(m_bound_sockets.insert(port, TRY(socket->get_weak_ptr())));
else
{
auto& bound = m_bound_sockets[port];
if (bound.valid())
return BAN::Error::from_errno(EADDRINUSE);
bound = TRY(socket->get_weak_ptr());
}
// FIXME: actually determine proper interface
auto interface = NetworkManager::get().interfaces().front();
@ -243,7 +249,7 @@ namespace Kernel
BAN::RefPtr<Kernel::NetworkSocket> bound_socket;
{
LockGuard _(m_lock);
LockGuard _(m_socket_lock);
if (!m_bound_sockets.contains(dst_port))
{
dprintln_if(DEBUG_IPV4, "no one is listening on port {}", dst_port);
@ -280,7 +286,7 @@ namespace Kernel
BAN::Optional<PendingIPv4Packet> pending;
{
CriticalScope _;
LockGuard _(m_packet_lock);
if (!m_pending_packets.empty())
{
pending = m_pending_packets.front();
@ -300,7 +306,7 @@ namespace Kernel
if (auto ret = handle_ipv4_packet(pending->interface, BAN::ByteSpan(buffer_start, ipv4_packet_size)); ret.is_error())
dwarnln("{}", ret.error());
CriticalScope _;
LockGuard _(m_packet_lock);
m_pending_total_size -= ipv4_packet_size;
if (m_pending_total_size)
memmove(buffer_start, buffer_start + ipv4_packet_size, m_pending_total_size);
@ -309,6 +315,8 @@ namespace Kernel
void IPv4Layer::add_ipv4_packet(NetworkInterface& interface, BAN::ConstByteSpan buffer)
{
LockGuard _(m_packet_lock);
if (m_pending_packets.full())
{
dwarnln("IPv4 packet queue full");

View File

@ -1,4 +1,4 @@
#include <kernel/LockGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Networking/TCPSocket.h>
#include <kernel/Random.h>
#include <kernel/Timer/Timer.h>
@ -68,8 +68,6 @@ namespace Kernel
void TCPSocket::on_close_impl()
{
LockGuard _(m_lock);
if (!is_bound())
return;
@ -103,8 +101,6 @@ namespace Kernel
if (address_len > (socklen_t)sizeof(sockaddr_storage))
address_len = sizeof(sockaddr_storage);
LockGuard _(m_lock);
ASSERT(!m_connection_info.has_value());
switch (m_state)
@ -139,7 +135,7 @@ namespace Kernel
uint64_t wake_time_ms = SystemTimer::get().ms_since_boot() + 5000;
while (m_state != State::Established)
{
LockFreeGuard free(m_lock);
LockFreeGuard free(m_mutex);
if (SystemTimer::get().ms_since_boot() >= wake_time_ms)
return BAN::Error::from_errno(ECONNREFUSED);
TRY(Thread::current().block_or_eintr_or_waketime(m_semaphore, wake_time_ms, true));
@ -195,6 +191,8 @@ namespace Kernel
void TCPSocket::add_protocol_header(BAN::ByteSpan packet, uint16_t dst_port, PseudoHeader pseudo_header)
{
LockGuard _(m_mutex);
auto& header = packet.as<TCPHeader>();
memset(&header, 0, sizeof(TCPHeader));
memset(header.options, TCPOption::End, m_tcp_options_bytes);
@ -212,7 +210,6 @@ namespace Kernel
{
case State::Closed:
{
LockGuard _(m_lock);
header.syn = 1;
add_tcp_header_option<0, TCPOption::MaximumSeqmentSize>(header, m_interface->payload_mtu() - m_network_layer.header_size());
add_tcp_header_option<4, TCPOption::WindowScale>(header, 0);
@ -233,7 +230,6 @@ namespace Kernel
break;
case State::CloseWait:
{
LockGuard _(m_lock);
header.ack = 1;
header.fin = 1;
m_state = State::LastAck;
@ -242,7 +238,6 @@ namespace Kernel
}
case State::FinWait1:
{
LockGuard _(m_lock);
header.ack = 1;
header.fin = 1;
m_state = State::FinWait2;
@ -250,7 +245,6 @@ namespace Kernel
}
case State::FinWait2:
{
LockGuard _(m_lock);
header.ack = 1;
m_state = State::TimeWait;
m_time_wait_start_ms = SystemTimer::get().ms_since_boot();
@ -303,6 +297,10 @@ namespace Kernel
auto payload = buffer.slice(header.data_offset * sizeof(uint32_t));
// FIXME: Internet layer packet receive thread should not be able to be
// blocked by inode's mutex
LockGuard _(m_mutex);
switch (m_state)
{
case State::Closed:
@ -312,7 +310,6 @@ namespace Kernel
if (!header.ack || !header.syn)
break;
LockGuard _(m_lock);
if (header.ack_number != m_send_window.current_seq)
{
@ -345,8 +342,6 @@ namespace Kernel
if (!header.ack)
break;
LockGuard _(m_lock);
if (header.fin)
{
if (m_recv_window.start_seq + m_recv_window.data_size != header.seq_number)
@ -436,7 +431,7 @@ namespace Kernel
set_connection_as_closed();
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (m_should_ack)
{
@ -518,7 +513,7 @@ namespace Kernel
BAN::ErrorOr<size_t> TCPSocket::recvfrom_impl(BAN::ByteSpan buffer, sockaddr*, socklen_t*)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (m_state == State::Closed)
return BAN::Error::from_errno(ENOTCONN);
@ -542,7 +537,7 @@ namespace Kernel
case State::Closing: ASSERT_NOT_REACHED();
};
LockFreeGuard free(m_lock);
LockFreeGuard free(m_mutex);
TRY(Thread::current().block_or_eintr_indefinite(m_semaphore));
}
@ -575,7 +570,7 @@ namespace Kernel
return message.size();
}
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (m_state == State::Closed)
return BAN::Error::from_errno(ENOTCONN);
@ -602,7 +597,7 @@ namespace Kernel
if (m_send_window.data_size + message.size() <= m_send_window.buffer->size())
break;
LockFreeGuard free(m_lock);
LockFreeGuard free(m_mutex);
TRY(Thread::current().block_or_eintr_indefinite(m_semaphore));
}
@ -634,7 +629,7 @@ namespace Kernel
case State::Closing: ASSERT_NOT_REACHED();
};
LockFreeGuard free(m_lock);
LockFreeGuard free(m_mutex);
TRY(Thread::current().block_or_eintr_indefinite(m_semaphore));
}
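The recv/send paths above follow one convention: m_mutex is held while socket state is examined, and a LockFreeGuard drops it only for the duration of the blocking wait on m_semaphore so that the packet-receive path can still make progress. A hedged sketch of that counterpart guard follows, simplified to a single unlock/relock; a real implementation for a recursive lock would also have to remember and restore the lock depth, much like the hand-rolled loop removed from TTY::read_impl later in this diff:

template<typename Lock>
class LockFreeGuard
{
public:
	explicit LockFreeGuard(Lock& lock) : m_lock(lock) { m_lock.unlock(); } // release across the blocking wait
	~LockFreeGuard() { m_lock.lock(); }                                    // reacquire before continuing
private:
	Lock& m_lock;
};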

View File

@ -1,4 +1,4 @@
#include <kernel/LockGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Networking/UDPSocket.h>
#include <kernel/Thread.h>

View File

@ -229,11 +229,11 @@ namespace Kernel
BAN::ErrorOr<void> UnixDomainSocket::add_packet(BAN::ConstByteSpan packet)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
while (m_packet_sizes.full() || m_packet_size_total + packet.size() > s_packet_buffer_size)
{
LockFreeGuard _(m_lock);
LockFreeGuard _(m_mutex);
TRY(Thread::current().block_or_eintr_indefinite(m_packet_semaphore));
}
@ -340,7 +340,7 @@ namespace Kernel
while (m_packet_size_total == 0)
{
LockFreeGuard _(m_lock);
LockFreeGuard _(m_mutex);
TRY(Thread::current().block_or_eintr_indefinite(m_packet_semaphore));
}

View File

@ -1,6 +1,6 @@
#include <kernel/CriticalScope.h>
#include <kernel/IDT.h>
#include <kernel/IO.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/PIC.h>
#include <string.h>
@ -79,7 +79,7 @@ namespace Kernel
void PIC::enable_irq(uint8_t irq)
{
CriticalScope _;
LockGuard _(m_lock);
ASSERT(irq < 16);
ASSERT(m_reserved_irqs & (1 << irq));
@ -99,7 +99,7 @@ namespace Kernel
dwarnln("PIC only supports 16 irqs");
return BAN::Error::from_errno(EFAULT);
}
CriticalScope _;
LockGuard _(m_lock);
if (m_reserved_irqs & (1 << irq))
{
dwarnln("irq {} is already reserved", irq);
@ -111,7 +111,7 @@ namespace Kernel
BAN::Optional<uint8_t> PIC::get_free_irq()
{
CriticalScope _;
LockGuard _(m_lock);
for (int irq = 0; irq < 16; irq++)
{
if (m_reserved_irqs & (1 << irq))

View File

@ -1,17 +1,15 @@
#include <BAN/ScopeGuard.h>
#include <BAN/StringView.h>
#include <kernel/CriticalScope.h>
#include <kernel/FS/DevFS/FileSystem.h>
#include <kernel/FS/ProcFS/FileSystem.h>
#include <kernel/FS/VirtualFileSystem.h>
#include <kernel/IDT.h>
#include <kernel/Input/KeyboardLayout.h>
#include <kernel/InterruptController.h>
#include <kernel/LockGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/FileBackedRegion.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/MemoryBackedRegion.h>
#include <kernel/Memory/PageTableScope.h>
#include <kernel/Process.h>
#include <kernel/Scheduler.h>
#include <kernel/Storage/StorageDevice.h>
@ -67,7 +65,7 @@ namespace Kernel
pid_t pid;
{
CriticalScope _;
LockGuard _(s_process_lock);
pid = s_next_id;
if (sid == 0 && pgrp == 0)
{
@ -138,8 +136,6 @@ namespace Kernel
char** argv = nullptr;
{
PageTableScope _(process->page_table());
size_t needed_bytes = sizeof(char*) * 2 + path.size() + 1;
if (auto rem = needed_bytes % PAGE_SIZE)
needed_bytes += PAGE_SIZE - rem;
@ -196,7 +192,7 @@ namespace Kernel
void Process::add_thread(Thread* thread)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
MUST(m_threads.push_back(thread));
}
@ -217,7 +213,8 @@ namespace Kernel
while (m_exit_status.waiting > 0)
Scheduler::get().reschedule();
m_lock.lock();
// This mutex is locked here and never unlocked again
m_big_mutex.lock();
m_open_file_descriptors.close_all();
@ -238,7 +235,7 @@ namespace Kernel
m_threads.clear();
thread.setup_process_cleanup();
Scheduler::get().execute_current_thread();
Scheduler::get().reschedule_current_no_save();
ASSERT_NOT_REACHED();
}
@ -256,7 +253,7 @@ namespace Kernel
void Process::exit(int status, int signal)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
m_exit_status.exit_code = __WGENEXITCODE(status, signal);
for (auto* thread : m_threads)
if (thread != &Thread::current())
@ -277,7 +274,7 @@ namespace Kernel
meminfo.phys_pages = 0;
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
for (auto* thread : m_threads)
{
meminfo.virt_pages += thread->virtual_page_count();
@ -326,13 +323,13 @@ namespace Kernel
size_t Process::proc_cmdline(off_t offset, BAN::ByteSpan buffer) const
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
return read_from_vec_of_str(m_cmdline, offset, buffer);
}
size_t Process::proc_environ(off_t offset, BAN::ByteSpan buffer) const
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
return read_from_vec_of_str(m_environ, offset, buffer);
}
@ -345,7 +342,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_gettermios(::termios* termios)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(termios, sizeof(::termios)));
@ -364,7 +361,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_settermios(const ::termios* termios)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(termios, sizeof(::termios)));
@ -403,7 +400,7 @@ namespace Kernel
{
auto page_table = BAN::UniqPtr<PageTable>::adopt(TRY(PageTable::create_userspace()));
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
BAN::String working_directory;
TRY(working_directory.append(m_working_directory));
@ -443,7 +440,7 @@ namespace Kernel
{
// NOTE: We scope everything for automatic deletion
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(path));
auto loadable_elf = TRY(load_elf_for_exec(m_credentials, path, m_working_directory, page_table()));
@ -542,7 +539,7 @@ namespace Kernel
m_has_called_exec = true;
m_threads.front()->setup_exec();
Scheduler::get().execute_current_thread();
Scheduler::get().reschedule_current_no_save();
ASSERT_NOT_REACHED();
}
@ -579,7 +576,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_wait(pid_t pid, int* stat_loc, int options)
{
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(stat_loc, sizeof(int)));
}
@ -612,7 +609,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_nanosleep(const timespec* rqtp, timespec* rmtp)
{
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(rqtp, sizeof(timespec)));
if (rmtp)
TRY(validate_pointer_access(rmtp, sizeof(timespec)));
@ -654,7 +651,7 @@ namespace Kernel
return BAN::Error::from_errno(ENOTSUP);
}
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
auto absolute_path = TRY(absolute_path_of(path));
@ -683,7 +680,7 @@ namespace Kernel
{
ASSERT(&Process::current() == this);
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
if (Thread::current().stack().contains(address))
{
@ -711,13 +708,13 @@ namespace Kernel
BAN::ErrorOr<long> Process::open_inode(BAN::RefPtr<Inode> inode, int flags)
{
ASSERT(inode);
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
return TRY(m_open_file_descriptors.open(inode, flags));
}
BAN::ErrorOr<long> Process::open_file(BAN::StringView path, int flags, mode_t mode)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
BAN::String absolute_path = TRY(absolute_path_of(path));
@ -750,14 +747,14 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_open(const char* path, int flags, mode_t mode)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(path));
return open_file(path, flags, mode);
}
BAN::ErrorOr<long> Process::sys_openat(int fd, const char* path, int flags, mode_t mode)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(path));
@ -773,28 +770,28 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_close(int fd)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(m_open_file_descriptors.close(fd));
return 0;
}
BAN::ErrorOr<long> Process::sys_read(int fd, void* buffer, size_t count)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(buffer, count));
return TRY(m_open_file_descriptors.read(fd, BAN::ByteSpan((uint8_t*)buffer, count)));
}
BAN::ErrorOr<long> Process::sys_write(int fd, const void* buffer, size_t count)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(buffer, count));
return TRY(m_open_file_descriptors.write(fd, BAN::ByteSpan((uint8_t*)buffer, count)));
}
BAN::ErrorOr<long> Process::sys_create(const char* path, mode_t mode)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(path));
TRY(create_file_or_dir(path, mode));
return 0;
@ -802,7 +799,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_create_dir(const char* path, mode_t mode)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(path));
BAN::StringView path_sv(path);
if (!path_sv.empty() && path_sv.back() == '/')
@ -813,7 +810,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_unlink(const char* path)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(path));
auto absolute_path = TRY(absolute_path_of(path));
@ -846,7 +843,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_readlink(const char* path, char* buffer, size_t bufsize)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(path));
TRY(validate_pointer_access(buffer, bufsize));
@ -857,7 +854,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_readlinkat(int fd, const char* path, char* buffer, size_t bufsize)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(path));
TRY(validate_pointer_access(buffer, bufsize));
@ -874,7 +871,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_pread(int fd, void* buffer, size_t count, off_t offset)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(buffer, count));
auto inode = TRY(m_open_file_descriptors.inode_of(fd));
return TRY(inode->read(offset, { (uint8_t*)buffer, count }));
@ -885,7 +882,7 @@ namespace Kernel
if (mode & S_IFMASK)
return BAN::Error::from_errno(EINVAL);
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(path));
auto absolute_path = TRY(absolute_path_of(path));
@ -897,7 +894,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_chown(const char* path, uid_t uid, gid_t gid)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(path));
auto absolute_path = TRY(absolute_path_of(path));
@ -909,7 +906,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_socket(int domain, int type, int protocol)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
return TRY(m_open_file_descriptors.socket(domain, type, protocol));
}
@ -920,7 +917,7 @@ namespace Kernel
if (!address && address_len)
return BAN::Error::from_errno(EINVAL);
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
if (address)
{
TRY(validate_pointer_access(address_len, sizeof(*address_len)));
@ -936,7 +933,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_bind(int socket, const sockaddr* address, socklen_t address_len)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(address, address_len));
auto inode = TRY(m_open_file_descriptors.inode_of(socket));
@ -949,7 +946,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_connect(int socket, const sockaddr* address, socklen_t address_len)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(address, address_len));
auto inode = TRY(m_open_file_descriptors.inode_of(socket));
@ -962,7 +959,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_listen(int socket, int backlog)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
auto inode = TRY(m_open_file_descriptors.inode_of(socket));
if (!inode->mode().ifsock())
@ -974,7 +971,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_sendto(const sys_sendto_t* arguments)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(arguments, sizeof(sys_sendto_t)));
TRY(validate_pointer_access(arguments->message, arguments->length));
TRY(validate_pointer_access(arguments->dest_addr, arguments->dest_len));
@ -994,7 +991,7 @@ namespace Kernel
if (!arguments->address && arguments->address_len)
return BAN::Error::from_errno(EINVAL);
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(arguments, sizeof(sys_recvfrom_t)));
TRY(validate_pointer_access(arguments->buffer, arguments->length));
if (arguments->address)
@ -1013,14 +1010,14 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_ioctl(int fildes, int request, void* arg)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
auto inode = TRY(m_open_file_descriptors.inode_of(fildes));
return TRY(inode->ioctl(request, arg));
}
BAN::ErrorOr<long> Process::sys_pselect(sys_pselect_t* arguments)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(arguments, sizeof(sys_pselect_t)));
if (arguments->readfds)
@ -1089,7 +1086,7 @@ namespace Kernel
if (set_bits > 0)
break;
LockFreeGuard free(m_lock);
LockFreeGuard free(m_big_mutex);
SystemTimer::get().sleep(1);
}
@ -1115,7 +1112,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_pipe(int fildes[2])
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(fildes, sizeof(int) * 2));
TRY(m_open_file_descriptors.pipe(fildes));
return 0;
@ -1123,32 +1120,32 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_dup(int fildes)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
return TRY(m_open_file_descriptors.dup(fildes));
}
BAN::ErrorOr<long> Process::sys_dup2(int fildes, int fildes2)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
return TRY(m_open_file_descriptors.dup2(fildes, fildes2));
}
BAN::ErrorOr<long> Process::sys_fcntl(int fildes, int cmd, int extra)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
return TRY(m_open_file_descriptors.fcntl(fildes, cmd, extra));
}
BAN::ErrorOr<long> Process::sys_seek(int fd, off_t offset, int whence)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(m_open_file_descriptors.seek(fd, offset, whence));
return 0;
}
BAN::ErrorOr<long> Process::sys_tell(int fd)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
return TRY(m_open_file_descriptors.tell(fd));
}
@ -1156,7 +1153,7 @@ namespace Kernel
{
BAN::String absolute_source, absolute_target;
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(absolute_source.append(TRY(absolute_path_of(source))));
TRY(absolute_target.append(TRY(absolute_path_of(target))));
}
@ -1166,7 +1163,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_fstat(int fd, struct stat* buf)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(buf, sizeof(struct stat)));
TRY(m_open_file_descriptors.fstat(fd, buf));
return 0;
@ -1174,7 +1171,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_fstatat(int fd, const char* path, struct stat* buf, int flag)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(buf, sizeof(struct stat)));
TRY(m_open_file_descriptors.fstatat(fd, path, buf, flag));
return 0;
@ -1182,7 +1179,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_stat(const char* path, struct stat* buf, int flag)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(buf, sizeof(struct stat)));
TRY(m_open_file_descriptors.stat(TRY(absolute_path_of(path)), buf, flag));
return 0;
@ -1235,7 +1232,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_readdir(int fd, DirectoryEntryList* list, size_t list_size)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(list, list_size));
TRY(m_open_file_descriptors.read_dir_entries(fd, list, list_size));
return 0;
@ -1246,7 +1243,7 @@ namespace Kernel
BAN::String absolute_path;
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(path));
absolute_path = TRY(absolute_path_of(path));
}
@ -1255,7 +1252,7 @@ namespace Kernel
if (!file.inode->mode().ifdir())
return BAN::Error::from_errno(ENOTDIR);
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
m_working_directory = BAN::move(file.canonical_path);
return 0;
@ -1263,7 +1260,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_getpwd(char* buffer, size_t size)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(buffer, size));
@ -1279,7 +1276,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_mmap(const sys_mmap_t* args)
{
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(args, sizeof(sys_mmap_t)));
}
@ -1320,7 +1317,7 @@ namespace Kernel
region_type, page_flags
));
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(m_mapped_regions.push_back(BAN::move(region)));
return m_mapped_regions.back()->vaddr();
}
@ -1328,7 +1325,7 @@ namespace Kernel
if (args->addr != nullptr)
return BAN::Error::from_errno(ENOTSUP);
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
auto inode = TRY(m_open_file_descriptors.inode_of(args->fildes));
@ -1376,7 +1373,7 @@ namespace Kernel
if (vaddr % PAGE_SIZE != 0)
return BAN::Error::from_errno(EINVAL);
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
// FIXME: We should only map partial regions
for (size_t i = 0; i < m_mapped_regions.size(); i++)
@ -1395,7 +1392,7 @@ namespace Kernel
if (vaddr % PAGE_SIZE != 0)
return BAN::Error::from_errno(EINVAL);
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
for (auto& mapped_region : m_mapped_regions)
if (mapped_region->overlaps(vaddr, len))
@ -1406,7 +1403,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_tty_ctrl(int fildes, int command, int flags)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
auto inode = TRY(m_open_file_descriptors.inode_of(fildes));
if (!inode->is_tty())
@ -1419,7 +1416,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_termid(char* buffer)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(buffer));
@ -1440,7 +1437,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_clock_gettime(clockid_t clock_id, timespec* tp)
{
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(tp, sizeof(timespec)));
}
@ -1465,7 +1462,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_load_keymap(const char* path)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(path));
if (!m_credentials.is_superuser())
@ -1482,11 +1479,11 @@ namespace Kernel
return BAN::Error::from_errno(EINVAL);
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access((void*)handler, sizeof(handler)));
}
CriticalScope _;
LockGuard _(m_signal_lock);
m_signal_handlers[signal] = (vaddr_t)handler;
return 0;
}
@ -1500,7 +1497,7 @@ namespace Kernel
if (pid == Process::current().pid())
{
CriticalScope _;
LockGuard _(m_signal_lock);
Process::current().m_signal_pending_mask |= 1 << signal;
return 0;
}
@ -1514,7 +1511,7 @@ namespace Kernel
found = true;
if (signal)
{
CriticalScope _;
LockGuard _(m_signal_lock);
process.m_signal_pending_mask |= 1 << signal;
// FIXME: This is super hacky
Scheduler::get().unblock_thread(process.m_threads.front()->tid());
@ -1532,7 +1529,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_tcsetpgrp(int fd, pid_t pgrp)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
if (!m_controlling_terminal)
return BAN::Error::from_errno(ENOTTY);
@ -1568,7 +1565,7 @@ namespace Kernel
if (uid < 0 || uid >= 1'000'000'000)
return BAN::Error::from_errno(EINVAL);
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
// If the process has appropriate privileges, setuid() shall set the real user ID, effective user ID, and the saved
// set-user-ID of the calling process to uid.
@ -1598,7 +1595,7 @@ namespace Kernel
if (gid < 0 || gid >= 1'000'000'000)
return BAN::Error::from_errno(EINVAL);
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
// If the process has appropriate privileges, setgid() shall set the real group ID, effective group ID, and the saved
// set-group-ID of the calling process to gid.
@ -1626,7 +1623,7 @@ namespace Kernel
if (uid < 0 || uid >= 1'000'000'000)
return BAN::Error::from_errno(EINVAL);
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
// If uid is equal to the real user ID or the saved set-user-ID, or if the process has appropriate privileges, seteuid()
// shall set the effective user ID of the calling process to uid; the real user ID and saved set-user-ID shall remain unchanged.
@ -1645,7 +1642,7 @@ namespace Kernel
if (gid < 0 || gid >= 1'000'000'000)
return BAN::Error::from_errno(EINVAL);
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
// If gid is equal to the real group ID or the saved set-group-ID, or if the process has appropriate privileges, setegid()
// shall set the effective group ID of the calling process to gid; the real group ID, saved set-group-ID, and any
@ -1673,7 +1670,7 @@ namespace Kernel
// by the ruid and euid arguments. If ruid or euid is -1, the corresponding effective or real user ID of the current
// process shall be left unchanged.
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
// A process with appropriate privileges can set either ID to any value.
if (!m_credentials.is_superuser())
@ -1721,7 +1718,7 @@ namespace Kernel
// The real and effective group IDs may be set to different values in the same call.
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
// Only a process with appropriate privileges can set the real group ID and the effective group ID to any valid value.
if (!m_credentials.is_superuser())
@ -1754,7 +1751,7 @@ namespace Kernel
if (pgid < 0)
return BAN::Error::from_errno(EINVAL);
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
if (pid == 0)
pid = m_pid;
@ -1819,7 +1816,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_getpgid(pid_t pid)
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
if (pid == 0 || pid == m_pid)
return m_pgrp;
@ -1851,7 +1848,7 @@ namespace Kernel
BAN::ErrorOr<BAN::String> Process::absolute_path_of(BAN::StringView path) const
{
LockGuard _(m_lock);
LockGuard _(m_big_mutex);
if (path.empty() || path == "."sv)
return m_working_directory;
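Nearly every syscall above now serializes on m_big_mutex, while the signal bookkeeping that used to sit under CriticalScope moves to a separate m_signal_lock. The big lock must allow recursive acquisition, since e.g. sys_open locks it and then calls open_file, which locks it again. The member shapes implied by these hunks are sketched below with the types as assumptions; the real declarations are in kernel/Process.h, which is not part of this excerpt:

class Process
{
private:
	mutable Mutex m_big_mutex;   // big per-process lock; taken (recursively) by most syscalls, also from const getters
	SpinLock      m_signal_lock; // guards m_signal_handlers / m_signal_pending_mask instead of disabling interrupts
};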

View File

@ -1,8 +1,8 @@
#include <kernel/Arch.h>
#include <kernel/Attributes.h>
#include <kernel/CriticalScope.h>
#include <kernel/GDT.h>
#include <kernel/InterruptController.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Process.h>
#include <kernel/Scheduler.h>
#include <kernel/Timer/Timer.h>
@ -32,6 +32,39 @@ namespace Kernel
asm volatile("movq %0, %%rsp" :: "r"(s_temp_stack + sizeof(s_temp_stack)));
}
void SchedulerLock::lock()
{
auto tid = Scheduler::current_tid();
if (tid != m_locker)
{
while (!m_locker.compare_exchange(-1, tid))
__builtin_ia32_pause();
ASSERT_EQ(m_lock_depth, 0);
}
m_lock_depth++;
}
void SchedulerLock::unlock()
{
ASSERT_EQ(m_locker.load(), Scheduler::current_tid());
ASSERT_GT(m_lock_depth, 0);
if (--m_lock_depth == 0)
m_locker = -1;
}
void SchedulerLock::unlock_all()
{
ASSERT_EQ(m_locker.load(), Scheduler::current_tid());
ASSERT_GT(m_lock_depth, 0);
m_lock_depth = 0;
m_locker = -1;
}
pid_t SchedulerLock::locker() const
{
return m_locker;
}
BAN::ErrorOr<void> Scheduler::initialize()
{
ASSERT(s_instance == nullptr);
@ -52,6 +85,7 @@ namespace Kernel
VERIFY_CLI();
ASSERT(!m_active_threads.empty());
m_current_thread = m_active_threads.begin();
m_lock.lock();
execute_current_thread();
ASSERT_NOT_REACHED();
}
@ -63,7 +97,7 @@ namespace Kernel
pid_t Scheduler::current_tid()
{
if (s_instance == nullptr)
if (s_instance == nullptr || s_instance->m_idle_thread == nullptr)
return 0;
return Scheduler::get().current_thread().tid();
}
@ -71,6 +105,7 @@ namespace Kernel
void Scheduler::timer_reschedule()
{
VERIFY_CLI();
m_lock.lock();
wake_threads();
@ -84,6 +119,7 @@ namespace Kernel
void Scheduler::reschedule()
{
DISABLE_INTERRUPTS();
m_lock.lock();
if (save_current_thread())
{
@ -98,20 +134,30 @@ namespace Kernel
void Scheduler::reschedule_if_idling()
{
VERIFY_CLI();
m_lock.lock();
if (m_active_threads.empty() || &current_thread() != m_idle_thread)
return;
return m_lock.unlock();
if (save_current_thread())
return;
m_current_thread = m_active_threads.begin();
m_current_thread = {};
advance_current_thread();
execute_current_thread();
ASSERT_NOT_REACHED();
}
void Scheduler::reschedule_current_no_save()
{
VERIFY_CLI();
m_lock.lock();
execute_current_thread();
}
void Scheduler::wake_threads()
{
VERIFY_CLI();
ASSERT_EQ(m_lock.locker(), current_tid());
uint64_t current_time = SystemTimer::get().ms_since_boot();
while (!m_sleeping_threads.empty() && m_sleeping_threads.front().wake_time <= current_time)
@ -126,7 +172,7 @@ namespace Kernel
BAN::ErrorOr<void> Scheduler::add_thread(Thread* thread)
{
CriticalScope _;
LockGuard _(m_lock);
TRY(m_active_threads.emplace_back(thread));
return {};
}
@ -134,19 +180,20 @@ namespace Kernel
void Scheduler::advance_current_thread()
{
VERIFY_CLI();
ASSERT_EQ(m_lock.locker(), current_tid());
if (m_active_threads.empty())
{
m_current_thread = {};
return;
}
if (!m_current_thread || ++m_current_thread == m_active_threads.end())
else if (!m_current_thread || ++m_current_thread == m_active_threads.end())
m_current_thread = m_active_threads.begin();
m_lock.m_locker = current_tid();
}
void Scheduler::remove_and_advance_current_thread()
{
VERIFY_CLI();
ASSERT_EQ(m_lock.locker(), current_tid());
ASSERT(m_current_thread);
@ -161,6 +208,8 @@ namespace Kernel
advance_current_thread();
m_active_threads.remove(temp);
}
m_lock.m_locker = current_tid();
}
// NOTE: this is declared always inline, so we don't corrupt the stack
@ -168,6 +217,7 @@ namespace Kernel
ALWAYS_INLINE bool Scheduler::save_current_thread()
{
VERIFY_CLI();
ASSERT_EQ(m_lock.locker(), current_tid());
uintptr_t rsp, rip;
push_callee_saved();
@ -190,6 +240,7 @@ namespace Kernel
void Scheduler::delete_current_process_and_thread()
{
DISABLE_INTERRUPTS();
m_lock.lock();
load_temp_stack();
PageTable::kernel().load();
@ -210,6 +261,7 @@ namespace Kernel
void Scheduler::execute_current_thread()
{
VERIFY_CLI();
ASSERT_EQ(m_lock.locker(), current_tid());
load_temp_stack();
PageTable::kernel().load();
@ -220,6 +272,7 @@ namespace Kernel
NEVER_INLINE void Scheduler::_execute_current_thread()
{
VERIFY_CLI();
ASSERT_EQ(m_lock.locker(), current_tid());
#if SCHEDULER_VERIFY_STACK
vaddr_t rsp;
@ -266,10 +319,12 @@ namespace Kernel
{
case Thread::State::NotStarted:
current->set_started();
m_lock.unlock_all();
start_thread(current->rsp(), current->rip());
case Thread::State::Executing:
while (current->can_add_signal_to_execute())
current->handle_signal();
m_lock.unlock_all();
continue_thread(current->rsp(), current->rip());
case Thread::State::Terminated:
ASSERT_NOT_REACHED();
@ -281,6 +336,7 @@ namespace Kernel
void Scheduler::set_current_thread_sleeping_impl(uint64_t wake_time)
{
VERIFY_CLI();
ASSERT_EQ(m_lock.locker(), current_tid());
if (save_current_thread())
{
@ -301,6 +357,7 @@ namespace Kernel
);
m_current_thread = {};
m_lock.m_locker = current_tid();
advance_current_thread();
execute_current_thread();
@ -311,6 +368,7 @@ namespace Kernel
{
VERIFY_STI();
DISABLE_INTERRUPTS();
m_lock.lock();
ASSERT(m_current_thread);
@ -322,6 +380,7 @@ namespace Kernel
{
VERIFY_STI();
DISABLE_INTERRUPTS();
m_lock.lock();
ASSERT(m_current_thread);
@ -331,7 +390,7 @@ namespace Kernel
void Scheduler::unblock_threads(Semaphore* semaphore)
{
CriticalScope critical;
LockGuard _(m_lock);
for (auto it = m_sleeping_threads.begin(); it != m_sleeping_threads.end();)
{
@ -352,7 +411,7 @@ namespace Kernel
void Scheduler::unblock_thread(pid_t tid)
{
CriticalScope _;
LockGuard _(m_lock);
for (auto it = m_sleeping_threads.begin(); it != m_sleeping_threads.end(); it++)
{
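The new SchedulerLock defined above is a recursive spin lock keyed by thread id: lock() spins with compare_exchange(-1, tid) and bumps a depth counter, unlock() releases ownership when the depth reaches zero, and unlock_all() drops every level at once right before the scheduler jumps into a thread. The direct writes to m_lock.m_locker in advance_current_thread() and friends re-tag ownership when m_current_thread changes. A declaration consistent with those definitions, written out as a sketch since the header is not shown in this diff:

class SchedulerLock
{
public:
	void  lock();        // spin on m_locker with compare_exchange(-1, tid); recursion bumps m_lock_depth
	void  unlock();      // decrement the depth, release ownership at zero
	void  unlock_all();  // drop all recursion levels at once (used just before start/continue_thread)
	pid_t locker() const;

private:
	BAN::Atomic<pid_t> m_locker { -1 };
	uint32_t           m_lock_depth { 0 };

	friend class Scheduler; // the scheduler re-tags m_locker when it switches m_current_thread
};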

View File

@ -1,86 +0,0 @@
#include <kernel/Scheduler.h>
#include <kernel/SpinLock.h>
namespace Kernel
{
void SpinLock::lock()
{
pid_t tid = Scheduler::current_tid();
ASSERT(tid != m_locker);
while (!m_locker.compare_exchange(-1, tid))
Scheduler::get().reschedule();
}
void SpinLock::unlock()
{
ASSERT(m_locker == Scheduler::current_tid());
m_locker = -1;
}
bool SpinLock::is_locked() const
{
return m_locker != -1;
}
void RecursiveSpinLock::lock()
{
pid_t tid = Scheduler::current_tid();
if (m_locker != tid)
{
while (!m_locker.compare_exchange(-1, tid))
Scheduler::get().reschedule();
ASSERT(m_lock_depth == 0);
}
m_lock_depth++;
}
void RecursiveSpinLock::unlock()
{
ASSERT(m_lock_depth > 0);
ASSERT(m_locker == Scheduler::current_tid());
if (--m_lock_depth == 0)
m_locker = -1;
}
bool RecursiveSpinLock::is_locked() const
{
return m_locker != -1;
}
void RecursivePrioritySpinLock::lock()
{
pid_t tid = Scheduler::current_tid();
bool has_priority = !Thread::current().is_userspace();
if (has_priority)
m_queue_length++;
if (m_locker != tid)
{
while (!((has_priority || m_queue_length == 0) && m_locker.compare_exchange(-1, tid)))
Scheduler::get().reschedule();
ASSERT(m_lock_depth == 0);
}
m_lock_depth++;
}
void RecursivePrioritySpinLock::unlock()
{
ASSERT(m_lock_depth > 0);
ASSERT(m_locker == Scheduler::current_tid());
bool has_priority = !Thread::current().is_userspace();
if (has_priority)
m_queue_length--;
if (--m_lock_depth == 0)
m_locker = -1;
}
bool RecursivePrioritySpinLock::is_locked() const
{
return m_locker != -1;
}
}
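This file is deleted outright. The members renamed to m_mutex throughout this diff point to a Mutex class taking over this reschedule-while-contended behaviour, while a leaner SpinLock remains for contexts that cannot sleep; neither new source under kernel/Lock/ appears in this excerpt, so the split is only sketched below in minimal form:

class SpinLock // sketch: true spin lock, usable where rescheduling is not an option
{
public:
	void lock()
	{
		const pid_t tid = Scheduler::current_tid();
		while (!m_locker.compare_exchange(-1, tid))
			__builtin_ia32_pause(); // busy-wait; never yields to the scheduler
	}
	void unlock() { m_locker = -1; }
private:
	BAN::Atomic<pid_t> m_locker { -1 };
};

class Mutex // sketch: yields while contended, like the removed SpinLock::lock above
{
public:
	void lock()
	{
		const pid_t tid = Scheduler::current_tid();
		while (!m_locker.compare_exchange(-1, tid))
			Scheduler::get().reschedule(); // give the holder a chance to run
	}
	void unlock() { m_locker = -1; }
private:
	BAN::Atomic<pid_t> m_locker { -1 };
};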

View File

@ -2,7 +2,7 @@
#include <kernel/IDT.h>
#include <kernel/InterruptController.h>
#include <kernel/IO.h>
#include <kernel/LockGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Storage/ATA/ATABus.h>
#include <kernel/Storage/ATA/ATADefinitions.h>
#include <kernel/Storage/ATA/ATADevice.h>
@ -261,7 +261,7 @@ namespace Kernel
if (lba + sector_count > device.sector_count())
return BAN::Error::from_error_code(ErrorCode::Storage_Boundaries);
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (lba < (1 << 28))
{
@ -298,7 +298,7 @@ namespace Kernel
if (lba + sector_count > device.sector_count())
return BAN::Error::from_error_code(ErrorCode::Storage_Boundaries);
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (lba < (1 << 28))
{

View File

@ -1,5 +1,4 @@
#include <kernel/CriticalScope.h>
#include <kernel/LockGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/PageTable.h>
#include <kernel/Storage/DiskCache.h>
@ -32,10 +31,6 @@ namespace Kernel
uint64_t page_cache_offset = sector % sectors_per_page;
uint64_t page_cache_start = sector - page_cache_offset;
PageTable& page_table = PageTable::current();
LockGuard page_table_locker(page_table);
ASSERT(page_table.is_page_free(0));
for (auto& cache : m_cache)
{
if (cache.first_sector < page_cache_start)
@ -46,10 +41,9 @@ namespace Kernel
if (!(cache.sector_mask & (1 << page_cache_offset)))
continue;
CriticalScope _;
PageTable::map_fast_page(cache.paddr);
PageTable::with_fast_page(cache.paddr, [&] {
memcpy(buffer.data(), PageTable::fast_page_as_ptr(page_cache_offset * m_sector_size), m_sector_size);
PageTable::unmap_fast_page();
});
return true;
}
@ -64,10 +58,6 @@ namespace Kernel
uint64_t page_cache_offset = sector % sectors_per_page;
uint64_t page_cache_start = sector - page_cache_offset;
PageTable& page_table = PageTable::current();
LockGuard page_table_locker(page_table);
ASSERT(page_table.is_page_free(0));
size_t index = 0;
// Search the cache to see if we have this sector in memory
@ -80,12 +70,9 @@ namespace Kernel
if (cache.first_sector > page_cache_start)
break;
{
CriticalScope _;
PageTable::map_fast_page(cache.paddr);
PageTable::with_fast_page(cache.paddr, [&] {
memcpy(PageTable::fast_page_as_ptr(page_cache_offset * m_sector_size), buffer.data(), m_sector_size);
PageTable::unmap_fast_page();
}
});
cache.sector_mask |= 1 << page_cache_offset;
if (dirty)
@ -111,12 +98,9 @@ namespace Kernel
return ret.error();
}
{
CriticalScope _;
PageTable::map_fast_page(cache.paddr);
PageTable::with_fast_page(cache.paddr, [&] {
memcpy(PageTable::fast_page_as_ptr(page_cache_offset * m_sector_size), buffer.data(), m_sector_size);
PageTable::unmap_fast_page();
}
});
return {};
}
@ -128,12 +112,9 @@ namespace Kernel
if (cache.dirty_mask == 0)
continue;
{
CriticalScope _;
PageTable::map_fast_page(cache.paddr);
PageTable::with_fast_page(cache.paddr, [&] {
memcpy(m_sync_cache.data(), PageTable::fast_page_as_ptr(), PAGE_SIZE);
PageTable::unmap_fast_page();
}
});
uint8_t sector_start = 0;
uint8_t sector_count = 0;
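Each CriticalScope + map_fast_page/unmap_fast_page triple above collapses into a single with_fast_page call that brackets the copy for the caller. Its implementation is not in this excerpt; a plausible shape is sketched below, with the serializing lock being an assumption:

static SpinLock s_fast_page_lock_sketch; // assumed: something must serialize the single fast-page slot

template<typename F>
void with_fast_page_sketch(paddr_t paddr, F callback)
{
	LockGuard _(s_fast_page_lock_sketch);
	PageTable::map_fast_page(paddr);   // map the physical page into the reserved kernel slot
	callback();                        // caller copies through PageTable::fast_page_as_ptr()
	PageTable::unmap_fast_page();      // tear the temporary mapping down again
}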

View File

@ -1,4 +1,4 @@
#include <kernel/LockGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Scheduler.h>
#include <kernel/Storage/NVMe/Queue.h>
#include <kernel/Timer/Timer.h>
@ -44,7 +44,7 @@ namespace Kernel
uint16_t NVMeQueue::submit_command(NVMe::SubmissionQueueEntry& sqe)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
ASSERT(m_done == false);
m_status = 0;

View File

@ -4,7 +4,7 @@
#include <BAN/UTF8.h>
#include <kernel/FS/DevFS/FileSystem.h>
#include <kernel/FS/VirtualFileSystem.h>
#include <kernel/LockGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/PCI.h>
#include <kernel/Storage/StorageDevice.h>
#include <kernel/Thread.h>
@ -150,7 +150,7 @@ namespace Kernel
BAN::Vector<uint8_t> lba1;
TRY(lba1.resize(sector_size()));
TRY(read_sectors(1, 1, lba1.span()));
TRY(read_sectors(1, 1, BAN::ByteSpan { lba1.span() }));
const GPTHeader& header = *(const GPTHeader*)lba1.data();
if (!is_valid_gpt_header(header, sector_size()))
@ -165,7 +165,7 @@ namespace Kernel
BAN::Vector<uint8_t> entry_array;
TRY(entry_array.resize(size));
TRY(read_sectors(header.partition_entry_lba, size / sector_size(), entry_array.span()));
TRY(read_sectors(header.partition_entry_lba, size / sector_size(), BAN::ByteSpan { entry_array.span() }));
if (!is_valid_gpt_crc32(header, lba1, entry_array))
return BAN::Error::from_error_code(ErrorCode::Storage_GPTHeader);
@ -207,14 +207,14 @@ namespace Kernel
void StorageDevice::add_disk_cache()
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
ASSERT(!m_disk_cache.has_value());
m_disk_cache.emplace(sector_size(), *this);
}
BAN::ErrorOr<void> StorageDevice::sync_disk_cache()
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (m_disk_cache.has_value())
TRY(m_disk_cache->sync());
return {};
@ -224,15 +224,12 @@ namespace Kernel
{
ASSERT(buffer.size() >= sector_count * sector_size());
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (!m_disk_cache.has_value())
return read_sectors_impl(lba, sector_count, buffer);
}
for (uint64_t offset = 0; offset < sector_count; offset++)
{
LockGuard _(m_lock);
auto sector_buffer = buffer.slice(offset * sector_size(), sector_size());
if (m_disk_cache->read_from_cache(lba + offset, sector_buffer))
continue;
@ -247,15 +244,12 @@ namespace Kernel
{
ASSERT(buffer.size() >= sector_count * sector_size());
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (!m_disk_cache.has_value())
return write_sectors_impl(lba, sector_count, buffer);
}
for (uint8_t offset = 0; offset < sector_count; offset++)
{
LockGuard _(m_lock);
auto sector_buffer = buffer.slice(offset * sector_size(), sector_size());
if (m_disk_cache->write_to_cache(lba + offset, sector_buffer, true).is_error())
TRY(write_sectors_impl(lba + offset, 1, sector_buffer));

View File

@ -1,5 +1,4 @@
#include <BAN/Array.h>
#include <kernel/CriticalScope.h>
#include <kernel/Device/DeviceNumbers.h>
#include <kernel/FS/DevFS/FileSystem.h>
#include <kernel/IDT.h>
@ -235,7 +234,7 @@ namespace Kernel
uint8_t buffer[128];
{
CriticalScope _;
LockGuard _(m_lock);
if (m_input.empty())
return;
uint8_t* ptr = buffer;

View File

@ -4,7 +4,7 @@
#include <kernel/Debug.h>
#include <kernel/FS/DevFS/FileSystem.h>
#include <kernel/FS/VirtualFileSystem.h>
#include <kernel/LockGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Process.h>
#include <kernel/Terminal/TTY.h>
@ -122,7 +122,7 @@ namespace Kernel
void TTY::on_key_event(Input::KeyEvent event)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
if (event.released())
return;
@ -205,7 +205,7 @@ namespace Kernel
if (ch == 0)
return;
LockGuard _(m_lock);
LockGuard _(m_mutex);
// ^C
if (ch == '\x03')
@ -310,24 +310,18 @@ namespace Kernel
void TTY::putchar(uint8_t ch)
{
LockGuard _(m_mutex);
if (m_tty_ctrl.draw_graphics)
putchar_impl(ch);
}
BAN::ErrorOr<size_t> TTY::read_impl(off_t, BAN::ByteSpan buffer)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
while (!m_output.flush)
{
// FIXME: this is very hacky way to unlock lock temporarily
uint32_t depth = m_lock.lock_depth();
for (uint32_t i = 0; i < depth; i++)
m_lock.unlock();
auto eintr = Thread::current().block_or_eintr_indefinite(m_output.semaphore);
for (uint32_t i = 0; i < depth; i++)
m_lock.lock();
if (eintr.is_error())
return eintr.release_error();
LockFreeGuard free(m_mutex);
TRY(Thread::current().block_or_eintr_indefinite(m_output.semaphore));
}
if (m_output.bytes == 0)
@ -352,7 +346,7 @@ namespace Kernel
BAN::ErrorOr<size_t> TTY::write_impl(off_t, BAN::ConstByteSpan buffer)
{
LockGuard _(m_lock);
LockGuard _(m_mutex);
for (size_t i = 0; i < buffer.size(); i++)
putchar(buffer[i]);
return buffer.size();
@ -361,7 +355,7 @@ namespace Kernel
void TTY::putchar_current(uint8_t ch)
{
ASSERT(s_tty);
LockGuard _(s_tty->m_lock);
LockGuard _(s_tty->m_mutex);
s_tty->putchar(ch);
}

View File

@ -4,7 +4,7 @@
#include <kernel/Debug.h>
#include <kernel/Device/DeviceNumbers.h>
#include <kernel/FS/DevFS/FileSystem.h>
#include <kernel/LockGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Process.h>
#include <kernel/Terminal/VirtualTTY.h>
@ -57,6 +57,7 @@ namespace Kernel
void VirtualTTY::clear()
{
LockGuard _(m_write_lock);
for (uint32_t i = 0; i < m_width * m_height; i++)
m_buffer[i] = { .foreground = m_foreground, .background = m_background, .codepoint = ' ' };
m_terminal_driver->clear(m_background);
@ -64,6 +65,7 @@ namespace Kernel
void VirtualTTY::set_font(const Kernel::Font& font)
{
LockGuard _(m_write_lock);
m_terminal_driver->set_font(font);
uint32_t new_width = m_terminal_driver->width();
@ -306,7 +308,7 @@ namespace Kernel
void VirtualTTY::putchar_impl(uint8_t ch)
{
ASSERT(m_lock.is_locked());
LockGuard _(m_write_lock);
uint32_t codepoint = ch;

View File

@ -3,8 +3,8 @@
#include <kernel/GDT.h>
#include <kernel/InterruptController.h>
#include <kernel/InterruptStack.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/kmalloc.h>
#include <kernel/Memory/PageTableScope.h>
#include <kernel/Process.h>
#include <kernel/Scheduler.h>
#include <kernel/Thread.h>
@ -30,10 +30,10 @@ namespace Kernel
void Thread::terminate()
{
CriticalScope _;
LockGuard _(m_lock);
m_state = Thread::State::Terminated;
if (this == &Thread::current())
Scheduler::get().execute_current_thread();
Scheduler::get().reschedule_current_no_save();
}
static pid_t s_next_tid = 1;
@ -131,6 +131,8 @@ namespace Kernel
BAN::ErrorOr<Thread*> Thread::clone(Process* new_process, uintptr_t rsp, uintptr_t rip)
{
LockGuard _(m_lock);
ASSERT(m_is_userspace);
ASSERT(m_state == State::Executing);
@ -156,6 +158,8 @@ namespace Kernel
void Thread::setup_exec()
{
LockGuard _(m_lock);
ASSERT(is_userspace());
m_state = State::NotStarted;
static entry_t entry_trampoline(
@ -172,18 +176,24 @@ namespace Kernel
// Signal mask is inherited
// Setup stack for returning
{
// FIXME: don't use PageTableScope
PageTableScope _(process().page_table());
write_to_stack(m_rsp, nullptr); // alignment
write_to_stack(m_rsp, this);
write_to_stack(m_rsp, &Thread::on_exit);
write_to_stack(m_rsp, nullptr);
}
uintptr_t offset = m_rsp % PAGE_SIZE;
if (offset == 0)
offset = PAGE_SIZE;
ASSERT_GTE(offset, 4 * sizeof(uintptr_t));
PageTable::with_fast_page(process().page_table().physical_address_of((m_rsp - 4 * sizeof(uintptr_t)) & PAGE_ADDR_MASK), [&] {
uintptr_t rsp = PageTable::fast_page() + offset;
write_to_stack(rsp, nullptr); // alignment
write_to_stack(rsp, this);
write_to_stack(rsp, &Thread::on_exit);
write_to_stack(rsp, nullptr);
m_rsp -= 4 * sizeof(uintptr_t);
});
}
void Thread::setup_process_cleanup()
{
LockGuard _(m_lock);
m_state = State::NotStarted;
static entry_t entry(
[](void* process)
@ -199,19 +209,23 @@ namespace Kernel
m_signal_pending_mask = 0;
m_signal_block_mask = ~0ull;
// Setup stack for returning
{
// FIXME: don't use PageTableScope
PageTableScope _(process().page_table());
write_to_stack(m_rsp, nullptr); // alignment
write_to_stack(m_rsp, this);
write_to_stack(m_rsp, &Thread::on_exit);
write_to_stack(m_rsp, m_process);
}
uintptr_t offset = m_rsp % PAGE_SIZE;
if (offset == 0)
offset = PAGE_SIZE;
ASSERT_GTE(offset, 4 * sizeof(uintptr_t));
PageTable::with_fast_page(process().page_table().physical_address_of((m_rsp - 4 * sizeof(uintptr_t)) & PAGE_ADDR_MASK), [&] {
uintptr_t rsp = PageTable::fast_page() + offset;
write_to_stack(rsp, nullptr); // alignment
write_to_stack(rsp, this);
write_to_stack(rsp, &Thread::on_exit);
write_to_stack(rsp, m_process);
m_rsp -= 4 * sizeof(uintptr_t);
});
}
bool Thread::is_interrupted_by_signal()
{
LockGuard _(m_lock);
while (can_add_signal_to_execute())
handle_signal();
return will_execute_signal();
@ -219,6 +233,7 @@ namespace Kernel
bool Thread::can_add_signal_to_execute() const
{
LockGuard _(m_lock);
if (!is_userspace() || m_state != State::Executing)
return false;
auto& interrupt_stack = *reinterpret_cast<InterruptStack*>(interrupt_stack_base() + interrupt_stack_size() - sizeof(InterruptStack));
@ -230,6 +245,7 @@ namespace Kernel
bool Thread::will_execute_signal() const
{
LockGuard _(m_lock);
if (!is_userspace() || m_state != State::Executing)
return false;
auto& interrupt_stack = *reinterpret_cast<InterruptStack*>(interrupt_stack_base() + interrupt_stack_size() - sizeof(InterruptStack));
@ -238,6 +254,7 @@ namespace Kernel
void Thread::handle_signal(int signal)
{
LockGuard _(m_lock);
ASSERT(!interrupts_enabled());
ASSERT(&Thread::current() == this);
ASSERT(is_userspace());
@ -331,6 +348,7 @@ namespace Kernel
bool Thread::add_signal(int signal)
{
LockGuard _(m_lock);
ASSERT(!interrupts_enabled());
uint64_t mask = 1ull << signal;
if (!(m_signal_block_mask & mask))
@ -373,6 +391,7 @@ namespace Kernel
void Thread::validate_stack() const
{
LockGuard _(m_lock);
if (stack_base() <= m_rsp && m_rsp <= stack_base() + stack_size())
return;
if (interrupt_stack_base() <= m_rsp && m_rsp <= interrupt_stack_base() + interrupt_stack_size())
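As a concrete check of the fast-page stack writes in setup_exec and setup_process_cleanup above (assuming 4 KiB pages and 8-byte pointers): with m_rsp at, say, 0x7ffff000 the offset computes to 0 and is bumped to PAGE_SIZE, the mapped page is (0x7ffff000 - 32) & PAGE_ADDR_MASK = 0x7fffe000, the four write_to_stack calls land at fast_page() + 4096 - 8 down to fast_page() + 4096 - 32 (the top 32 bytes of that page), and m_rsp ends at 0x7fffefe0. The ASSERT_GTE(offset, 4 * sizeof(uintptr_t)) is what guarantees all four writes stay inside the single page mapped through the fast-page slot.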

View File

@ -1,4 +1,5 @@
#include <BAN/ScopeGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/ACPI.h>
#include <kernel/IDT.h>
#include <kernel/InterruptController.h>
@ -244,7 +245,7 @@ namespace Kernel
if (m_is_64bit)
return regs.main_counter.full;
CriticalScope _;
LockGuard _(m_lock);
uint32_t current_low = regs.main_counter.low;
uint32_t wraps = m_32bit_wraps;
if (current_low < (uint32_t)m_last_ticks)
@ -256,8 +257,10 @@ namespace Kernel
{
auto& regs = registers();
uint64_t current_ticks;
{
LockGuard _(m_lock);
uint64_t current_ticks;
if (m_is_64bit)
current_ticks = regs.main_counter.full;
else
@ -267,8 +270,8 @@ namespace Kernel
m_32bit_wraps++;
current_ticks = ((uint64_t)m_32bit_wraps << 32) | current_low;
}
m_last_ticks = current_ticks;
}
Scheduler::get().timer_reschedule();
}
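The final hunk keeps the 32-bit wrap bookkeeping but narrows the lock to just the counter read and update. The idea, restated as a standalone sketch with illustrative names, relies on the timer interrupt firing at least once per rollover of the low word:

static uint32_t s_wraps = 0;      // how many times the 32-bit counter has rolled over
static uint64_t s_last_ticks = 0; // last extended value handed out

uint64_t extend_main_counter(uint32_t current_low)
{
	if (current_low < (uint32_t)s_last_ticks) // low word moved backwards => one more wrap
		s_wraps++;
	const uint64_t ticks = ((uint64_t)s_wraps << 32) | current_low;
	s_last_ticks = ticks;
	return ticks;
}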