Compare commits

..

11 Commits

42 changed files with 312 additions and 275 deletions

View File

@ -1,5 +1,4 @@
#include <BAN/ScopeGuard.h>
#include <kernel/CriticalScope.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Lock/LockGuard.h>
#include <LibELF/LoadableELF.h>

View File

@ -17,7 +17,7 @@ extern uint8_t g_userspace_end[];
namespace Kernel
{
SpinLock PageTable::s_fast_page_lock;
RecursiveSpinLock PageTable::s_fast_page_lock;
static PageTable* s_kernel = nullptr;
static PageTable* s_current = nullptr;
@ -209,7 +209,8 @@ namespace Kernel
{
ASSERT(s_kernel);
ASSERT_NEQ(paddr, 0);
ASSERT(!interrupts_enabled());
SpinLockGuard _(s_fast_page_lock);
constexpr vaddr_t uc_vaddr = uncanonicalize(fast_page());
constexpr uint64_t pml4e = (uc_vaddr >> 39) & 0x1FF;
@ -231,7 +232,8 @@ namespace Kernel
void PageTable::unmap_fast_page()
{
ASSERT(s_kernel);
ASSERT(!interrupts_enabled());
SpinLockGuard _(s_fast_page_lock);
constexpr vaddr_t uc_vaddr = uncanonicalize(fast_page());
constexpr uint64_t pml4e = (uc_vaddr >> 39) & 0x1FF;

View File

@ -2,6 +2,7 @@
#include <BAN/Vector.h>
#include <kernel/InterruptController.h>
#include <kernel/Lock/SpinLock.h>
#include <kernel/Memory/Types.h>
namespace Kernel
@ -52,6 +53,7 @@ namespace Kernel
};
private:
SpinLock m_lock;
BAN::Vector<Processor> m_processors;
Kernel::paddr_t m_local_apic_paddr = 0;
Kernel::vaddr_t m_local_apic_vaddr = 0;

View File

@ -1,30 +0,0 @@
#pragma once
#include <BAN/NoCopyMove.h>
#include <stddef.h>
// RAII guard that disables maskable interrupts for the duration of its scope.
namespace Kernel
{
// Saves RFLAGS on construction and executes `cli`; restores the saved
// flags on destruction, which re-enables interrupts only if they were
// enabled when the scope was entered. Non-copyable and non-movable so
// the critical section cannot accidentally be extended or duplicated.
class CriticalScope
{
BAN_NON_COPYABLE(CriticalScope);
BAN_NON_MOVABLE(CriticalScope);
public:
CriticalScope()
{
// Push FLAGS, disable interrupts, then pop the saved FLAGS into
// m_flags. `cli` happens before the pop so there is no window where
// an interrupt could run after the flags snapshot is taken.
asm volatile("pushf; cli; pop %0" : "=r"(m_flags) :: "memory");
}
~CriticalScope()
{
// Restore the snapshot taken in the constructor (popf rewrites the
// interrupt flag along with the rest of FLAGS).
asm volatile("push %0; popf" :: "rm"(m_flags) : "memory", "cc");
}
private:
size_t m_flags; // saved RFLAGS value from construction time
};
}

View File

@ -3,6 +3,7 @@
#include <BAN/Array.h>
#include <BAN/UniqPtr.h>
#include <kernel/Input/KeyEvent.h>
#include <kernel/Lock/SpinLock.h>
namespace Kernel::Input
{
@ -23,6 +24,7 @@ namespace Kernel::Input
BAN::Array<Key, 0xFF> m_keycode_to_key_normal;
BAN::Array<Key, 0xFF> m_keycode_to_key_shift;
BAN::Array<Key, 0xFF> m_keycode_to_key_altgr;
SpinLock m_lock;
friend class BAN::UniqPtr<KeyboardLayout>;
};

View File

@ -64,6 +64,7 @@ namespace Kernel::Input
BAN::CircularQueue<Command, 128> m_command_queue;
uint64_t m_command_send_time { 0 };
SpinLock m_command_lock;
};
}

View File

@ -41,6 +41,7 @@ namespace Kernel::Input
uint16_t m_modifiers { 0 };
BAN::CircularQueue<KeyEvent, 50> m_event_queue;
SpinLock m_event_lock;
PS2Keymap m_keymap;

View File

@ -37,6 +37,7 @@ namespace Kernel::Input
uint8_t m_button_mask { 0x00 };
BAN::CircularQueue<MouseEvent, 128> m_event_queue;
SpinLock m_event_lock;
Semaphore m_semaphore;

View File

@ -5,9 +5,6 @@
#include <stdint.h>
#define DISABLE_INTERRUPTS() asm volatile("cli")
#define ENABLE_INTERRUPTS() asm volatile("sti")
namespace Kernel
{
@ -51,11 +48,4 @@ namespace Kernel
bool m_using_apic { false };
};
inline bool interrupts_enabled()
{
uintptr_t flags;
asm volatile("pushf; pop %0" : "=r"(flags) :: "memory");
return flags & (1 << 9);
}
}

View File

@ -0,0 +1,37 @@
#pragma once
#include <kernel/Arch.h>
namespace Kernel
{
enum class InterruptState
{
Disabled,
Enabled,
};
#if ARCH(x86_64) || ARCH(i386)
inline void set_interrupt_state(InterruptState state)
{
if (state == InterruptState::Enabled)
asm volatile("sti");
else
asm volatile("cli");
}
inline InterruptState get_interrupt_state()
{
uintptr_t flags;
asm volatile("pushf; pop %0" : "=rm"(flags));
if (flags & (1 << 9))
return InterruptState::Enabled;
return InterruptState::Disabled;
}
#else
#error "Unknown architecure"
#endif
}

View File

@ -2,14 +2,13 @@
#include <BAN/Atomic.h>
#include <BAN/NoCopyMove.h>
#include <kernel/Interrupts.h>
#include <sys/types.h>
namespace Kernel
{
using InterruptState = bool;
class SpinLock
{
BAN_NON_COPYABLE(SpinLock);
@ -41,6 +40,37 @@ namespace Kernel
uint32_t m_lock_depth { 0 };
};
// Spin lock that disables interrupts while held.
// "Unsafe" variant: performs no owner/recursion bookkeeping, so it must
// not be locked recursively on the same CPU (doing so would spin forever
// with interrupts disabled).
class SpinLockUnsafe
{
BAN_NON_COPYABLE(SpinLockUnsafe);
BAN_NON_MOVABLE(SpinLockUnsafe);
public:
SpinLockUnsafe() = default;
// Disables interrupts, then busy-waits until the lock is acquired.
// Returns the interrupt state from before locking so the caller can
// hand it back to unlock() for restoration.
InterruptState lock()
{
auto state = get_interrupt_state();
// Interrupts are disabled before acquiring so an interrupt handler
// cannot preempt us and try to take the same lock on this CPU.
set_interrupt_state(InterruptState::Disabled);
// NOTE(review): assumes BAN::Atomic::compare_exchange(expected, desired)
// returns false on failure without requiring an lvalue — confirm
// against BAN/Atomic.h.
while (!m_locked.compare_exchange(false, true))
__builtin_ia32_pause(); // spin-wait hint to the CPU
return state;
}
// Releases the lock and restores the interrupt state returned by lock().
void unlock(InterruptState state)
{
m_locked.store(false);
set_interrupt_state(state);
}
bool is_locked() const { return m_locked; }
private:
BAN::Atomic<bool> m_locked;
};
template<typename Lock>
class SpinLockGuard
{

View File

@ -2,7 +2,6 @@
#include <BAN/Errors.h>
#include <BAN/Traits.h>
#include <kernel/CriticalScope.h>
#include <kernel/Lock/SpinLock.h>
#include <kernel/Memory/Types.h>
@ -130,7 +129,7 @@ namespace Kernel
private:
paddr_t m_highest_paging_struct { 0 };
mutable RecursiveSpinLock m_lock;
static SpinLock s_fast_page_lock;
static RecursiveSpinLock s_fast_page_lock;
};
static constexpr size_t range_page_count(vaddr_t start, size_t bytes)

View File

@ -52,6 +52,7 @@ namespace Kernel
private:
SpinLock m_table_lock;
SpinLock m_pending_lock;
BAN::HashMap<BAN::IPv4Address, BAN::MACAddress> m_arp_table;

View File

@ -71,6 +71,7 @@ namespace Kernel
BAN::UniqPtr<DMARegion> m_tx_buffer_region;
BAN::UniqPtr<DMARegion> m_rx_descriptor_region;
BAN::UniqPtr<DMARegion> m_tx_descriptor_region;
SpinLock m_lock;
BAN::MACAddress m_mac_address {};
bool m_link_up { false };

View File

@ -76,6 +76,7 @@ namespace Kernel
BAN::UniqPtr<VirtualRange> m_pending_packet_buffer;
BAN::CircularQueue<PendingIPv4Packet, 128> m_pending_packets;
Semaphore m_pending_semaphore;
SpinLock m_pending_lock;
size_t m_pending_total_size { 0 };
BAN::HashMap<int, BAN::WeakPtr<NetworkSocket>> m_bound_sockets;

View File

@ -1,6 +1,7 @@
#pragma once
#include <kernel/InterruptController.h>
#include <kernel/Lock/SpinLock.h>
namespace Kernel
{
@ -20,8 +21,12 @@ namespace Kernel
private:
static PIC* create();
friend class InterruptController;
private:
SpinLock m_lock;
uint16_t m_reserved_irqs { 0 };
friend class InterruptController;
};
}

View File

@ -224,8 +224,8 @@ namespace Kernel
BAN::String m_working_directory;
BAN::Vector<Thread*> m_threads;
vaddr_t m_signal_handlers[_SIGMAX + 1] { };
uint64_t m_signal_pending_mask { 0 };
BAN::Atomic<vaddr_t> m_signal_handlers[_SIGMAX + 1] { };
BAN::Atomic<uint64_t> m_signal_pending_mask { 0 };
BAN::Vector<BAN::String> m_cmdline;
BAN::Vector<BAN::String> m_environ;

View File

@ -30,9 +30,12 @@ namespace Kernel
static pid_t current_tid();
[[noreturn]] void execute_current_thread();
[[noreturn]] void _execute_current_thread();
[[noreturn]] void execute_current_thread_locked();
[[noreturn]] void delete_current_process_and_thread();
// This is no return if called on current thread
void terminate_thread(Thread*);
private:
Scheduler() = default;
@ -43,6 +46,8 @@ namespace Kernel
void remove_and_advance_current_thread();
void advance_current_thread();
[[noreturn]] void execute_current_thread_stack_loaded();
BAN::ErrorOr<void> add_thread(Thread*);
private:
@ -57,6 +62,8 @@ namespace Kernel
Semaphore* semaphore;
};
SpinLockUnsafe m_lock;
Thread* m_idle_thread { nullptr };
BAN::LinkedList<SchedulerThread> m_active_threads;
BAN::LinkedList<SchedulerThread> m_sleeping_threads;

View File

@ -62,6 +62,7 @@ namespace Kernel
BAN::String m_name;
Serial m_serial;
BAN::CircularQueue<uint8_t, 128> m_input;
SpinLock m_input_lock;
public:
virtual dev_t rdev() const override { return m_rdev; }

View File

@ -65,8 +65,6 @@ namespace Kernel
uintptr_t rip() const { return m_rip; }
void set_started() { ASSERT(m_state == State::NotStarted); m_state = State::Executing; }
// Thread will no longer execute. If called on current thread, does not return
void terminate();
State state() const { return m_state; }
vaddr_t stack_base() const { return m_stack->vaddr(); }
@ -118,6 +116,7 @@ namespace Kernel
uint64_t m_signal_pending_mask { 0 };
uint64_t m_signal_block_mask { 0 };
SpinLock m_signal_lock;
static_assert(_SIGMAX < 64);
#if __enable_sse

View File

@ -1,6 +1,7 @@
#pragma once
#include <kernel/InterruptController.h>
#include <kernel/Lock/SpinLock.h>
#include <kernel/Timer/Timer.h>
namespace Kernel
@ -30,6 +31,8 @@ namespace Kernel
uint64_t read_main_counter() const;
private:
mutable SpinLock m_lock;
bool m_is_64bit { false };
uint64_t m_last_ticks { 0 };

View File

@ -223,7 +223,7 @@ namespace Kernel
void APIC::enable_irq(uint8_t irq)
{
CriticalScope _;
SpinLockGuard _(m_lock);
uint32_t gsi = m_irq_overrides[irq];
@ -268,7 +268,7 @@ namespace Kernel
BAN::ErrorOr<void> APIC::reserve_irq(uint8_t irq)
{
CriticalScope _;
SpinLockGuard _(m_lock);
uint32_t gsi = m_irq_overrides[irq];
@ -301,7 +301,7 @@ namespace Kernel
BAN::Optional<uint8_t> APIC::get_free_irq()
{
CriticalScope _;
SpinLockGuard _(m_lock);
for (int irq = 0; irq <= 0xFF; irq++)
{
uint32_t gsi = m_irq_overrides[irq];

View File

@ -1,5 +1,4 @@
#include <BAN/HashMap.h>
#include <kernel/CriticalScope.h>
#include <kernel/FS/VirtualFileSystem.h>
#include <kernel/Input/KeyboardLayout.h>
@ -74,6 +73,7 @@ namespace Kernel::Input
Key KeyboardLayout::get_key_from_event(KeyEvent event)
{
SpinLockGuard _(m_lock);
if (event.shift())
return m_keycode_to_key_shift[event.keycode];
if (event.ralt())
@ -256,7 +256,7 @@ namespace Kernel::Input
}
}
CriticalScope _;
SpinLockGuard _(m_lock);
for (size_t i = 0; i < new_layout->m_keycode_to_key_normal.size(); i++)
if (new_layout->m_keycode_to_key_normal[i] != Key::None)

View File

@ -6,7 +6,6 @@
#include <kernel/Input/PS2/Controller.h>
#include <kernel/Input/PS2/Keyboard.h>
#include <kernel/Input/PS2/Mouse.h>
#include <kernel/InterruptController.h>
#include <kernel/IO.h>
#include <kernel/Timer/Timer.h>
@ -21,7 +20,6 @@ namespace Kernel::Input
BAN::ErrorOr<void> PS2Controller::send_byte(uint16_t port, uint8_t byte)
{
ASSERT(interrupts_enabled());
LockGuard _(m_mutex);
uint64_t timeout = SystemTimer::get().ms_since_boot() + s_ps2_timeout_ms;
while (SystemTimer::get().ms_since_boot() < timeout)
@ -36,7 +34,6 @@ namespace Kernel::Input
BAN::ErrorOr<uint8_t> PS2Controller::read_byte()
{
ASSERT(interrupts_enabled());
LockGuard _(m_mutex);
uint64_t timeout = SystemTimer::get().ms_since_boot() + s_ps2_timeout_ms;
while (SystemTimer::get().ms_since_boot() < timeout)
@ -101,8 +98,7 @@ namespace Kernel::Input
bool PS2Controller::append_command_queue(PS2Device* device, uint8_t command, uint8_t response_size)
{
// NOTE: command queue push/pop must be done without interrupts
CriticalScope _;
SpinLockGuard _(m_command_lock);
if (m_command_queue.size() + 1 >= m_command_queue.capacity())
{
dprintln("PS/2 command queue full");
@ -121,8 +117,7 @@ namespace Kernel::Input
bool PS2Controller::append_command_queue(PS2Device* device, uint8_t command, uint8_t data, uint8_t response_size)
{
// NOTE: command queue push/pop must be done without interrupts
CriticalScope _;
SpinLockGuard _(m_command_lock);
if (m_command_queue.size() + 1 >= m_command_queue.capacity())
{
dprintln("PS/2 command queue full");
@ -141,35 +136,38 @@ namespace Kernel::Input
void PS2Controller::update_command_queue()
{
ASSERT(interrupts_enabled());
Command command_copy;
if (m_command_queue.empty())
return;
auto& command = m_command_queue.front();
if (command.state == Command::State::WaitingResponse || command.state == Command::State::WaitingAck)
{
if (SystemTimer::get().ms_since_boot() >= m_command_send_time + s_ps2_timeout_ms)
SpinLockGuard _(m_command_lock);
if (m_command_queue.empty())
return;
auto& command = m_command_queue.front();
if (command.state == Command::State::WaitingResponse || command.state == Command::State::WaitingAck)
{
dwarnln_if(DEBUG_PS2, "Command timedout");
m_devices[command.device_index]->command_timedout(command.out_data, command.out_count);
m_command_queue.pop();
if (SystemTimer::get().ms_since_boot() >= m_command_send_time + s_ps2_timeout_ms)
{
dwarnln_if(DEBUG_PS2, "Command timedout");
m_devices[command.device_index]->command_timedout(command.out_data, command.out_count);
m_command_queue.pop();
}
return;
}
return;
ASSERT(command.send_index < command.out_count);
command.state = Command::State::WaitingAck;
command_copy = command;
}
ASSERT(command.send_index < command.out_count);
command.state = Command::State::WaitingAck;
m_command_send_time = SystemTimer::get().ms_since_boot();
if (auto ret = device_send_byte(command.device_index, command.out_data[command.send_index]); ret.is_error())
{
command.state = Command::State::Sending;
if (auto ret = device_send_byte(command_copy.device_index, command_copy.out_data[command_copy.send_index]); ret.is_error())
dwarnln_if(DEBUG_PS2, "PS/2 send command byte: {}", ret.error());
}
}
bool PS2Controller::handle_command_byte(PS2Device* device, uint8_t byte)
{
// NOTE: command queue push/pop must be done without interrupts
ASSERT(!interrupts_enabled());
SpinLockGuard _(m_command_lock);
if (m_command_queue.empty())
return false;

View File

@ -1,5 +1,4 @@
#include <BAN/ScopeGuard.h>
#include <kernel/CriticalScope.h>
#include <kernel/FS/DevFS/FileSystem.h>
#include <kernel/Input/KeyboardLayout.h>
#include <kernel/Input/PS2/Config.h>
@ -165,6 +164,8 @@ namespace Kernel::Input
event.modifier = m_modifiers | (released ? 0 : KeyEvent::Modifier::Pressed);
event.keycode = keycode.value();
SpinLockGuard _(m_event_lock);
if (m_event_queue.full())
{
dwarnln("PS/2 event queue full");
@ -192,20 +193,20 @@ namespace Kernel::Input
if (buffer.size() < sizeof(KeyEvent))
return BAN::Error::from_errno(ENOBUFS);
while (true)
auto state = m_event_lock.lock();
while (m_event_queue.empty())
{
if (m_event_queue.empty())
TRY(Thread::current().block_or_eintr_indefinite(m_semaphore));
CriticalScope _;
if (m_event_queue.empty())
continue;
buffer.as<KeyEvent>() = m_event_queue.front();
m_event_queue.pop();
return sizeof(KeyEvent);
m_event_lock.unlock(state);
TRY(Thread::current().block_or_eintr_indefinite(m_semaphore));
state = m_event_lock.lock();
}
buffer.as<KeyEvent>() = m_event_queue.front();
m_event_queue.pop();
m_event_lock.unlock(state);
return sizeof(KeyEvent);
}
}

View File

@ -1,5 +1,4 @@
#include <BAN/ScopeGuard.h>
#include <kernel/CriticalScope.h>
#include <kernel/FS/DevFS/FileSystem.h>
#include <kernel/Input/PS2/Config.h>
#include <kernel/Input/PS2/Mouse.h>
@ -158,6 +157,8 @@ namespace Kernel::Input
event.scroll_event.scroll = rel_z;
}
SpinLockGuard _(m_event_lock);
for (int i = 0; i < event_count; i++)
{
if (m_event_queue.full())
@ -176,20 +177,20 @@ namespace Kernel::Input
if (buffer.size() < sizeof(MouseEvent))
return BAN::Error::from_errno(ENOBUFS);
while (true)
auto state = m_event_lock.lock();
while (m_event_queue.empty())
{
if (m_event_queue.empty())
TRY(Thread::current().block_or_eintr_indefinite(m_semaphore));
CriticalScope _;
if (m_event_queue.empty())
continue;
buffer.as<MouseEvent>() = m_event_queue.front();
m_event_queue.pop();
return sizeof(MouseEvent);
m_event_lock.unlock(state);
TRY(Thread::current().block_or_eintr_indefinite(m_semaphore));
state = m_event_lock.lock();
}
buffer.as<MouseEvent>() = m_event_queue.front();
m_event_queue.pop();
m_event_lock.unlock(state);
return sizeof(MouseEvent);
}
}

View File

@ -12,8 +12,8 @@ namespace Kernel
auto tid = Scheduler::current_tid();
ASSERT_NEQ(m_locker.load(), tid);
InterruptState state = interrupts_enabled();
DISABLE_INTERRUPTS();
auto state = get_interrupt_state();
set_interrupt_state(InterruptState::Disabled);
if (!m_locker.compare_exchange(-1, tid))
ASSERT_NOT_REACHED();
@ -25,16 +25,15 @@ namespace Kernel
{
ASSERT_EQ(m_locker.load(), Scheduler::current_tid());
m_locker.store(-1);
if (state)
ENABLE_INTERRUPTS();
set_interrupt_state(state);
}
InterruptState RecursiveSpinLock::lock()
{
auto tid = Scheduler::current_tid();
InterruptState state = interrupts_enabled();
DISABLE_INTERRUPTS();
auto state = get_interrupt_state();
set_interrupt_state(InterruptState::Disabled);
if (tid == m_locker)
ASSERT_GT(m_lock_depth, 0);
@ -57,8 +56,7 @@ namespace Kernel
ASSERT_GT(m_lock_depth, 0);
if (--m_lock_depth == 0)
m_locker = -1;
if (state)
ENABLE_INTERRUPTS();
set_interrupt_state(state);
}
}

View File

@ -1,4 +1,3 @@
#include <kernel/CriticalScope.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/FileBackedRegion.h>
#include <kernel/Memory/Heap.h>
@ -83,12 +82,9 @@ namespace Kernel
if (pages[page_index] == 0)
return;
{
CriticalScope _;
PageTable::with_fast_page(pages[page_index], [&] {
memcpy(page_buffer, PageTable::fast_page_as_ptr(), PAGE_SIZE);
});
}
PageTable::with_fast_page(pages[page_index], [&] {
memcpy(page_buffer, PageTable::fast_page_as_ptr(), PAGE_SIZE);
});
if (auto ret = inode->write(page_index * PAGE_SIZE, BAN::ConstByteSpan::from(page_buffer)); ret.is_error())
dwarnln("{}", ret.error());

View File

@ -1,4 +1,3 @@
#include <kernel/CriticalScope.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/MemoryBackedRegion.h>

View File

@ -1,4 +1,3 @@
#include <kernel/CriticalScope.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/VirtualRange.h>

View File

@ -1,5 +1,4 @@
#include <BAN/Errors.h>
#include <kernel/CriticalScope.h>
#include <kernel/kprint.h>
#include <kernel/Memory/kmalloc.h>
@ -81,6 +80,8 @@ struct kmalloc_info
};
static kmalloc_info s_kmalloc_info;
static Kernel::SpinLock s_kmalloc_lock;
template<size_t SIZE>
struct kmalloc_fixed_node
{
@ -144,6 +145,8 @@ void kmalloc_initialize()
void kmalloc_dump_info()
{
Kernel::SpinLockGuard _(s_kmalloc_lock);
kprintln("kmalloc: 0x{8H}->0x{8H}", s_kmalloc_info.base, s_kmalloc_info.end);
kprintln(" used: 0x{8H}", s_kmalloc_info.used);
kprintln(" free: 0x{8H}", s_kmalloc_info.free);
@ -155,6 +158,7 @@ void kmalloc_dump_info()
static bool is_corrupted()
{
Kernel::SpinLockGuard _(s_kmalloc_lock);
auto& info = s_kmalloc_info;
auto* temp = info.first();
for (; temp->end() <= info.end; temp = temp->after());
@ -163,6 +167,8 @@ static bool is_corrupted()
[[maybe_unused]] static void debug_dump()
{
Kernel::SpinLockGuard _(s_kmalloc_lock);
auto& info = s_kmalloc_info;
uint32_t used = 0;
@ -181,6 +187,8 @@ static bool is_corrupted()
static void* kmalloc_fixed()
{
Kernel::SpinLockGuard _(s_kmalloc_lock);
auto& info = s_kmalloc_fixed_info;
if (!info.free_list_head)
@ -223,6 +231,8 @@ static void* kmalloc_impl(size_t size, size_t align)
ASSERT(align % s_kmalloc_min_align == 0);
ASSERT(size % s_kmalloc_min_align == 0);
Kernel::SpinLockGuard _(s_kmalloc_lock);
auto& info = s_kmalloc_info;
for (auto* node = info.first(); node->end() <= info.end; node = node->after())
@ -304,8 +314,6 @@ void* kmalloc(size_t size, size_t align, bool force_identity_map)
align = s_kmalloc_min_align;
ASSERT(align <= PAGE_SIZE);
Kernel::CriticalScope critical;
if (size == 0 || size >= info.size)
goto no_memory;
@ -338,7 +346,7 @@ void kfree(void* address)
uintptr_t address_uint = (uintptr_t)address;
ASSERT(address_uint % s_kmalloc_min_align == 0);
Kernel::CriticalScope critical;
Kernel::SpinLockGuard _(s_kmalloc_lock);
if (s_kmalloc_fixed_info.base <= address_uint && address_uint < s_kmalloc_fixed_info.end)
{

View File

@ -145,24 +145,22 @@ namespace Kernel
{
for (;;)
{
BAN::Optional<PendingArpPacket> pending;
{
CriticalScope _;
if (!m_pending_packets.empty())
PendingArpPacket pending = ({
auto state = m_pending_lock.lock();
while (m_pending_packets.empty())
{
pending = m_pending_packets.front();
m_pending_packets.pop();
m_pending_lock.unlock(state);
m_pending_semaphore.block_indefinite();
state = m_pending_lock.lock();
}
}
auto packet = m_pending_packets.front();
m_pending_packets.pop();
m_pending_lock.unlock(state);
if (!pending.has_value())
{
m_pending_semaphore.block_indefinite();
continue;
}
packet;
});
if (auto ret = handle_arp_packet(pending->interface, pending->packet); ret.is_error())
if (auto ret = handle_arp_packet(pending.interface, pending.packet); ret.is_error())
dwarnln("{}", ret.error());
}
}
@ -171,6 +169,8 @@ namespace Kernel
{
auto& arp_packet = buffer.as<const ARPPacket>();
SpinLockGuard _(m_pending_lock);
if (m_pending_packets.full())
{
dprintln("arp packet queue full");

View File

@ -261,7 +261,7 @@ namespace Kernel
{
ASSERT_LTE(buffer.size() + sizeof(EthernetHeader), E1000_TX_BUFFER_SIZE);
CriticalScope _;
SpinLockGuard _(m_lock);
size_t tx_current = read32(REG_TDT) % E1000_TX_DESCRIPTOR_COUNT;
@ -291,6 +291,8 @@ namespace Kernel
if (read32(REG_ICR) & ICR_RxQ0)
return;
SpinLockGuard _(m_lock);
for (;;) {
uint32_t rx_current = (read32(REG_RDT0) + 1) % E1000_RX_DESCRIPTOR_COUNT;

View File

@ -287,30 +287,28 @@ namespace Kernel
{
for (;;)
{
BAN::Optional<PendingIPv4Packet> pending;
{
CriticalScope _;
if (!m_pending_packets.empty())
PendingIPv4Packet pending = ({
auto state = m_pending_lock.lock();
while (m_pending_packets.empty())
{
pending = m_pending_packets.front();
m_pending_packets.pop();
m_pending_lock.unlock(state);
m_pending_semaphore.block_indefinite();
state = m_pending_lock.lock();
}
}
auto packet = m_pending_packets.front();
m_pending_packets.pop();
m_pending_lock.unlock(state);
if (!pending.has_value())
{
m_pending_semaphore.block_indefinite();
continue;
}
packet;
});
uint8_t* buffer_start = reinterpret_cast<uint8_t*>(m_pending_packet_buffer->vaddr());
const size_t ipv4_packet_size = reinterpret_cast<const IPv4Header*>(buffer_start)->total_length;
if (auto ret = handle_ipv4_packet(pending->interface, BAN::ByteSpan(buffer_start, ipv4_packet_size)); ret.is_error())
if (auto ret = handle_ipv4_packet(pending.interface, BAN::ByteSpan(buffer_start, ipv4_packet_size)); ret.is_error())
dwarnln("{}", ret.error());
CriticalScope _;
SpinLockGuard _(m_pending_lock);
m_pending_total_size -= ipv4_packet_size;
if (m_pending_total_size)
memmove(buffer_start, buffer_start + ipv4_packet_size, m_pending_total_size);
@ -319,6 +317,8 @@ namespace Kernel
void IPv4Layer::add_ipv4_packet(NetworkInterface& interface, BAN::ConstByteSpan buffer)
{
SpinLockGuard _(m_pending_lock);
if (m_pending_packets.full())
{
dwarnln("IPv4 packet queue full");

View File

@ -1,4 +1,3 @@
#include <kernel/CriticalScope.h>
#include <kernel/IDT.h>
#include <kernel/IO.h>
#include <kernel/PIC.h>
@ -71,7 +70,7 @@ namespace Kernel
void PIC::eoi(uint8_t irq)
{
ASSERT(!interrupts_enabled());
SpinLockGuard _(m_lock);
if (irq >= 8)
IO::outb(PIC2_CMD, PIC_EOI);
IO::outb(PIC1_CMD, PIC_EOI);
@ -79,7 +78,7 @@ namespace Kernel
void PIC::enable_irq(uint8_t irq)
{
CriticalScope _;
SpinLockGuard _(m_lock);
ASSERT(irq < 16);
ASSERT(m_reserved_irqs & (1 << irq));
@ -99,7 +98,7 @@ namespace Kernel
dwarnln("PIC only supports 16 irqs");
return BAN::Error::from_errno(EFAULT);
}
CriticalScope _;
SpinLockGuard _(m_lock);
if (m_reserved_irqs & (1 << irq))
{
dwarnln("irq {} is already reserved", irq);
@ -111,7 +110,7 @@ namespace Kernel
BAN::Optional<uint8_t> PIC::get_free_irq()
{
CriticalScope _;
SpinLockGuard _(m_lock);
for (int irq = 0; irq < 16; irq++)
{
if (m_reserved_irqs & (1 << irq))
@ -119,12 +118,12 @@ namespace Kernel
m_reserved_irqs |= 1 << irq;
return irq;
}
return {};
}
bool PIC::is_in_service(uint8_t irq)
{
SpinLockGuard _(m_lock);
uint16_t port = PIC1_CMD;
if (irq >= 8)
{

View File

@ -1,6 +1,5 @@
#include <BAN/ScopeGuard.h>
#include <BAN/StringView.h>
#include <kernel/CriticalScope.h>
#include <kernel/FS/DevFS/FileSystem.h>
#include <kernel/FS/ProcFS/FileSystem.h>
#include <kernel/FS/VirtualFileSystem.h>
@ -62,18 +61,13 @@ namespace Kernel
Process* Process::create_process(const Credentials& credentials, pid_t parent, pid_t sid, pid_t pgrp)
{
static pid_t s_next_id = 1;
static BAN::Atomic<pid_t> s_next_id = 1;
pid_t pid;
pid_t pid = s_next_id++;
if (sid == 0 && pgrp == 0)
{
CriticalScope _;
pid = s_next_id;
if (sid == 0 && pgrp == 0)
{
sid = s_next_id;
pgrp = s_next_id;
}
s_next_id++;
sid = pid;
pgrp = pid;
}
ASSERT(sid > 0);
@ -226,7 +220,7 @@ namespace Kernel
void Process::on_thread_exit(Thread& thread)
{
ASSERT(!interrupts_enabled());
ASSERT(get_interrupt_state() == InterruptState::Disabled);
ASSERT(m_threads.size() > 0);
@ -236,7 +230,8 @@ namespace Kernel
m_threads.clear();
thread.setup_process_cleanup();
Scheduler::get().execute_current_thread();
// NOTE: This function is only called from scheduler when it is already locked
Scheduler::get().execute_current_thread_locked();
ASSERT_NOT_REACHED();
}
@ -258,9 +253,9 @@ namespace Kernel
m_exit_status.exit_code = __WGENEXITCODE(status, signal);
for (auto* thread : m_threads)
if (thread != &Thread::current())
thread->terminate();
Scheduler::get().terminate_thread(thread);
if (this == &Process::current())
Thread::current().terminate();
Scheduler::get().terminate_thread(&Thread::current());
}
size_t Process::proc_meminfo(off_t offset, BAN::ByteSpan buffer) const
@ -336,8 +331,8 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_exit(int status)
{
ASSERT(this == &Process::current());
exit(status, 0);
Thread::current().terminate();
ASSERT_NOT_REACHED();
}
@ -1484,7 +1479,6 @@ namespace Kernel
TRY(validate_pointer_access((void*)handler, sizeof(handler)));
}
CriticalScope _;
m_signal_handlers[signal] = (vaddr_t)handler;
return 0;
}
@ -1496,10 +1490,9 @@ namespace Kernel
if (signal != 0 && (signal < _SIGMIN || signal > _SIGMAX))
return BAN::Error::from_errno(EINVAL);
if (pid == Process::current().pid())
if (pid == m_pid)
{
CriticalScope _;
Process::current().m_signal_pending_mask |= 1 << signal;
m_signal_pending_mask |= 1 << signal;
return 0;
}
@ -1512,9 +1505,8 @@ namespace Kernel
found = true;
if (signal)
{
CriticalScope _;
process.m_signal_pending_mask |= 1 << signal;
// FIXME: This is super hacky
// FIXME: This feels hacky
Scheduler::get().unblock_thread(process.m_threads.front()->tid());
}
return (pid > 0) ? BAN::Iteration::Break : BAN::Iteration::Continue;

View File

@ -1,6 +1,5 @@
#include <kernel/Arch.h>
#include <kernel/Attributes.h>
#include <kernel/CriticalScope.h>
#include <kernel/GDT.h>
#include <kernel/InterruptController.h>
#include <kernel/Process.h>
@ -8,15 +7,6 @@
#include <kernel/Timer/Timer.h>
#define SCHEDULER_VERIFY_STACK 1
#define SCHEDULER_VERIFY_INTERRUPT_STATE 1
#if SCHEDULER_VERIFY_INTERRUPT_STATE
#define VERIFY_STI() ASSERT(interrupts_enabled())
#define VERIFY_CLI() ASSERT(!interrupts_enabled())
#else
#define VERIFY_STI()
#define VERIFY_CLI()
#endif
namespace Kernel
{
@ -50,10 +40,11 @@ namespace Kernel
void Scheduler::start()
{
VERIFY_CLI();
ASSERT(get_interrupt_state() == InterruptState::Disabled);
m_lock.lock();
ASSERT(!m_active_threads.empty());
m_current_thread = m_active_threads.begin();
execute_current_thread();
execute_current_thread_locked();
ASSERT_NOT_REACHED();
}
@ -71,48 +62,40 @@ namespace Kernel
void Scheduler::timer_reschedule()
{
VERIFY_CLI();
auto state = m_lock.lock();
wake_threads();
if (save_current_thread())
return;
return m_lock.unlock(state);
advance_current_thread();
execute_current_thread();
execute_current_thread_locked();
ASSERT_NOT_REACHED();
}
void Scheduler::reschedule()
{
DISABLE_INTERRUPTS();
auto state = m_lock.lock();
if (save_current_thread())
{
ENABLE_INTERRUPTS();
return;
}
return set_interrupt_state(state);
advance_current_thread();
execute_current_thread();
execute_current_thread_locked();
ASSERT_NOT_REACHED();
}
void Scheduler::reschedule_if_idling()
{
VERIFY_CLI();
auto state = m_lock.lock();
if (m_active_threads.empty() || &current_thread() != m_idle_thread)
return;
return m_lock.unlock(state);
if (save_current_thread())
return;
return m_lock.unlock(state);
m_current_thread = m_active_threads.begin();
execute_current_thread();
execute_current_thread_locked();
ASSERT_NOT_REACHED();
}
void Scheduler::wake_threads()
{
VERIFY_CLI();
ASSERT(m_lock.is_locked());
uint64_t current_time = SystemTimer::get().ms_since_boot();
while (!m_sleeping_threads.empty() && m_sleeping_threads.front().wake_time <= current_time)
@ -127,14 +110,22 @@ namespace Kernel
BAN::ErrorOr<void> Scheduler::add_thread(Thread* thread)
{
CriticalScope _;
SpinLockGuard _(m_lock);
TRY(m_active_threads.emplace_back(thread));
return {};
}
void Scheduler::terminate_thread(Thread* thread)
{
SpinLockGuard _(m_lock);
thread->m_state = Thread::State::Terminated;
if (thread == &current_thread())
execute_current_thread_locked();
}
void Scheduler::advance_current_thread()
{
VERIFY_CLI();
ASSERT(m_lock.is_locked());
if (m_active_threads.empty())
{
@ -147,7 +138,7 @@ namespace Kernel
void Scheduler::remove_and_advance_current_thread()
{
VERIFY_CLI();
ASSERT(m_lock.is_locked());
ASSERT(m_current_thread);
@ -168,7 +159,7 @@ namespace Kernel
// after getting the rsp
ALWAYS_INLINE bool Scheduler::save_current_thread()
{
VERIFY_CLI();
ASSERT(m_lock.is_locked());
uintptr_t rsp, rip;
push_callee_saved();
@ -190,7 +181,7 @@ namespace Kernel
void Scheduler::delete_current_process_and_thread()
{
DISABLE_INTERRUPTS();
m_lock.lock();
load_temp_stack();
PageTable::kernel().load();
@ -204,23 +195,33 @@ namespace Kernel
delete thread;
execute_current_thread();
execute_current_thread_locked();
ASSERT_NOT_REACHED();
}
void Scheduler::execute_current_thread()
{
VERIFY_CLI();
m_lock.lock();
load_temp_stack();
PageTable::kernel().load();
_execute_current_thread();
execute_current_thread_stack_loaded();
ASSERT_NOT_REACHED();
}
NEVER_INLINE void Scheduler::_execute_current_thread()
void Scheduler::execute_current_thread_locked()
{
VERIFY_CLI();
load_temp_stack();
PageTable::kernel().load();
execute_current_thread_stack_loaded();
ASSERT_NOT_REACHED();
}
NEVER_INLINE void Scheduler::execute_current_thread_stack_loaded()
{
ASSERT(m_lock.is_locked());
load_temp_stack();
PageTable::kernel().load();
#if SCHEDULER_VERIFY_STACK
vaddr_t rsp;
@ -267,10 +268,12 @@ namespace Kernel
{
case Thread::State::NotStarted:
current->set_started();
m_lock.unlock(InterruptState::Disabled);
start_thread(current->rsp(), current->rip());
case Thread::State::Executing:
while (current->can_add_signal_to_execute())
current->handle_signal();
m_lock.unlock(InterruptState::Disabled);
continue_thread(current->rsp(), current->rip());
case Thread::State::Terminated:
ASSERT_NOT_REACHED();
@ -281,11 +284,11 @@ namespace Kernel
void Scheduler::set_current_thread_sleeping_impl(uint64_t wake_time)
{
VERIFY_CLI();
ASSERT(m_lock.is_locked());
if (save_current_thread())
{
ENABLE_INTERRUPTS();
set_interrupt_state(InterruptState::Enabled);
return;
}
@ -304,35 +307,27 @@ namespace Kernel
m_current_thread = {};
advance_current_thread();
execute_current_thread();
execute_current_thread_locked();
ASSERT_NOT_REACHED();
}
void Scheduler::set_current_thread_sleeping(uint64_t wake_time)
{
VERIFY_STI();
DISABLE_INTERRUPTS();
ASSERT(m_current_thread);
SpinLockGuard _(m_lock);
m_current_thread->semaphore = nullptr;
set_current_thread_sleeping_impl(wake_time);
}
void Scheduler::block_current_thread(Semaphore* semaphore, uint64_t wake_time)
{
VERIFY_STI();
DISABLE_INTERRUPTS();
ASSERT(m_current_thread);
SpinLockGuard _(m_lock);
m_current_thread->semaphore = semaphore;
set_current_thread_sleeping_impl(wake_time);
}
void Scheduler::unblock_threads(Semaphore* semaphore)
{
CriticalScope critical;
SpinLockGuard _(m_lock);
for (auto it = m_sleeping_threads.begin(); it != m_sleeping_threads.end();)
{
@ -353,7 +348,7 @@ namespace Kernel
void Scheduler::unblock_thread(pid_t tid)
{
CriticalScope _;
SpinLockGuard _(m_lock);
for (auto it = m_sleeping_threads.begin(); it != m_sleeping_threads.end(); it++)
{

View File

@ -1,4 +1,3 @@
#include <kernel/CriticalScope.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/PageTable.h>

View File

@ -1,5 +1,4 @@
#include <BAN/Array.h>
#include <kernel/CriticalScope.h>
#include <kernel/Device/DeviceNumbers.h>
#include <kernel/FS/DevFS/FileSystem.h>
#include <kernel/IDT.h>
@ -219,6 +218,8 @@ namespace Kernel
void SerialTTY::handle_irq()
{
uint8_t ch = IO::inb(m_serial.port());
SpinLockGuard _(m_input_lock);
if (m_input.full())
{
dwarnln("Serial buffer full");
@ -235,7 +236,7 @@ namespace Kernel
uint8_t buffer[128];
{
CriticalScope _;
SpinLockGuard _(m_input_lock);
if (m_input.empty())
return;
uint8_t* ptr = buffer;

View File

@ -27,14 +27,6 @@ namespace Kernel
memcpy((void*)rsp, (void*)&value, sizeof(uintptr_t));
}
void Thread::terminate()
{
CriticalScope _;
m_state = Thread::State::Terminated;
if (this == &Thread::current())
Scheduler::get().execute_current_thread();
}
static pid_t s_next_tid = 1;
BAN::ErrorOr<Thread*> Thread::create_kernel(entry_t entry, void* data, Process* process)
@ -193,9 +185,10 @@ namespace Kernel
{
m_state = State::NotStarted;
static entry_t entry(
[](void* process)
[](void* process_ptr)
{
((Process*)process)->cleanup_function();
auto& process = *reinterpret_cast<Process*>(process_ptr);
process.cleanup_function();
Scheduler::get().delete_current_process_and_thread();
ASSERT_NOT_REACHED();
}
@ -245,10 +238,11 @@ namespace Kernel
void Thread::handle_signal(int signal)
{
ASSERT(!interrupts_enabled());
ASSERT(&Thread::current() == this);
ASSERT(is_userspace());
SpinLockGuard _(m_signal_lock);
auto& interrupt_stack = *reinterpret_cast<InterruptStack*>(interrupt_stack_base() + interrupt_stack_size() - sizeof(InterruptStack));
ASSERT(GDT::is_user_segment(interrupt_stack.cs));
@ -338,7 +332,8 @@ namespace Kernel
bool Thread::add_signal(int signal)
{
ASSERT(!interrupts_enabled());
SpinLockGuard _(m_signal_lock);
uint64_t mask = 1ull << signal;
if (!(m_signal_block_mask & mask))
{
@ -393,7 +388,7 @@ namespace Kernel
void Thread::on_exit()
{
ASSERT(this == &Thread::current());
terminate();
Scheduler::get().terminate_thread(this);
ASSERT_NOT_REACHED();
}

View File

@ -244,7 +244,7 @@ namespace Kernel
if (m_is_64bit)
return regs.main_counter.full;
CriticalScope _;
SpinLockGuard _(m_lock);
uint32_t current_low = regs.main_counter.low;
uint32_t wraps = m_32bit_wraps;
if (current_low < (uint32_t)m_last_ticks)
@ -256,19 +256,21 @@ namespace Kernel
{
auto& regs = registers();
uint64_t current_ticks;
if (m_is_64bit)
current_ticks = regs.main_counter.full;
else
{
uint32_t current_low = regs.main_counter.low;
if (current_low < (uint32_t)m_last_ticks)
m_32bit_wraps++;
current_ticks = ((uint64_t)m_32bit_wraps << 32) | current_low;
}
SpinLockGuard _(m_lock);
m_last_ticks = current_ticks;
uint64_t current_ticks;
if (m_is_64bit)
current_ticks = regs.main_counter.full;
else
{
uint32_t current_low = regs.main_counter.low;
if (current_low < (uint32_t)m_last_ticks)
m_32bit_wraps++;
current_ticks = ((uint64_t)m_32bit_wraps << 32) | current_low;
}
m_last_ticks = current_ticks;
}
Scheduler::get().timer_reschedule();
}

View File

@ -82,7 +82,7 @@ extern "C" void kernel_main(uint32_t boot_magic, uint32_t boot_info)
{
using namespace Kernel;
DISABLE_INTERRUPTS();
set_interrupt_state(InterruptState::Disabled);
if (!validate_boot_magic(boot_magic))
{