Compare commits


No commits in common. "3e0150f847f7894adb1a706be8b48b805c9f018f" and "4b917390acce021ffd7f015661a447bf61ceb381" have entirely different histories.

74 changed files with 877 additions and 1825 deletions

View File

@ -39,7 +39,6 @@ namespace BAN::Formatter
int base = 10;
int percision = 3;
int fill = 0;
char fill_char = '0';
bool upper = false;
};
@ -95,12 +94,6 @@ namespace BAN::Formatter
if (!format[i] || format[i] == '}')
break;
if (format[i] == ' ')
{
value_format.fill_char = ' ';
i++;
}
if ('0' <= format[i] && format[i] <= '9')
{
int fill = 0;
@ -168,8 +161,7 @@ namespace BAN::Formatter
{
if (value == 0)
{
for (int i = 0; i < format.fill - 1; i++)
putc(format.fill_char);
for (int i = 0; i < format.fill || i < 1; i++)
putc('0');
return;
}
@ -196,7 +188,7 @@ namespace BAN::Formatter
}
while (ptr >= buffer + sizeof(buffer) - format.fill)
*(--ptr) = format.fill_char;
*(--ptr) = '0';
if (sign)
*(--ptr) = '-';
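The hunk above replaces the configurable fill_char with a hard-coded '0'. A minimal standalone sketch of the buffer-walk padding idiom both sides share (illustrative only, not the BAN::Formatter API):

#include <cstdint>
#include <cstdio>

// Digits are written from the end of a scratch buffer, then the remainder
// up to `fill` characters is padded with '0', as in the hunk above.
// Assumes 2 <= base <= 16 and fill <= 64.
static void put_u64(uint64_t value, int base, int fill)
{
    const char* digits = "0123456789abcdef";
    char buffer[64];
    char* ptr = buffer + sizeof(buffer);
    do {
        *(--ptr) = digits[value % base];
        value /= base;
    } while (value);
    while (ptr > buffer + sizeof(buffer) - fill)
        *(--ptr) = '0';
    fwrite(ptr, 1, buffer + sizeof(buffer) - ptr, stdout);
}

// put_u64(0x2A, 16, 8) prints "0000002a"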

View File

@ -71,7 +71,7 @@ set(KERNEL_SOURCES
kernel/Processor.cpp
kernel/Random.cpp
kernel/Scheduler.cpp
kernel/ThreadBlocker.cpp
kernel/Semaphore.cpp
kernel/SSP.cpp
kernel/Storage/ATA/AHCI/Controller.cpp
kernel/Storage/ATA/AHCI/Device.cpp

View File

@ -204,7 +204,7 @@ namespace Kernel
ASSERT(!(pt[pte] & Flags::Present));
pt[pte] = paddr | Flags::ReadWrite | Flags::Present;
invalidate(fast_page(), false);
invalidate(fast_page());
}
void PageTable::unmap_fast_page()
@ -224,7 +224,7 @@ namespace Kernel
ASSERT(pt[pte] & Flags::Present);
pt[pte] = 0;
invalidate(fast_page(), false);
invalidate(fast_page());
}
BAN::ErrorOr<PageTable*> PageTable::create_userspace()
@ -283,24 +283,13 @@ namespace Kernel
Processor::set_current_page_table(this);
}
void PageTable::invalidate(vaddr_t vaddr, bool send_smp_message)
void PageTable::invalidate(vaddr_t vaddr)
{
ASSERT(vaddr % PAGE_SIZE == 0);
asm volatile("invlpg (%0)" :: "r"(vaddr) : "memory");
if (send_smp_message)
{
Processor::broadcast_smp_message({
.type = Processor::SMPMessage::Type::FlushTLB,
.flush_tlb = {
.vaddr = vaddr,
.page_count = 1
}
});
}
}
void PageTable::unmap_page(vaddr_t vaddr, bool send_smp_message)
void PageTable::unmap_page(vaddr_t vaddr)
{
ASSERT(vaddr);
ASSERT(vaddr % PAGE_SIZE == 0);
@ -317,36 +306,30 @@ namespace Kernel
SpinLockGuard _(m_lock);
if (is_page_free(vaddr))
Kernel::panic("trying to unmap unmapped page 0x{H}", vaddr);
{
dwarnln("unmapping unmapped page {8H}", vaddr);
return;
}
uint64_t* pdpt = reinterpret_cast<uint64_t*>(P2V(m_highest_paging_struct));
uint64_t* pd = reinterpret_cast<uint64_t*>(P2V(pdpt[pdpte] & PAGE_ADDR_MASK));
uint64_t* pt = reinterpret_cast<uint64_t*>(P2V(pd[pde] & PAGE_ADDR_MASK));
pt[pte] = 0;
invalidate(vaddr, send_smp_message);
invalidate(vaddr);
}
void PageTable::unmap_range(vaddr_t vaddr, size_t size)
{
ASSERT(vaddr % PAGE_SIZE == 0);
size_t page_count = range_page_count(vaddr, size);
vaddr_t s_page = vaddr / PAGE_SIZE;
vaddr_t e_page = BAN::Math::div_round_up<vaddr_t>(vaddr + size, PAGE_SIZE);
SpinLockGuard _(m_lock);
for (vaddr_t page = 0; page < page_count; page++)
unmap_page(vaddr + page * PAGE_SIZE, false);
Processor::broadcast_smp_message({
.type = Processor::SMPMessage::Type::FlushTLB,
.flush_tlb = {
.vaddr = vaddr,
.page_count = page_count
}
});
for (vaddr_t page = s_page; page < e_page; page++)
unmap_page(page * PAGE_SIZE);
}
void PageTable::map_page_at(paddr_t paddr, vaddr_t vaddr, flags_t flags, MemoryType memory_type, bool send_smp_message)
void PageTable::map_page_at(paddr_t paddr, vaddr_t vaddr, flags_t flags, MemoryType memory_type)
{
ASSERT(vaddr);
ASSERT(vaddr != fast_page());
@ -400,7 +383,7 @@ namespace Kernel
uint64_t* pt = reinterpret_cast<uint64_t*>(P2V(pd[pde] & PAGE_ADDR_MASK));
pt[pte] = paddr | uwr_flags | extra_flags;
invalidate(vaddr, send_smp_message);
invalidate(vaddr);
}
void PageTable::map_range_at(paddr_t paddr, vaddr_t vaddr, size_t size, flags_t flags, MemoryType memory_type)
@ -413,15 +396,7 @@ namespace Kernel
SpinLockGuard _(m_lock);
for (size_t page = 0; page < page_count; page++)
map_page_at(paddr + page * PAGE_SIZE, vaddr + page * PAGE_SIZE, flags, memory_type, false);
Processor::broadcast_smp_message({
.type = Processor::SMPMessage::Type::FlushTLB,
.flush_tlb = {
.vaddr = vaddr,
.page_count = page_count
}
});
map_page_at(paddr + page * PAGE_SIZE, vaddr + page * PAGE_SIZE, flags, memory_type);
}
uint64_t PageTable::get_page_data(vaddr_t vaddr) const
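For context on the PageTable hunks above: the left-hand version followed each local invlpg with a FlushTLB SMP broadcast so other cores discard stale translations, while the right-hand version only invalidates locally. A hedged sketch of the shootdown shape that was removed (the names below are stand-ins, not the kernel's API):

#include <cstddef>
#include <cstdint>

constexpr uintptr_t PAGE_SIZE = 4096;
struct FlushTLB { uintptr_t vaddr; size_t page_count; };
void broadcast_flush_tlb(const FlushTLB&); // assumed IPI-based primitive

void invalidate_range(uintptr_t vaddr, size_t page_count)
{
    // flush this core's TLB entries for the range
    for (size_t i = 0; i < page_count; i++)
        asm volatile("invlpg (%0)" :: "r"(vaddr + i * PAGE_SIZE) : "memory");
    // without this, other SMP cores may keep translating through stale entries
    broadcast_flush_tlb({ .vaddr = vaddr, .page_count = page_count });
}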

View File

@ -15,14 +15,8 @@ asm_syscall_handler:
pushl %esi
pushl %ebp
# align stack
movl %esp, %ebp
subl $15, %esp
andl $0xFFFFFFF0, %esp
# push arguments
subl $4, %esp
pushl %ebp
# align stack and push arguments
pushl %esp
addl $32, (%esp)
pushl %edi
pushl %esi
@ -40,8 +34,7 @@ asm_syscall_handler:
movw %ax, %gs
call cpp_syscall_handler
movl %ebp, %esp
addl $28, %esp
# restore general purpose registers
popl %ebp

View File

@ -38,18 +38,13 @@ isr_stub:
movl 60(%esp), %ecx // error code
movl 56(%esp), %edx // isr number
movl %esp, %ebp
subl $15, %esp
andl $0xFFFFFFF0, %esp
subl $12, %esp
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
call cpp_isr_handler
movl %ebp, %esp
addl $16, %esp
addl $44, %esp
pop_userspace
addl $8, %esp
@ -61,15 +56,10 @@ irq_stub:
movl 40(%esp), %eax # interrupt number
movl %esp, %ebp
subl $15, %esp
andl $0xFFFFFFF0, %esp
subl $12, %esp
pushl %eax
call cpp_irq_handler
movl %ebp, %esp
addl $16, %esp
pop_userspace
addl $8, %esp
@ -83,36 +73,15 @@ asm_yield_handler:
movl %esp, %eax # interrupt registers ptr
leal 32(%esp), %ebx # interrupt stack ptr
movl %esp, %ebp
subl $15, %esp
andl $0xFFFFFFF0, %esp
subl $8, %esp
subl $4, %esp
pushl %eax
pushl %ebx
call cpp_yield_handler
movl %ebp, %esp
addl $12, %esp
popal
iret
.global asm_ipi_handler
asm_ipi_handler:
push_userspace
load_kernel_segments
movl %esp, %ebp
subl $15, %esp
andl $0xFFFFFFF0, %esp
call cpp_ipi_handler
movl %ebp, %esp
pop_userspace
iret
.macro isr n
.global isr\n
isr\n:
@ -201,3 +170,4 @@ irq 28
irq 29
irq 30
irq 31
irq 32
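The removed "subl $15, %esp; andl $0xFFFFFFF0, %esp" sequence in these stubs rounds the stack pointer down to a 16-byte boundary before calling into C++, per the SysV i386 psABI call-site alignment requirement; the old value is stashed in %ebp and restored afterwards. The same arithmetic in C (a standalone sketch):

#include <cassert>
#include <cstdint>

// Round a stack pointer down to a 16-byte boundary. The stubs also
// subtract 15 first, which additionally guarantees the pointer moves
// below its old value before masking.
uintptr_t align_stack_16(uintptr_t sp)
{
    return sp & ~uintptr_t{ 0xF };   // andl $0xFFFFFFF0, %esp
}

int main()
{
    assert(align_stack_16(0x1237) == 0x1230);
    assert(align_stack_16(0x1230) == 0x1230);
}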

View File

@ -78,6 +78,7 @@ namespace Kernel
ASSERT(s_kernel);
s_kernel->initialize_kernel();
s_kernel->initial_load();
}
void PageTable::initial_load()
@ -236,7 +237,7 @@ namespace Kernel
ASSERT(!(pt[pte] & Flags::Present));
pt[pte] = paddr | Flags::ReadWrite | Flags::Present;
invalidate(fast_page(), false);
invalidate(fast_page());
}
void PageTable::unmap_fast_page()
@ -259,7 +260,7 @@ namespace Kernel
ASSERT(pt[pte] & Flags::Present);
pt[pte] = 0;
invalidate(fast_page(), false);
invalidate(fast_page());
}
BAN::ErrorOr<PageTable*> PageTable::create_userspace()
@ -321,24 +322,13 @@ namespace Kernel
Processor::set_current_page_table(this);
}
void PageTable::invalidate(vaddr_t vaddr, bool send_smp_message)
void PageTable::invalidate(vaddr_t vaddr)
{
ASSERT(vaddr % PAGE_SIZE == 0);
asm volatile("invlpg (%0)" :: "r"(vaddr) : "memory");
if (send_smp_message)
{
Processor::broadcast_smp_message({
.type = Processor::SMPMessage::Type::FlushTLB,
.flush_tlb = {
.vaddr = vaddr,
.page_count = 1
}
});
}
}
void PageTable::unmap_page(vaddr_t vaddr, bool send_smp_message)
void PageTable::unmap_page(vaddr_t vaddr)
{
ASSERT(vaddr);
ASSERT(vaddr != fast_page());
@ -360,7 +350,10 @@ namespace Kernel
SpinLockGuard _(m_lock);
if (is_page_free(vaddr))
Kernel::panic("trying to unmap unmapped page 0x{H}", vaddr);
{
dwarnln("unmapping unmapped page {8H}", vaddr);
return;
}
uint64_t* pml4 = (uint64_t*)P2V(m_highest_paging_struct);
uint64_t* pdpt = (uint64_t*)P2V(pml4[pml4e] & PAGE_ADDR_MASK);
@ -368,29 +361,20 @@ namespace Kernel
uint64_t* pt = (uint64_t*)P2V(pd[pde] & PAGE_ADDR_MASK);
pt[pte] = 0;
invalidate(vaddr, send_smp_message);
invalidate(vaddr);
}
void PageTable::unmap_range(vaddr_t vaddr, size_t size)
{
ASSERT(vaddr % PAGE_SIZE == 0);
size_t page_count = range_page_count(vaddr, size);
vaddr_t s_page = vaddr / PAGE_SIZE;
vaddr_t e_page = BAN::Math::div_round_up<vaddr_t>(vaddr + size, PAGE_SIZE);
SpinLockGuard _(m_lock);
for (vaddr_t page = 0; page < page_count; page++)
unmap_page(vaddr + page * PAGE_SIZE, false);
Processor::broadcast_smp_message({
.type = Processor::SMPMessage::Type::FlushTLB,
.flush_tlb = {
.vaddr = vaddr,
.page_count = page_count
}
});
for (vaddr_t page = s_page; page < e_page; page++)
unmap_page(page * PAGE_SIZE);
}
void PageTable::map_page_at(paddr_t paddr, vaddr_t vaddr, flags_t flags, MemoryType memory_type, bool send_smp_message)
void PageTable::map_page_at(paddr_t paddr, vaddr_t vaddr, flags_t flags, MemoryType memory_type)
{
ASSERT(vaddr);
ASSERT(vaddr != fast_page());
@ -457,7 +441,7 @@ namespace Kernel
uint64_t* pt = (uint64_t*)P2V(pd[pde] & PAGE_ADDR_MASK);
pt[pte] = paddr | uwr_flags | extra_flags;
invalidate(vaddr, send_smp_message);
invalidate(vaddr);
}
void PageTable::map_range_at(paddr_t paddr, vaddr_t vaddr, size_t size, flags_t flags, MemoryType memory_type)
@ -472,15 +456,7 @@ namespace Kernel
SpinLockGuard _(m_lock);
for (size_t page = 0; page < page_count; page++)
map_page_at(paddr + page * PAGE_SIZE, vaddr + page * PAGE_SIZE, flags, memory_type, false);
Processor::broadcast_smp_message({
.type = Processor::SMPMessage::Type::FlushTLB,
.flush_tlb = {
.vaddr = vaddr,
.page_count = page_count
}
});
map_page_at(paddr + page * PAGE_SIZE, vaddr + page * PAGE_SIZE, flags, memory_type);
}
uint64_t PageTable::get_page_data(vaddr_t vaddr) const

View File

@ -70,13 +70,6 @@ asm_yield_handler:
popaq
iretq
.global asm_ipi_handler
asm_ipi_handler:
pushaq
call cpp_ipi_handler
popaq
iretq
.macro isr n
.global isr\n
isr\n:
@ -165,3 +158,4 @@ irq 28
irq 29
irq 30
irq 31
irq 32

View File

@ -5,7 +5,6 @@
#include <kernel/ACPI/AML/Namespace.h>
#include <kernel/ACPI/Headers.h>
#include <kernel/Memory/Types.h>
#include <kernel/ThreadBlocker.h>
namespace Kernel::ACPI
{
@ -64,7 +63,7 @@ namespace Kernel::ACPI
FADT* m_fadt { nullptr };
ThreadBlocker m_event_thread_blocker;
Semaphore m_event_semaphore;
BAN::Array<BAN::RefPtr<AML::Method>, 0xFF> m_gpe_methods;
bool m_hardware_reduced { false };

View File

@ -120,7 +120,7 @@ namespace Kernel::ACPI::AML
{
if (SystemTimer::get().ms_since_boot() >= wake_time)
return ParseResult(Integer::Constants::Ones);
Processor::yield();
SystemTimer::get().sleep(1);
}
}

View File

@ -30,7 +30,7 @@ namespace Kernel::ACPI::AML
AML_DEBUG_PRINTLN("Sleeping for {} ms", sleep_time.value());
#endif
SystemTimer::get().sleep_ms(sleep_time.value());
SystemTimer::get().sleep(sleep_time.value());
return ParseResult::Success;
}
};

View File

@ -19,7 +19,6 @@ namespace Kernel
virtual BAN::Optional<uint8_t> get_free_irq() override;
virtual void initialize_multiprocessor() override;
virtual void send_ipi(ProcessorID target) override;
virtual void broadcast_ipi() override;
virtual void enable() override;

View File

@ -4,7 +4,7 @@
#include <kernel/Device/Device.h>
#include <kernel/FS/TmpFS/FileSystem.h>
#include <kernel/Lock/Mutex.h>
#include <kernel/ThreadBlocker.h>
#include <kernel/Semaphore.h>
namespace Kernel
{
@ -34,8 +34,8 @@ namespace Kernel
BAN::Vector<BAN::RefPtr<Device>> m_devices;
ThreadBlocker m_sync_done;
ThreadBlocker m_sync_thread_blocker;
Semaphore m_sync_done;
Semaphore m_sync_semaphore;
volatile bool m_should_sync { false };
};

View File

@ -1,7 +1,7 @@
#pragma once
#include <kernel/FS/Inode.h>
#include <kernel/ThreadBlocker.h>
#include <kernel/Semaphore.h>
namespace Kernel
{
@ -47,7 +47,7 @@ namespace Kernel
timespec m_mtime {};
timespec m_ctime {};
BAN::Vector<uint8_t> m_buffer;
ThreadBlocker m_thread_blocker;
Semaphore m_semaphore;
uint32_t m_writing_count { 1 };
};

View File

@ -8,8 +8,8 @@
#include <stdint.h>
constexpr uint8_t IRQ_VECTOR_BASE = 0x20;
constexpr uint8_t IRQ_YIELD = 32;
constexpr uint8_t IRQ_IPI = 33;
constexpr uint8_t IRQ_IPI = 32;
constexpr uint8_t IRQ_YIELD = 33;
namespace Kernel
{

View File

@ -3,7 +3,6 @@
#include <BAN/ByteSpan.h>
#include <kernel/Device/Device.h>
#include <kernel/ThreadBlocker.h>
namespace Kernel
{
@ -43,7 +42,7 @@ namespace Kernel
const Type m_type;
mutable SpinLock m_event_lock;
ThreadBlocker m_event_thread_blocker;
Semaphore m_event_semaphore;
static constexpr size_t m_max_event_count { 128 };
@ -64,7 +63,7 @@ namespace Kernel
public:
static BAN::ErrorOr<BAN::RefPtr<KeyboardDevice>> create(mode_t mode, uid_t uid, gid_t gid);
void notify() { m_thread_blocker.unblock(); }
void notify() { m_semaphore.unblock(); }
private:
KeyboardDevice(mode_t mode, uid_t uid, gid_t gid);
@ -80,7 +79,7 @@ namespace Kernel
private:
const dev_t m_rdev;
const BAN::StringView m_name;
ThreadBlocker m_thread_blocker;
Semaphore m_semaphore;
friend class BAN::RefPtr<KeyboardDevice>;
};
@ -90,7 +89,7 @@ namespace Kernel
public:
static BAN::ErrorOr<BAN::RefPtr<MouseDevice>> create(mode_t mode, uid_t uid, gid_t gid);
void notify() { m_thread_blocker.unblock(); }
void notify() { m_semaphore.unblock(); }
private:
MouseDevice(mode_t mode, uid_t uid, gid_t gid);
@ -106,7 +105,7 @@ namespace Kernel
private:
const dev_t m_rdev;
const BAN::StringView m_name;
ThreadBlocker m_thread_blocker;
Semaphore m_semaphore;
friend class BAN::RefPtr<MouseDevice>;
};

View File

@ -22,7 +22,6 @@ namespace Kernel
static InterruptController& get();
virtual void initialize_multiprocessor() = 0;
virtual void send_ipi(ProcessorID target) = 0;
virtual void broadcast_ipi() = 0;
virtual void enable() = 0;

View File

@ -1,7 +1,5 @@
#pragma once
#include <kernel/Arch.h>
#include <stdint.h>
namespace Kernel

View File

@ -2,7 +2,7 @@
#include <BAN/Atomic.h>
#include <BAN/NoCopyMove.h>
#include <kernel/Thread.h>
#include <kernel/Scheduler.h>
#include <sys/types.h>
@ -19,7 +19,7 @@ namespace Kernel
void lock()
{
const auto tid = Thread::current_tid();
auto tid = Scheduler::current_tid();
if (tid == m_locker)
ASSERT(m_lock_depth > 0);
else
@ -27,11 +27,11 @@ namespace Kernel
pid_t expected = -1;
while (!m_locker.compare_exchange(expected, tid))
{
Processor::yield();
Scheduler::get().yield();
expected = -1;
}
ASSERT(m_lock_depth == 0);
if (tid)
if (Scheduler::current_tid())
Thread::current().add_mutex();
}
m_lock_depth++;
@ -39,7 +39,7 @@ namespace Kernel
bool try_lock()
{
const auto tid = Thread::current_tid();
auto tid = Scheduler::current_tid();
if (tid == m_locker)
ASSERT(m_lock_depth > 0);
else
@ -48,7 +48,7 @@ namespace Kernel
if (!m_locker.compare_exchange(expected, tid))
return false;
ASSERT(m_lock_depth == 0);
if (tid)
if (Scheduler::current_tid())
Thread::current().add_mutex();
}
m_lock_depth++;
@ -57,13 +57,12 @@ namespace Kernel
void unlock()
{
const auto tid = Thread::current_tid();
ASSERT(m_locker == tid);
ASSERT(m_locker == Scheduler::current_tid());
ASSERT(m_lock_depth > 0);
if (--m_lock_depth == 0)
{
m_locker = -1;
if (tid)
if (Scheduler::current_tid())
Thread::current().remove_mutex();
}
}
@ -87,7 +86,7 @@ namespace Kernel
void lock()
{
const auto tid = Thread::current_tid();
auto tid = Scheduler::current_tid();
if (tid == m_locker)
ASSERT(m_lock_depth > 0);
else
@ -98,11 +97,11 @@ namespace Kernel
pid_t expected = -1;
while (!(has_priority || m_queue_length == 0) || !m_locker.compare_exchange(expected, tid))
{
Processor::yield();
Scheduler::get().yield();
expected = -1;
}
ASSERT(m_lock_depth == 0);
if (tid)
if (Scheduler::current_tid())
Thread::current().add_mutex();
}
m_lock_depth++;
@ -110,7 +109,7 @@ namespace Kernel
bool try_lock()
{
const auto tid = Thread::current_tid();
auto tid = Scheduler::current_tid();
if (tid == m_locker)
ASSERT(m_lock_depth > 0);
else
@ -122,7 +121,7 @@ namespace Kernel
if (has_priority)
m_queue_length++;
ASSERT(m_lock_depth == 0);
if (tid)
if (Scheduler::current_tid())
Thread::current().add_mutex();
}
m_lock_depth++;
@ -131,7 +130,7 @@ namespace Kernel
void unlock()
{
const auto tid = Thread::current_tid();
auto tid = Scheduler::current_tid();
ASSERT(m_locker == tid);
ASSERT(m_lock_depth > 0);
if (--m_lock_depth == 0)
@ -140,7 +139,7 @@ namespace Kernel
if (has_priority)
m_queue_length--;
m_locker = -1;
if (tid)
if (Scheduler::current_tid())
Thread::current().remove_mutex();
}
}
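A usage sketch of the recursive Mutex above: the same tid may lock repeatedly without spinning, and each lock() must be paired with an unlock():

Kernel::Mutex mutex;

void example()
{
    mutex.lock();
    mutex.lock();     // same thread: m_lock_depth becomes 2, no spin
    mutex.unlock();
    mutex.unlock();   // depth reaches 0, m_locker is reset to -1
}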

View File

@ -23,45 +23,34 @@ namespace Kernel
auto state = Processor::get_interrupt_state();
Processor::set_interrupt_state(InterruptState::Disabled);
auto id = Processor::current_id().as_u32();
ASSERT(m_locker.load(BAN::MemoryOrder::memory_order_relaxed) != id);
auto id = Processor::current_id();
ASSERT(m_locker != id);
auto expected = PROCESSOR_NONE.as_u32();
ProcessorID expected = PROCESSOR_NONE;
while (!m_locker.compare_exchange(expected, id, BAN::MemoryOrder::memory_order_acquire))
{
Processor::pause();
expected = PROCESSOR_NONE.as_u32();
__builtin_ia32_pause();
expected = PROCESSOR_NONE;
}
return state;
}
bool try_lock_interrupts_disabled()
{
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
auto id = Processor::current_id().as_u32();
ASSERT(m_locker.load(BAN::MemoryOrder::memory_order_relaxed) != id);
auto expected = PROCESSOR_NONE.as_u32();
return m_locker.compare_exchange(expected, id, BAN::MemoryOrder::memory_order_acquire);
}
void unlock(InterruptState state)
{
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
ASSERT(current_processor_has_lock());
m_locker.store(PROCESSOR_NONE.as_u32(), BAN::MemoryOrder::memory_order_release);
ASSERT(m_locker == Processor::current_id());
m_locker.store(PROCESSOR_NONE, BAN::MemoryOrder::memory_order_release);
Processor::set_interrupt_state(state);
}
bool current_processor_has_lock() const
{
return m_locker.load(BAN::MemoryOrder::memory_order_relaxed) == Processor::current_id().as_u32();
return m_locker == Processor::current_id();
}
private:
BAN::Atomic<ProcessorID::value_type> m_locker { PROCESSOR_NONE.as_u32() };
BAN::Atomic<ProcessorID> m_locker { PROCESSOR_NONE };
};
class RecursiveSpinLock
@ -77,15 +66,18 @@ namespace Kernel
auto state = Processor::get_interrupt_state();
Processor::set_interrupt_state(InterruptState::Disabled);
auto id = Processor::current_id().as_u32();
ProcessorID::value_type expected = PROCESSOR_NONE.as_u32();
while (!m_locker.compare_exchange(expected, id, BAN::MemoryOrder::memory_order_acq_rel))
auto id = Processor::current_id();
if (m_locker == id)
ASSERT(m_lock_depth > 0);
else
{
if (expected == id)
break;
Processor::pause();
expected = PROCESSOR_NONE.as_u32();
ProcessorID expected = PROCESSOR_NONE;
while (!m_locker.compare_exchange(expected, id, BAN::MemoryOrder::memory_order_acquire))
{
__builtin_ia32_pause();
expected = PROCESSOR_NONE;
}
ASSERT(m_lock_depth == 0);
}
m_lock_depth++;
@ -96,20 +88,20 @@ namespace Kernel
void unlock(InterruptState state)
{
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
ASSERT(current_processor_has_lock());
ASSERT(m_locker == Processor::current_id());
ASSERT(m_lock_depth > 0);
if (--m_lock_depth == 0)
m_locker.store(PROCESSOR_NONE.as_u32(), BAN::MemoryOrder::memory_order_release);
m_locker.store(PROCESSOR_NONE, BAN::MemoryOrder::memory_order_release);
Processor::set_interrupt_state(state);
}
bool current_processor_has_lock() const
{
return m_locker.load(BAN::MemoryOrder::memory_order_relaxed) == Processor::current_id().as_u32();
return m_locker == Processor::current_id();
}
private:
BAN::Atomic<ProcessorID::value_type> m_locker { PROCESSOR_NONE.as_u32() };
BAN::Atomic<ProcessorID> m_locker { PROCESSOR_NONE };
uint32_t m_lock_depth { 0 };
};
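Both lock classes above hand the saved InterruptState back to the caller; the SpinLockGuard seen throughout this diff is the RAII wrapper over that protocol. A minimal sketch of the guard, assuming a lock() that returns the prior state:

// Construction takes the lock (interrupts disabled, previous state
// remembered); destruction releases the lock and restores the state.
template<typename Lock>
class Guard
{
public:
    explicit Guard(Lock& lock) : m_lock(lock), m_state(lock.lock()) {}
    ~Guard() { m_lock.unlock(m_state); }
private:
    Lock& m_lock;
    Kernel::InterruptState m_state;
};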

View File

@ -95,11 +95,11 @@ namespace Kernel
static BAN::ErrorOr<PageTable*> create_userspace();
~PageTable();
void unmap_page(vaddr_t, bool send_smp_message = true);
void unmap_page(vaddr_t);
void unmap_range(vaddr_t, size_t bytes);
void map_page_at(paddr_t, vaddr_t, flags_t, MemoryType = MemoryType::Normal, bool send_smp_message = true);
void map_range_at(paddr_t, vaddr_t, size_t bytes, flags_t, MemoryType = MemoryType::Normal);
void map_page_at(paddr_t, vaddr_t, flags_t, MemoryType = MemoryType::Normal);
paddr_t physical_address_of(vaddr_t) const;
flags_t get_page_flags(vaddr_t) const;
@ -127,8 +127,7 @@ namespace Kernel
void initialize_kernel();
void map_kernel_memory();
void prepare_fast_page();
static void invalidate(vaddr_t, bool send_smp_message);
static void invalidate(vaddr_t);
static void map_fast_page(paddr_t);
static void unmap_fast_page();

View File

@ -30,20 +30,19 @@ namespace Kernel
BAN::ErrorOr<void> allocate_page_for_demand_paging(vaddr_t address);
void copy_from(size_t offset, const uint8_t* buffer, size_t bytes);
private:
VirtualRange(PageTable&, bool preallocated, vaddr_t, size_t, PageTable::flags_t);
BAN::ErrorOr<void> initialize();
VirtualRange(PageTable&, bool preallocated);
void set_zero();
private:
PageTable& m_page_table;
const bool m_preallocated;
const vaddr_t m_vaddr;
const size_t m_size;
const PageTable::flags_t m_flags;
BAN::Vector<paddr_t> m_paddrs;
SpinLock m_lock;
friend class BAN::UniqPtr<VirtualRange>;
vaddr_t m_vaddr { 0 };
size_t m_size { 0 };
PageTable::flags_t m_flags { 0 };
};
}

View File

@ -5,7 +5,7 @@
#include <BAN/UniqPtr.h>
#include <kernel/Networking/NetworkInterface.h>
#include <kernel/Process.h>
#include <kernel/ThreadBlocker.h>
#include <kernel/Semaphore.h>
namespace Kernel
{
@ -58,7 +58,7 @@ namespace Kernel
Process* m_process = nullptr;
BAN::CircularQueue<PendingArpPacket, 128> m_pending_packets;
ThreadBlocker m_pending_thread_blocker;
Semaphore m_pending_semaphore;
friend class BAN::UniqPtr<ARPTable>;
};

View File

@ -77,7 +77,7 @@ namespace Kernel
static constexpr size_t pending_packet_buffer_size = 128 * PAGE_SIZE;
BAN::UniqPtr<VirtualRange> m_pending_packet_buffer;
BAN::CircularQueue<PendingIPv4Packet, 128> m_pending_packets;
ThreadBlocker m_pending_thread_blocker;
Semaphore m_pending_semaphore;
SpinLock m_pending_lock;
size_t m_pending_total_size { 0 };

View File

@ -7,7 +7,7 @@
#include <kernel/Networking/NetworkInterface.h>
#include <kernel/Networking/NetworkSocket.h>
#include <kernel/Process.h>
#include <kernel/ThreadBlocker.h>
#include <kernel/Semaphore.h>
namespace Kernel
{
@ -161,7 +161,7 @@ namespace Kernel
uint64_t m_time_wait_start_ms { 0 };
ThreadBlocker m_thread_blocker;
Semaphore m_semaphore;
RecvWindowInfo m_recv_window;
SendWindowInfo m_send_window;

View File

@ -6,7 +6,7 @@
#include <kernel/Memory/VirtualRange.h>
#include <kernel/Networking/NetworkInterface.h>
#include <kernel/Networking/NetworkSocket.h>
#include <kernel/ThreadBlocker.h>
#include <kernel/Semaphore.h>
namespace Kernel
{
@ -57,7 +57,7 @@ namespace Kernel
BAN::CircularQueue<PacketInfo, 32> m_packets;
size_t m_packet_total_size { 0 };
SpinLock m_packet_lock;
ThreadBlocker m_packet_thread_blocker;
Semaphore m_packet_semaphore;
friend class BAN::RefPtr<UDPSocket>;
};

View File

@ -48,7 +48,7 @@ namespace Kernel
mutable BAN::Atomic<bool> target_closed { false };
BAN::WeakPtr<UnixDomainSocket> connection;
BAN::Queue<BAN::RefPtr<UnixDomainSocket>> pending_connections;
ThreadBlocker pending_thread_blocker;
Semaphore pending_semaphore;
SpinLock pending_lock;
};
@ -67,7 +67,7 @@ namespace Kernel
size_t m_packet_size_total { 0 };
BAN::UniqPtr<VirtualRange> m_packet_buffer;
SpinLock m_packet_lock;
ThreadBlocker m_packet_thread_blocker;
Semaphore m_packet_semaphore;
friend class BAN::RefPtr<UnixDomainSocket>;
};

View File

@ -17,7 +17,6 @@ namespace Kernel
virtual BAN::Optional<uint8_t> get_free_irq() override;
virtual void initialize_multiprocessor() override;
virtual void send_ipi(ProcessorID) override {}
virtual void broadcast_ipi() override {}
virtual void enable() override {}

View File

@ -251,7 +251,7 @@ namespace Kernel
private:
struct ExitStatus
{
ThreadBlocker thread_blocker;
Semaphore semaphore;
int exit_code { 0 };
BAN::Atomic<bool> exited { false };
BAN::Atomic<int> waiting { 0 };

View File

@ -1,14 +1,12 @@
#pragma once
#include <BAN/Atomic.h>
#include <BAN/Formatter.h>
#include <BAN/ForwardList.h>
#include <kernel/Arch.h>
#include <kernel/GDT.h>
#include <kernel/IDT.h>
#include <kernel/InterruptStack.h>
#include <kernel/Scheduler.h>
#include <kernel/SchedulerQueue.h>
namespace Kernel
{
@ -19,28 +17,8 @@ namespace Kernel
Enabled,
};
class ProcessorID
{
public:
using value_type = uint32_t;
public:
ProcessorID() = default;
uint32_t as_u32() const { return m_id; }
bool operator==(ProcessorID other) const { return m_id == other.m_id; }
private:
explicit ProcessorID(uint32_t id) : m_id(id) {}
private:
uint32_t m_id = static_cast<uint32_t>(-1);
friend class Processor;
friend class APIC;
};
constexpr ProcessorID PROCESSOR_NONE { };
using ProcessorID = uint32_t;
constexpr ProcessorID PROCESSOR_NONE = 0xFFFFFFFF;
#if ARCH(x86_64) || ARCH(i686)
class Processor
@ -48,44 +26,12 @@ namespace Kernel
BAN_NON_COPYABLE(Processor);
BAN_NON_MOVABLE(Processor);
public:
struct SMPMessage
{
enum class Type
{
FlushTLB,
NewThread,
UnblockThread,
// FIXME: all processors should LAPIC for their preemption
SchedulerPreemption,
};
SMPMessage* next { nullptr };
Type type;
union
{
struct
{
uintptr_t vaddr;
size_t page_count;
} flush_tlb;
Scheduler::NewThreadRequest new_thread;
Scheduler::UnblockRequest unblock_thread;
uintptr_t scheduler_preemption;
};
};
public:
static Processor& create(ProcessorID id);
static Processor& initialize();
static void allocate_idle_thread();
static ProcessorID current_id() { return read_gs_sized<ProcessorID>(offsetof(Processor, m_id)); }
static ProcessorID id_from_index(size_t index);
static uint8_t count() { return s_processor_count; }
static bool is_smp_enabled() { return s_is_smp_enabled; }
static void wait_until_processors_ready();
static void toggle_should_print_cpu_load() { s_should_print_cpu_load = !s_should_print_cpu_load; }
static ProcessorID bsb_id() { return s_bsb_id; }
static bool current_is_bsb() { return current_id() == bsb_id(); }
@ -107,40 +53,31 @@ namespace Kernel
return InterruptState::Disabled;
};
static void pause()
{
__builtin_ia32_pause();
if (is_smp_enabled())
handle_smp_messages();
}
static uintptr_t current_stack_bottom() { return read_gs_sized<uintptr_t>(offsetof(Processor, m_stack)); }
static uintptr_t current_stack_bottom() { return reinterpret_cast<uintptr_t>(read_gs_ptr(offsetof(Processor, m_stack))); }
static uintptr_t current_stack_top() { return current_stack_bottom() + s_stack_size; }
uintptr_t stack_bottom() const { return reinterpret_cast<uintptr_t>(m_stack); }
uintptr_t stack_top() const { return stack_bottom() + s_stack_size; }
static GDT& gdt() { return *read_gs_sized<GDT*>(offsetof(Processor, m_gdt)); }
static IDT& idt() { return *read_gs_sized<IDT*>(offsetof(Processor, m_idt)); }
static GDT& gdt() { return *reinterpret_cast<GDT*>(read_gs_ptr(offsetof(Processor, m_gdt))); }
static IDT& idt() { return *reinterpret_cast<IDT*>(read_gs_ptr(offsetof(Processor, m_idt))); }
static void* get_current_page_table() { return read_gs_sized<void*>(offsetof(Processor, m_current_page_table)); }
static void set_current_page_table(void* page_table) { write_gs_sized<void*>(offsetof(Processor, m_current_page_table), page_table); }
static void* get_current_page_table() { return read_gs_ptr(offsetof(Processor, m_current_page_table)); }
static void set_current_page_table(void* page_table) { write_gs_ptr(offsetof(Processor, m_current_page_table), page_table); }
static void yield();
static Scheduler& scheduler() { return *read_gs_sized<Scheduler*>(offsetof(Processor, m_scheduler)); }
static Thread* idle_thread() { return reinterpret_cast<Thread*>(read_gs_ptr(offsetof(Processor, m_idle_thread))); }
static SchedulerQueue::Node* get_current_thread() { return reinterpret_cast<SchedulerQueue::Node*>(read_gs_ptr(offsetof(Processor, m_current_thread))); }
static void set_current_thread(SchedulerQueue::Node* thread) { write_gs_ptr(offsetof(Processor, m_current_thread), thread); }
static void handle_ipi();
static void handle_smp_messages();
static void send_smp_message(ProcessorID, const SMPMessage&, bool send_ipi = true);
static void broadcast_smp_message(const SMPMessage&);
static void enter_interrupt(InterruptStack*, InterruptRegisters*);
static void leave_interrupt();
static InterruptStack& get_interrupt_stack();
static InterruptRegisters& get_interrupt_registers();
private:
Processor() = default;
~Processor() { ASSERT_NOT_REACHED(); }
static ProcessorID read_processor_id();
template<typename T>
static T read_gs_sized(uintptr_t offset) requires(sizeof(T) <= 8)
{
@ -173,11 +110,11 @@ namespace Kernel
#undef __ASM_INPUT
}
static void* read_gs_ptr(uintptr_t offset) { return read_gs_sized<void*>(offset); }
static void write_gs_ptr(uintptr_t offset, void* value) { write_gs_sized<void*>(offset, value); }
private:
static ProcessorID s_bsb_id;
static BAN::Atomic<uint8_t> s_processor_count;
static BAN::Atomic<bool> s_is_smp_enabled;
static BAN::Atomic<bool> s_should_print_cpu_load;
ProcessorID m_id { PROCESSOR_NONE };
@ -187,20 +124,11 @@ namespace Kernel
GDT* m_gdt { nullptr };
IDT* m_idt { nullptr };
Scheduler* m_scheduler { nullptr };
Thread* m_idle_thread { nullptr };
SchedulerQueue::Node* m_current_thread { nullptr };
uint64_t m_start_ns { 0 };
uint64_t m_idle_ns { 0 };
uint64_t m_last_update_ns { 0 };
uint64_t m_next_update_ns { 0 };
BAN::Atomic<bool> m_smp_pending_lock { false };
SMPMessage* m_smp_pending { nullptr };
BAN::Atomic<bool> m_smp_free_lock { false };
SMPMessage* m_smp_free { nullptr };
SMPMessage* m_smp_message_storage;
InterruptStack* m_interrupt_stack { nullptr };
InterruptRegisters* m_interrupt_registers { nullptr };
void* m_current_page_table { nullptr };
@ -211,14 +139,3 @@ namespace Kernel
#endif
}
namespace BAN::Formatter
{
template<typename F>
void print_argument(F putc, Kernel::ProcessorID processor_id, const ValueFormat& format)
{
print_argument(putc, processor_id.as_u32(), format);
}
}
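The Processor accessors above read fields through %gs at fixed offsets: each core's GS base points at its own Processor instance, so the same code yields per-CPU values without locking. A simplified x86_64 sketch of the idiom (the real class templates this over operand sizes):

#include <cstdint>

static inline uint64_t read_gs_u64(uintptr_t offset)
{
    uint64_t value;
    asm volatile("movq %%gs:(%1), %0" : "=r"(value) : "r"(offset));
    return value;
}

static inline void write_gs_u64(uintptr_t offset, uint64_t value)
{
    asm volatile("movq %0, %%gs:(%1)" :: "r"(value), "r"(offset) : "memory");
}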

View File

@ -1,149 +1,55 @@
#pragma once
#include <BAN/Array.h>
#include <BAN/ForwardList.h>
#include <BAN/NoCopyMove.h>
#include <kernel/InterruptStack.h>
#include <sys/types.h>
#include <kernel/SchedulerQueue.h>
#include <kernel/Semaphore.h>
#include <kernel/Thread.h>
namespace Kernel
{
class Thread;
class ThreadBlocker;
class SchedulerQueue
{
public:
struct Node
{
Node(Thread* thread)
: thread(thread)
{}
Node* next { nullptr };
Node* prev { nullptr };
Thread* thread;
ThreadBlocker* blocker { nullptr };
uint64_t wake_time_ns { static_cast<uint64_t>(-1) };
uint64_t last_start_ns { 0 };
uint64_t time_used_ns { 0 };
};
public:
void add_thread_to_back(Node*);
void add_thread_with_wake_time(Node*);
template<typename F>
Node* remove_with_condition(F callback);
void remove_node(Node*);
Node* front();
Node* pop_front();
bool empty() const { return m_head == nullptr; }
private:
Node* m_head { nullptr };
Node* m_tail { nullptr };
};
class Scheduler
{
BAN_NON_COPYABLE(Scheduler);
BAN_NON_MOVABLE(Scheduler);
public:
struct NewThreadRequest
{
SchedulerQueue::Node* node;
bool blocked;
};
static BAN::ErrorOr<void> initialize();
static Scheduler& get();
struct UnblockRequest
{
enum class Type
{
ThreadBlocker,
ThreadID,
};
Type type;
union
{
ThreadBlocker* blocker;
pid_t tid;
};
};
[[noreturn]] void start();
public:
static BAN::ErrorOr<Scheduler*> create();
BAN::ErrorOr<void> initialize();
void yield();
void reschedule(InterruptStack*, InterruptRegisters*);
void reschedule_if_idle();
void timer_reschedule();
void irq_reschedule();
void reschedule_if_idling();
void timer_interrupt();
void set_current_thread_sleeping(uint64_t wake_time);
BAN::ErrorOr<void> add_thread(Thread*);
void block_current_thread(ThreadBlocker* thread_blocker, uint64_t wake_time_ns);
void unblock_threads(ThreadBlocker*);
void block_current_thread(Semaphore*, uint64_t wake_time);
void unblock_threads(Semaphore*);
// Makes sleeping or blocked thread with tid active.
void unblock_thread(pid_t tid);
Thread& current_thread();
Thread& idle_thread();
static pid_t current_tid();
pid_t current_tid() const;
bool is_idle() const;
// This is no return if called on current thread
void terminate_thread(Thread*);
private:
Scheduler() = default;
void add_current_to_most_loaded(SchedulerQueue* target_queue);
void update_most_loaded_node_queue(SchedulerQueue::Node*, SchedulerQueue* target_queue);
void remove_node_from_most_loaded(SchedulerQueue::Node*);
void set_current_thread_sleeping_impl(Semaphore* semaphore, uint64_t wake_time);
bool do_unblock(ThreadBlocker*);
bool do_unblock(pid_t);
void do_load_balancing();
void setup_next_thread();
class ProcessorID find_least_loaded_processor() const;
void preempt();
void handle_unblock_request(const UnblockRequest&);
void handle_new_thread_request(const NewThreadRequest&);
BAN::ErrorOr<void> add_thread(Thread*);
private:
SchedulerQueue m_run_queue;
SchedulerQueue m_block_queue;
SchedulerQueue::Node* m_current { nullptr };
bool m_current_will_block { false };
SpinLock m_lock;
uint32_t m_thread_count { 0 };
SchedulerQueue m_active_threads;
SchedulerQueue m_blocking_threads;
InterruptStack* m_interrupt_stack { nullptr };
InterruptRegisters* m_interrupt_registers { nullptr };
uint64_t m_last_reschedule_ns { 0 };
uint64_t m_last_load_balance_ns { 0 };
struct ThreadInfo
{
SchedulerQueue* queue { nullptr };
SchedulerQueue::Node* node { nullptr };
};
BAN::Array<ThreadInfo, 10> m_most_loaded_threads;
uint64_t m_idle_start_ns { 0 };
uint64_t m_idle_ns { 0 };
bool m_should_calculate_max_load_threads { true };
Thread* m_idle_thread { nullptr };
friend class Processor;
friend class Process;
};
}

View File

@ -0,0 +1,127 @@
#pragma once
#include <BAN/Assert.h>
#include <BAN/NoCopyMove.h>
#include <stdint.h>
namespace Kernel
{
class Thread;
class Semaphore;
class SchedulerQueue
{
BAN_NON_COPYABLE(SchedulerQueue);
BAN_NON_MOVABLE(SchedulerQueue);
public:
struct Node
{
Node(Thread* thread)
: thread(thread)
{}
Thread* thread;
uint64_t wake_time { 0 };
Semaphore* semaphore { nullptr };
bool should_block { false };
private:
Node* next { nullptr };
friend class SchedulerQueue;
friend class Scheduler;
};
public:
SchedulerQueue() = default;
~SchedulerQueue() { ASSERT_NOT_REACHED(); }
bool empty() const { return m_front == nullptr; }
Node* pop_front()
{
ASSERT(!empty());
Node* node = m_front;
m_front = m_front->next;
if (m_front == nullptr)
m_back = nullptr;
node->next = nullptr;
return node;
}
void push_back(Node* node)
{
ASSERT(node);
node->next = nullptr;
(empty() ? m_front : m_back->next) = node;
m_back = node;
}
void add_with_wake_time(Node* node)
{
ASSERT(node);
node->next = nullptr;
if (empty() || node->wake_time >= m_back->wake_time)
{
push_back(node);
return;
}
if (node->wake_time < m_front->wake_time)
{
node->next = m_front;
m_front = node;
return;
}
Node* prev = m_front;
for (; node->wake_time >= prev->next->wake_time; prev = prev->next)
continue;
node->next = prev->next;
prev->next = node;
}
void remove_with_wake_time(SchedulerQueue& out, uint64_t current_time)
{
while (!empty() && m_front->wake_time <= current_time)
out.push_back(pop_front());
}
template<typename F>
void remove_with_condition(SchedulerQueue& out, F comp)
{
while (!empty() && comp(m_front))
out.push_back(pop_front());
if (empty())
return;
for (Node* prev = m_front; prev->next;)
{
Node* node = prev->next;
if (!comp(node))
prev = prev->next;
else
{
prev->next = node->next;
if (node == m_back)
m_back = prev;
out.push_back(node);
}
}
}
private:
Node* m_front { nullptr };
Node* m_back { nullptr };
};
}
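An illustrative use of the intrusive queue above: nodes are caller-owned and add_with_wake_time() keeps them sorted, so pop_front() always returns the earliest deadline. (In-kernel these queues are never destroyed; the destructor asserts.)

Kernel::SchedulerQueue::Node a(nullptr), b(nullptr);   // threads omitted
a.wake_time = 200;
b.wake_time = 100;
queue.add_with_wake_time(&a);
queue.add_with_wake_time(&b);   // sorted insert: b now precedes a
// queue.pop_front() yields &b, then &a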

View File

@ -0,0 +1,15 @@
#pragma once
namespace Kernel
{
class Semaphore
{
public:
void block_indefinite();
void block_with_timeout(uint64_t timeout_ms);
void block_with_wake_time(uint64_t wake_time_ms);
void unblock();
};
}
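The pattern this interface supports, as used throughout the diff: a consumer blocks until a producer calls unblock(). Wakeups can be spurious (and, per the ACPI FIXME later in this diff, events can race the block), so callers re-check their condition in a loop:

Kernel::Semaphore semaphore;
bool ready = false;

void consumer()
{
    while (!ready)                     // re-check after every wakeup
        semaphore.block_indefinite();
}

void producer()
{
    ready = true;
    semaphore.unblock();
}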

View File

@ -1,6 +1,6 @@
#pragma once
#include <kernel/ThreadBlocker.h>
#include <kernel/Semaphore.h>
#include <kernel/Storage/ATA/AHCI/Definitions.h>
#include <kernel/Storage/ATA/ATADevice.h>

View File

@ -53,7 +53,7 @@ namespace Kernel
const uint16_t m_ctrl;
Mutex m_mutex;
BAN::Atomic<bool> m_has_got_irq { false };
volatile bool m_has_got_irq { false };
// Non-owning pointers
BAN::Vector<ATADevice*> m_devices;

View File

@ -4,7 +4,7 @@
#include <BAN/Vector.h>
#include <kernel/Interruptable.h>
#include <kernel/Memory/DMARegion.h>
#include <kernel/ThreadBlocker.h>
#include <kernel/Semaphore.h>
#include <kernel/Storage/NVMe/Definitions.h>
namespace Kernel
@ -31,7 +31,7 @@ namespace Kernel
uint32_t m_cq_head { 0 };
uint16_t m_cq_valid_phase { 1 };
ThreadBlocker m_thread_blocker;
Semaphore m_semaphore;
SpinLock m_lock;
BAN::Atomic<size_t> m_used_mask { 0 };
BAN::Atomic<size_t> m_done_mask { 0 };

View File

@ -5,7 +5,7 @@
#include <kernel/Lock/SpinLock.h>
#include <kernel/Terminal/TerminalDriver.h>
#include <kernel/Terminal/termios.h>
#include <kernel/ThreadBlocker.h>
#include <kernel/Semaphore.h>
#include <LibInput/KeyEvent.h>
namespace Kernel
@ -74,7 +74,7 @@ namespace Kernel
{
bool draw_graphics { true };
bool receive_input { true };
ThreadBlocker thread_blocker;
Semaphore semaphore;
};
tty_ctrl_t m_tty_ctrl;
@ -83,7 +83,7 @@ namespace Kernel
BAN::Array<uint8_t, 1024> buffer;
size_t bytes { 0 };
bool flush { false };
ThreadBlocker thread_blocker;
Semaphore semaphore;
};
Buffer m_output;

View File

@ -5,7 +5,7 @@
#include <kernel/Terminal/TerminalDriver.h>
#include <kernel/Terminal/termios.h>
#include <kernel/Terminal/TTY.h>
#include <kernel/ThreadBlocker.h>
#include <kernel/Semaphore.h>
namespace Kernel
{

View File

@ -47,12 +47,10 @@ namespace Kernel
void handle_signal(int signal = 0);
bool add_signal(int signal);
// blocks current thread and returns either on unblock, eintr, spuriously or after timeout
BAN::ErrorOr<void> block_or_eintr_indefinite(ThreadBlocker& thread_blocker);
BAN::ErrorOr<void> block_or_eintr_or_timeout_ms(ThreadBlocker& thread_blocker, uint64_t timeout_ms, bool etimedout) { return block_or_eintr_or_timeout_ns(thread_blocker, timeout_ms * 1'000'000, etimedout); }
BAN::ErrorOr<void> block_or_eintr_or_waketime_ms(ThreadBlocker& thread_blocker, uint64_t wake_time_ms, bool etimedout) { return block_or_eintr_or_waketime_ns(thread_blocker, wake_time_ms * 1'000'000, etimedout); }
BAN::ErrorOr<void> block_or_eintr_or_timeout_ns(ThreadBlocker& thread_blocker, uint64_t timeout_ns, bool etimedout);
BAN::ErrorOr<void> block_or_eintr_or_waketime_ns(ThreadBlocker& thread_blocker, uint64_t wake_time_ns, bool etimedout);
// blocks semaphore and returns either on unblock, eintr, spuriously or after timeout
BAN::ErrorOr<void> block_or_eintr_indefinite(Semaphore& semaphore);
BAN::ErrorOr<void> block_or_eintr_or_timeout(Semaphore& semaphore, uint64_t timeout_ms, bool etimedout);
BAN::ErrorOr<void> block_or_eintr_or_waketime(Semaphore& semaphore, uint64_t wake_time_ms, bool etimedout);
pid_t tid() const { return m_tid; }

View File

@ -1,17 +0,0 @@
#pragma once
namespace Kernel
{
class ThreadBlocker
{
public:
void block_indefinite();
void block_with_timeout_ms(uint64_t timeout_ms) { return block_with_timeout_ns(timeout_ms * 1'000'000); }
void block_with_wake_time_ms(uint64_t wake_time_ms) { return block_with_wake_time_ns(wake_time_ms * 1'000'000); }
void block_with_timeout_ns(uint64_t timeout_ns);
void block_with_wake_time_ns(uint64_t wake_time_ns);
void unblock();
};
}

View File

@ -1,7 +1,6 @@
#pragma once
#include <kernel/Interruptable.h>
#include <kernel/Lock/SpinLock.h>
#include <kernel/Timer/Timer.h>
namespace Kernel
@ -20,11 +19,9 @@ namespace Kernel
private:
void initialize();
uint64_t read_counter() const;
private:
mutable SpinLock m_lock;
uint64_t m_system_time { 0 };
volatile uint64_t m_system_time { 0 };
};
}

View File

@ -29,8 +29,7 @@ namespace Kernel
virtual uint64_t ns_since_boot() const override;
virtual timespec time_since_boot() const override;
void sleep_ms(uint64_t ms) const { return sleep_ns(ms * 1'000'000); }
void sleep_ns(uint64_t ns) const;
void sleep(uint64_t ms) const;
timespec real_time() const;

View File

@ -4,7 +4,6 @@
#include <kernel/Lock/Mutex.h>
#include <kernel/Memory/DMARegion.h>
#include <kernel/ThreadBlocker.h>
#include <kernel/USB/USBManager.h>
#include <kernel/USB/XHCI/Definitions.h>
@ -80,7 +79,7 @@ namespace Kernel
Mutex m_mutex;
Process* m_port_updater { nullptr };
ThreadBlocker m_port_thread_blocker;
Semaphore m_port_semaphore;
BAN::Atomic<bool> m_port_changed { false };
PCI::Device& m_pci_device;

View File

@ -602,7 +602,7 @@ acpi_release_global_lock:
{
if (IO::inw(fadt().pm1a_cnt_blk) & PM1_CNT_SCI_EN)
break;
SystemTimer::get().sleep_ms(10);
SystemTimer::get().sleep(10);
}
if (!(IO::inw(fadt().pm1a_cnt_blk) & PM1_CNT_SCI_EN))
@ -761,7 +761,7 @@ acpi_release_global_lock:
// FIXME: this can cause missing of event if it happens between
// reading the status and blocking
m_event_thread_blocker.block_with_timeout_ms(100);
m_event_semaphore.block_with_timeout(100);
continue;
handle_event:
@ -782,7 +782,7 @@ handle_event:
void ACPI::handle_irq()
{
m_event_thread_blocker.unblock();
m_event_semaphore.unblock();
}
}

View File

@ -244,7 +244,7 @@ namespace Kernel
dprintln("System has {} processors", m_processors.size());
uint8_t bsp_id = Kernel::Processor::current_id().as_u32();
uint8_t bsp_id = Kernel::Processor::current_id();
dprintln("BSP lapic id: {}", bsp_id);
if (m_processors.size() == 1)
@ -267,7 +267,7 @@ namespace Kernel
dprintln("Trying to enable processor (lapic id {})", processor.apic_id);
auto& proc = Kernel::Processor::create(ProcessorID(processor.apic_id));
auto& proc = Kernel::Processor::create(processor.apic_id);
PageTable::with_fast_page((paddr_t)g_ap_init_addr, [&] {
PageTable::fast_page_as_sized<uint32_t>(2) = V2P(proc.stack_top());
});
@ -308,50 +308,19 @@ namespace Kernel
}
// give processor upto 100 * 100 us + 200 us to boot
for (int i = 0; i < 100; i++)
{
if (__atomic_load_n(&g_ap_stack_loaded[0], __ATOMIC_SEQ_CST))
break;
for (int i = 0; *g_ap_stack_loaded == 0 && i < 100; i++)
udelay(100);
}
}
__atomic_store_n(&g_ap_startup_done[0], 1, __ATOMIC_SEQ_CST);
const size_t timeout_ms = SystemTimer::get().ms_since_boot() + 500;
while (__atomic_load_n(&g_ap_running_count[0], __ATOMIC_SEQ_CST) < m_processors.size() - 1)
{
if (SystemTimer::get().ms_since_boot() >= timeout_ms)
Kernel::panic("Could not start all processors");
__builtin_ia32_pause();
}
*g_ap_startup_done = 1;
// give processors 100 us time to increment running count
udelay(100);
dprintln("{} processors started", *g_ap_running_count);
}
void APIC::send_ipi(ProcessorID target)
{
ASSERT(Kernel::Processor::get_interrupt_state() == InterruptState::Disabled);
while ((read_from_local_apic(LAPIC_ICR_LO_REG) & ICR_LO_delivery_status_send_pending) == ICR_LO_delivery_status_send_pending)
__builtin_ia32_pause();
write_to_local_apic(LAPIC_ICR_HI_REG, (read_from_local_apic(LAPIC_ICR_HI_REG) & 0x00FFFFFF) | (target.as_u32() << 24));
write_to_local_apic(LAPIC_ICR_LO_REG,
(read_from_local_apic(LAPIC_ICR_LO_REG) & ICR_LO_reserved_mask)
| ICR_LO_delivery_mode_fixed
| ICR_LO_destination_mode_physical
| ICR_LO_level_assert
| ICR_LO_trigger_mode_level
| ICR_LO_destination_shorthand_none
| (IRQ_VECTOR_BASE + IRQ_IPI)
);
}
void APIC::broadcast_ipi()
{
ASSERT(Kernel::Processor::get_interrupt_state() == InterruptState::Disabled);
while ((read_from_local_apic(LAPIC_ICR_LO_REG) & ICR_LO_delivery_status_send_pending) == ICR_LO_delivery_status_send_pending)
__builtin_ia32_pause();
write_to_local_apic(LAPIC_ICR_HI_REG, (read_from_local_apic(LAPIC_ICR_HI_REG) & 0x00FFFFFF) | 0xFF000000);
write_to_local_apic(LAPIC_ICR_LO_REG,
(read_from_local_apic(LAPIC_ICR_LO_REG) & ICR_LO_reserved_mask)
@ -362,6 +331,8 @@ namespace Kernel
| ICR_LO_destination_shorthand_all_excluding_self
| (IRQ_VECTOR_BASE + IRQ_IPI)
);
while ((read_from_local_apic(LAPIC_ICR_LO_REG) & ICR_LO_delivery_status_send_pending) == ICR_LO_delivery_status_send_pending)
__builtin_ia32_pause();
}
void APIC::enable()
@ -369,6 +340,7 @@ namespace Kernel
write_to_local_apic(LAPIC_SIV_REG, read_from_local_apic(LAPIC_SIV_REG) | 0x1FF);
}
uint32_t APIC::read_from_local_apic(ptrdiff_t offset)
{
return MMIO::read32(m_local_apic_vaddr + offset);
@ -427,7 +399,7 @@ namespace Kernel
redir.vector = IRQ_VECTOR_BASE + irq;
redir.mask = 0;
// FIXME: distribute IRQs more evenly?
redir.destination = Kernel::Processor::bsb_id().as_u32();
redir.destination = Kernel::Processor::bsb_id();
ioapic->write(IOAPIC_REDIRS + gsi * 2, redir.lo_dword);
ioapic->write(IOAPIC_REDIRS + gsi * 2 + 1, redir.hi_dword);

View File

@ -49,7 +49,7 @@ namespace Kernel
for (auto& device : s_instance->m_devices)
device->update();
}
SystemTimer::get().sleep_ms(10);
SystemTimer::get().sleep(10);
}
}, nullptr
);
@ -65,7 +65,7 @@ namespace Kernel
while (!s_instance->m_should_sync)
{
LockFreeGuard _(s_instance->m_device_lock);
s_instance->m_sync_thread_blocker.block_indefinite();
s_instance->m_sync_semaphore.block_indefinite();
}
for (auto& device : s_instance->m_devices)
@ -84,11 +84,11 @@ namespace Kernel
{
while (true)
{
SystemTimer::get().sleep_ms(10'000);
SystemTimer::get().sleep(10000);
LockGuard _(s_instance->m_device_lock);
s_instance->m_should_sync = true;
s_instance->m_sync_thread_blocker.unblock();
s_instance->m_sync_semaphore.unblock();
}
}, nullptr, sync_process
)));
@ -101,7 +101,7 @@ namespace Kernel
{
LockGuard _(m_device_lock);
m_should_sync = true;
m_sync_thread_blocker.unblock();
m_sync_semaphore.unblock();
}
if (should_block)
m_sync_done.block_indefinite();

View File

@ -37,7 +37,7 @@ namespace Kernel
ASSERT(m_writing_count > 0);
m_writing_count--;
if (m_writing_count == 0)
m_thread_blocker.unblock();
m_semaphore.unblock();
}
BAN::ErrorOr<size_t> Pipe::read_impl(off_t, BAN::ByteSpan buffer)
@ -48,7 +48,7 @@ namespace Kernel
if (m_writing_count == 0)
return 0;
LockFreeGuard lock_free(m_mutex);
TRY(Thread::current().block_or_eintr_indefinite(m_thread_blocker));
TRY(Thread::current().block_or_eintr_indefinite(m_semaphore));
}
size_t to_copy = BAN::Math::min<size_t>(buffer.size(), m_buffer.size());
@ -59,7 +59,7 @@ namespace Kernel
m_atime = SystemTimer::get().real_time();
m_thread_blocker.unblock();
m_semaphore.unblock();
return to_copy;
}
@ -77,7 +77,7 @@ namespace Kernel
m_mtime = current_time;
m_ctime = current_time;
m_thread_blocker.unblock();
m_semaphore.unblock();
return buffer.size();
}

View File

@ -10,7 +10,7 @@
#include <kernel/Timer/PIT.h>
#define ISR_LIST_X X(0) X(1) X(2) X(3) X(4) X(5) X(6) X(7) X(8) X(9) X(10) X(11) X(12) X(13) X(14) X(15) X(16) X(17) X(18) X(19) X(20) X(21) X(22) X(23) X(24) X(25) X(26) X(27) X(28) X(29) X(30) X(31)
#define IRQ_LIST_X X(0) X(1) X(2) X(3) X(4) X(5) X(6) X(7) X(8) X(9) X(10) X(11) X(12) X(13) X(14) X(15) X(16) X(17) X(18) X(19) X(20) X(21) X(22) X(23) X(24) X(25) X(26) X(27) X(28) X(29) X(30) X(31)
#define IRQ_LIST_X X(0) X(1) X(2) X(3) X(4) X(5) X(6) X(7) X(8) X(9) X(10) X(11) X(12) X(13) X(14) X(15) X(16) X(17) X(18) X(19) X(20) X(21) X(22) X(23) X(24) X(25) X(26) X(27) X(28) X(29) X(30) X(31) X(32)
namespace Kernel
{
@ -168,22 +168,20 @@ namespace Kernel
asm volatile("cli; 1: hlt; jmp 1b");
}
const pid_t tid = Thread::current_tid();
const pid_t pid = (tid && Thread::current().has_process()) ? Process::current().pid() : 0;
pid_t tid = Scheduler::current_tid();
pid_t pid = tid ? Process::current().pid() : 0;
if (tid)
{
auto& thread = Thread::current();
#if __enable_sse
thread.save_sse();
Thread::current().save_sse();
#endif
if (isr == ISR::PageFault && Thread::current().is_userspace())
if (isr == ISR::PageFault)
{
// Check if stack is OOB
if (ARCH(i686) && !GDT::is_user_segment(interrupt_stack->cs))
; // 32 bit does not push stack pointer when no CPL change happens
else if (thread.userspace_stack_bottom() < interrupt_stack->sp && interrupt_stack->sp <= thread.userspace_stack_top())
auto& thread = Thread::current();
if (thread.userspace_stack_bottom() < interrupt_stack->sp && interrupt_stack->sp <= thread.userspace_stack_top())
; // using userspace stack
else if (thread.kernel_stack_bottom() < interrupt_stack->sp && interrupt_stack->sp <= thread.kernel_stack_top())
; // using kernel stack
@ -200,10 +198,13 @@ namespace Kernel
goto done;
}
// Demand paging is only supported in userspace
if (thread.is_userspace())
{
// Try demand paging on non present pages
PageFaultError page_fault_error;
page_fault_error.raw = error;
if (pid && !page_fault_error.present)
if (!page_fault_error.present)
{
Processor::set_interrupt_state(InterruptState::Enabled);
auto result = Process::current().allocate_page_for_demand_paging(regs->cr2);
@ -221,6 +222,7 @@ namespace Kernel
}
}
}
}
Debug::s_debug_lock.lock();
@ -241,13 +243,13 @@ namespace Kernel
#if ARCH(x86_64)
dwarnln(
"CPU {}: {} (error code: 0x{8H}), pid {}, tid {}\r\n"
"{} (error code: 0x{8H}), pid {}, tid {}\r\n"
"Register dump\r\n"
"rax=0x{16H}, rbx=0x{16H}, rcx=0x{16H}, rdx=0x{16H}\r\n"
"rsp=0x{16H}, rbp=0x{16H}, rdi=0x{16H}, rsi=0x{16H}\r\n"
"rip=0x{16H}, rflags=0x{16H}\r\n"
"cr0=0x{16H}, cr2=0x{16H}, cr3=0x{16H}, cr4=0x{16H}",
Processor::current_id(), isr_exceptions[isr], error, pid, tid,
isr_exceptions[isr], error, pid, tid,
regs->rax, regs->rbx, regs->rcx, regs->rdx,
interrupt_stack->sp, regs->rbp, regs->rdi, regs->rsi,
interrupt_stack->ip, interrupt_stack->flags,
@ -255,13 +257,13 @@ namespace Kernel
);
#elif ARCH(i686)
dwarnln(
"CPU {}: {} (error code: 0x{8H}), pid {}, tid {}\r\n"
"{} (error code: 0x{8H}), pid {}, tid {}\r\n"
"Register dump\r\n"
"eax=0x{8H}, ebx=0x{8H}, ecx=0x{8H}, edx=0x{8H}\r\n"
"esp=0x{8H}, ebp=0x{8H}, edi=0x{8H}, esi=0x{8H}\r\n"
"eip=0x{8H}, eflags=0x{8H}\r\n"
"cr0=0x{8H}, cr2=0x{8H}, cr3=0x{8H}, cr4=0x{8H}",
Processor::current_id(), isr_exceptions[isr], error, pid, tid,
isr_exceptions[isr], error, pid, tid,
regs->eax, regs->ebx, regs->ecx, regs->edx,
interrupt_stack->sp, regs->ebp, regs->edi, regs->esi,
interrupt_stack->ip, interrupt_stack->flags,
@ -320,17 +322,12 @@ done:
extern "C" void cpp_yield_handler(InterruptStack* interrupt_stack, InterruptRegisters* interrupt_registers)
{
// yield is raised through kernel software interrupt
ASSERT(!InterruptController::get().is_in_service(IRQ_YIELD));
ASSERT(!GDT::is_user_segment(interrupt_stack->cs));
Processor::scheduler().reschedule(interrupt_stack, interrupt_registers);
}
extern "C" void cpp_ipi_handler()
{
ASSERT(InterruptController::get().is_in_service(IRQ_IPI));
InterruptController::get().eoi(IRQ_IPI);
Processor::handle_ipi();
Processor::enter_interrupt(interrupt_stack, interrupt_registers);
Scheduler::get().irq_reschedule();
Processor::leave_interrupt();
}
extern "C" void cpp_irq_handler(uint32_t irq)
@ -354,6 +351,8 @@ done:
InterruptController::get().eoi(irq);
if (auto* handler = s_interruptables[irq])
handler->handle_irq();
else if (irq == IRQ_IPI)
Scheduler::get().yield();
else
dprintln("no handler for irq 0x{2H}", irq);
}
@ -362,7 +361,7 @@ done:
if (current_thread.can_add_signal_to_execute())
current_thread.handle_signal();
Processor::scheduler().reschedule_if_idle();
Scheduler::get().reschedule_if_idling();
ASSERT(Thread::current().state() != Thread::State::Terminated);
@ -408,7 +407,6 @@ done:
#undef X
extern "C" void asm_yield_handler();
extern "C" void asm_ipi_handler();
extern "C" void asm_syscall_handler();
IDT* IDT::create()
@ -427,7 +425,6 @@ done:
#undef X
idt->register_interrupt_handler(IRQ_VECTOR_BASE + IRQ_YIELD, asm_yield_handler);
idt->register_interrupt_handler(IRQ_VECTOR_BASE + IRQ_IPI, asm_ipi_handler);
idt->register_syscall_handler(0x80, asm_syscall_handler);
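The page-fault path above tests page_fault_error.present before attempting demand paging, since only faults on non-present pages can be satisfied by allocating a page. A sketch of the x86 error-code layout that decoding assumes (field names here are illustrative; bit meanings per the Intel SDM):

#include <cstdint>

union PageFaultError
{
    struct
    {
        uint32_t present           : 1; // 0 = fault on a non-present page
        uint32_t write             : 1; // 1 = faulting access was a write
        uint32_t user              : 1; // 1 = fault occurred at CPL 3
        uint32_t reserved_write    : 1; // reserved bit set in paging structures
        uint32_t instruction_fetch : 1; // 1 = fault on instruction fetch
    };
    uint32_t raw;
};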

View File

@ -107,23 +107,6 @@ namespace Kernel
}
}
if (m_type == Type::Keyboard)
{
auto& key_event = event.as<const LibInput::RawKeyEvent>();
if (key_event.modifier & LibInput::KeyEvent::Modifier::Pressed)
{
switch (key_event.keycode)
{
case LibInput::keycode_function(1):
Processor::toggle_should_print_cpu_load();
break;
case LibInput::keycode_function(12):
Kernel::panic("Keyboard kernel panic :)");
break;
}
}
}
if (m_event_count == m_max_event_count)
{
m_event_tail = (m_event_tail + 1) % m_max_event_count;
@ -134,7 +117,7 @@ namespace Kernel
m_event_head = (m_event_head + 1) % m_max_event_count;
m_event_count++;
m_event_thread_blocker.unblock();
m_event_semaphore.unblock();
if (m_type == Type::Keyboard && s_keyboard_device)
s_keyboard_device->notify();
if (m_type == Type::Mouse && s_mouse_device)
@ -152,7 +135,7 @@ namespace Kernel
m_event_lock.unlock(state);
{
LockFreeGuard _(m_mutex);
TRY(Thread::current().block_or_eintr_indefinite(m_event_thread_blocker));
TRY(Thread::current().block_or_eintr_indefinite(m_event_semaphore));
}
state = m_event_lock.lock();
}
@ -216,7 +199,7 @@ namespace Kernel
}
LockFreeGuard _(m_mutex);
TRY(Thread::current().block_or_eintr_indefinite(m_thread_blocker));
TRY(Thread::current().block_or_eintr_indefinite(m_semaphore));
}
}
@ -262,7 +245,7 @@ namespace Kernel
}
LockFreeGuard _(m_mutex);
TRY(Thread::current().block_or_eintr_indefinite(m_thread_blocker));
TRY(Thread::current().block_or_eintr_indefinite(m_semaphore));
}
}

View File

@ -126,6 +126,8 @@ namespace Kernel::Input
return;
auto dummy_event = LibInput::KeyboardLayout::get().key_event_from_raw(RawKeyEvent { .modifier = 0, .keycode = keycode.value() });
if (dummy_event.key == Key::F1)
panic("OOF");
uint16_t modifier_mask = 0;
uint16_t toggle_mask = 0;

View File

@ -11,9 +11,36 @@ namespace Kernel
ASSERT(vaddr % PAGE_SIZE == 0);
ASSERT(vaddr > 0);
auto result = TRY(BAN::UniqPtr<VirtualRange>::create(page_table, preallocate_pages, vaddr, size, flags));
VirtualRange* result_ptr = new VirtualRange(page_table, preallocate_pages);
if (result_ptr == nullptr)
return BAN::Error::from_errno(ENOMEM);
auto result = BAN::UniqPtr<VirtualRange>::adopt(result_ptr);
result->m_vaddr = vaddr;
result->m_size = size;
result->m_flags = flags;
ASSERT(page_table.reserve_range(vaddr, size));
TRY(result->initialize());
if (!preallocate_pages)
return result;
size_t needed_pages = size / PAGE_SIZE;
for (size_t i = 0; i < needed_pages; i++)
{
paddr_t paddr = Heap::get().take_free_page();
if (paddr == 0)
{
for (size_t j = 0; j < i; j++)
Heap::get().release_page(page_table.physical_address_of(vaddr + j * PAGE_SIZE));
page_table.unmap_range(vaddr, size);
result->m_vaddr = 0;
return BAN::Error::from_errno(ENOMEM);
}
page_table.map_page_at(paddr, vaddr + i * PAGE_SIZE, flags);
}
result->set_zero();
return result;
}
@ -34,71 +61,34 @@ namespace Kernel
vaddr_t vaddr = page_table.reserve_free_contiguous_pages(size / PAGE_SIZE, vaddr_start, vaddr_end);
if (vaddr == 0)
{
dprintln("no free {} byte area", size);
return BAN::Error::from_errno(ENOMEM);
ASSERT(vaddr >= vaddr_start);
}
ASSERT(vaddr + size <= vaddr_end);
auto result_or_error = BAN::UniqPtr<VirtualRange>::create(page_table, preallocate_pages, vaddr, size, flags);
if (result_or_error.is_error())
{
page_table.unmap_range(vaddr, size);
return result_or_error.release_error();
SpinLockGuard _(page_table);
page_table.unmap_range(vaddr, size); // We have to unmap here to allow reservation in create_to_vaddr()
return create_to_vaddr(page_table, vaddr, size, flags, preallocate_pages);
}
auto result = result_or_error.release_value();
TRY(result->initialize());
return result;
}
VirtualRange::VirtualRange(PageTable& page_table, bool preallocated, vaddr_t vaddr, size_t size, PageTable::flags_t flags)
VirtualRange::VirtualRange(PageTable& page_table, bool preallocated)
: m_page_table(page_table)
, m_preallocated(preallocated)
, m_vaddr(vaddr)
, m_size(size)
, m_flags(flags)
{ }
VirtualRange::~VirtualRange()
{
ASSERT(m_vaddr);
m_page_table.unmap_range(m_vaddr, m_size);
if (m_vaddr == 0)
return;
for (paddr_t paddr : m_paddrs)
if (paddr != 0)
for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
{
paddr_t paddr = m_page_table.physical_address_of(vaddr() + offset);
if (paddr)
Heap::get().release_page(paddr);
}
BAN::ErrorOr<void> VirtualRange::initialize()
{
TRY(m_paddrs.resize(m_size / PAGE_SIZE, 0));
if (!m_preallocated)
return {};
const size_t page_count = m_size / PAGE_SIZE;
for (size_t i = 0; i < page_count; i++)
{
m_paddrs[i] = Heap::get().take_free_page();
if (m_paddrs[i] == 0)
return BAN::Error::from_errno(ENOMEM);
m_page_table.map_page_at(m_paddrs[i], m_vaddr + i * PAGE_SIZE, m_flags);
}
if (&PageTable::current() == &m_page_table || &PageTable::kernel() == &m_page_table)
memset(reinterpret_cast<void*>(m_vaddr), 0, m_size);
else
{
const size_t page_count = m_size / PAGE_SIZE;
for (size_t i = 0; i < page_count; i++)
{
PageTable::with_fast_page(m_paddrs[i], [&] {
memset(PageTable::fast_page_as_ptr(), 0, PAGE_SIZE);
});
}
}
return {};
m_page_table.unmap_range(vaddr(), size());
}
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::clone(PageTable& page_table)
@ -106,51 +96,92 @@ namespace Kernel
ASSERT(&PageTable::current() == &m_page_table);
ASSERT(&m_page_table != &page_table);
auto result = TRY(create_to_vaddr(page_table, m_vaddr, m_size, m_flags, m_preallocated));
auto result = TRY(create_to_vaddr(page_table, vaddr(), size(), flags(), m_preallocated));
SpinLockGuard _(m_lock);
const size_t page_count = m_size / PAGE_SIZE;
for (size_t i = 0; i < page_count; i++)
SpinLockGuard _(m_page_table);
for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
{
if (m_paddrs[i] == 0)
continue;
if (!result->m_preallocated)
if (!m_preallocated && m_page_table.physical_address_of(vaddr() + offset))
{
result->m_paddrs[i] = Heap::get().take_free_page();
if (result->m_paddrs[i] == 0)
paddr_t paddr = Heap::get().take_free_page();
if (paddr == 0)
return BAN::Error::from_errno(ENOMEM);
result->m_page_table.map_page_at(result->m_paddrs[i], m_vaddr + i * PAGE_SIZE, m_flags);
result->m_page_table.map_page_at(paddr, vaddr() + offset, m_flags);
}
PageTable::with_fast_page(result->m_paddrs[i], [&] {
memcpy(PageTable::fast_page_as_ptr(), reinterpret_cast<void*>(m_vaddr + i * PAGE_SIZE), PAGE_SIZE);
PageTable::with_fast_page(result->m_page_table.physical_address_of(vaddr() + offset), [&] {
memcpy(PageTable::fast_page_as_ptr(), (void*)(vaddr() + offset), PAGE_SIZE);
});
}
return result;
}
BAN::ErrorOr<void> VirtualRange::allocate_page_for_demand_paging(vaddr_t vaddr)
BAN::ErrorOr<void> VirtualRange::allocate_page_for_demand_paging(vaddr_t address)
{
ASSERT(!m_preallocated);
ASSERT(vaddr % PAGE_SIZE == 0);
ASSERT(contains(vaddr));
ASSERT(contains(address));
ASSERT(&PageTable::current() == &m_page_table);
const size_t index = (vaddr - m_vaddr) / PAGE_SIZE;
ASSERT(m_paddrs[index] == 0);
vaddr_t vaddr = address & PAGE_ADDR_MASK;
ASSERT(m_page_table.physical_address_of(vaddr) == 0);
SpinLockGuard _(m_lock);
m_paddrs[index] = Heap::get().take_free_page();
if (m_paddrs[index] == 0)
paddr_t paddr = Heap::get().take_free_page();
if (paddr == 0)
return BAN::Error::from_errno(ENOMEM);
m_page_table.map_page_at(m_paddrs[index], vaddr, m_flags);
memset(reinterpret_cast<void*>(vaddr), 0, PAGE_SIZE);
m_page_table.map_page_at(paddr, vaddr, m_flags);
memset((void*)vaddr, 0x00, PAGE_SIZE);
return {};
}
void VirtualRange::set_zero()
{
if (&PageTable::current() == &m_page_table || &PageTable::kernel() == &m_page_table)
{
memset((void*)vaddr(), 0, size());
return;
}
for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
{
PageTable::with_fast_page(m_page_table.physical_address_of(vaddr() + offset), [&] {
memset(PageTable::fast_page_as_ptr(), 0x00, PAGE_SIZE);
});
}
}
void VirtualRange::copy_from(size_t offset, const uint8_t* buffer, size_t bytes)
{
if (bytes == 0)
return;
// Verify no overflow
ASSERT(bytes <= size());
ASSERT(offset <= size());
ASSERT(offset <= size() - bytes);
if (&PageTable::current() == &m_page_table || &PageTable::kernel() == &m_page_table)
{
memcpy((void*)(vaddr() + offset), buffer, bytes);
return;
}
size_t page_offset = offset % PAGE_SIZE;
size_t page_index = offset / PAGE_SIZE;
while (bytes > 0)
{
PageTable::with_fast_page(m_page_table.physical_address_of(vaddr() + page_index * PAGE_SIZE), [&] {
memcpy(PageTable::fast_page_as_ptr(page_offset), buffer, PAGE_SIZE - page_offset);
});
buffer += PAGE_SIZE - page_offset;
bytes -= PAGE_SIZE - page_offset;
page_offset = 0;
page_index++;
}
}
}
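For reference, a standalone sketch of the page-spanning copy arithmetic that copy_from() walks above; names and the 4 KiB page size are illustrative assumptions. Note the per-iteration chunk is clamped to the remaining byte count, which keeps the final partial page from underflowing the counter:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>

// Illustrative 4 KiB page size, matching the kernel's PAGE_SIZE.
constexpr size_t kPageSize = 4096;

// Copy `bytes` from `src` into a page-indexed destination, splitting
// the copy at page boundaries the same way copy_from() uses fast pages.
void copy_page_wise(uint8_t* dst, size_t offset, const uint8_t* src, size_t bytes)
{
	size_t page_offset = offset % kPageSize;
	size_t page_index  = offset / kPageSize;
	while (bytes > 0)
	{
		// Clamp so the last partial chunk cannot underflow `bytes`.
		const size_t chunk = std::min(bytes, kPageSize - page_offset);
		std::memcpy(dst + page_index * kPageSize + page_offset, src, chunk);
		src         += chunk;
		bytes       -= chunk;
		page_offset  = 0;
		page_index  += 1;
	}
}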

View File

@@ -79,7 +79,7 @@ namespace Kernel
if (it != m_arp_table.end())
return it->value;
}
Processor::yield();
Scheduler::get().yield();
}
return BAN::Error::from_errno(ETIMEDOUT);
@@ -150,7 +150,7 @@ namespace Kernel
while (m_pending_packets.empty())
{
m_pending_lock.unlock(state);
m_pending_thread_blocker.block_indefinite();
m_pending_semaphore.block_indefinite();
state = m_pending_lock.lock();
}
auto packet = m_pending_packets.front();
@@ -178,7 +178,7 @@ namespace Kernel
}
m_pending_packets.push({ .interface = interface, .packet = arp_packet });
m_pending_thread_blocker.unblock();
m_pending_semaphore.unblock();
}
}

View File

@@ -308,7 +308,7 @@ namespace Kernel
while (m_pending_packets.empty())
{
m_pending_lock.unlock(state);
m_pending_thread_blocker.block_indefinite();
m_pending_semaphore.block_indefinite();
state = m_pending_lock.lock();
}
auto packet = m_pending_packets.front();
@@ -367,7 +367,7 @@ namespace Kernel
m_pending_total_size += ipv4_header.total_length;
m_pending_packets.push({ .interface = interface });
m_pending_thread_blocker.unblock();
m_pending_semaphore.unblock();
}
}

View File

@@ -75,7 +75,7 @@ namespace Kernel
while (m_pending_connections.empty())
{
LockFreeGuard _(m_mutex);
TRY(Thread::current().block_or_eintr_indefinite(m_thread_blocker));
TRY(Thread::current().block_or_eintr_indefinite(m_semaphore));
}
auto connection = m_pending_connections.front();
@@ -113,7 +113,7 @@ namespace Kernel
if (SystemTimer::get().ms_since_boot() >= wake_time_ms)
return BAN::Error::from_errno(ECONNABORTED);
LockFreeGuard free(m_mutex);
TRY(Thread::current().block_or_eintr_or_waketime_ms(return_inode->m_thread_blocker, wake_time_ms, true));
TRY(Thread::current().block_or_eintr_or_waketime(return_inode->m_semaphore, wake_time_ms, true));
}
if (address)
@@ -170,7 +170,7 @@ namespace Kernel
if (SystemTimer::get().ms_since_boot() >= wake_time_ms)
return BAN::Error::from_errno(ECONNREFUSED);
LockFreeGuard free(m_mutex);
TRY(Thread::current().block_or_eintr_or_waketime_ms(m_thread_blocker, wake_time_ms, true));
TRY(Thread::current().block_or_eintr_or_waketime(m_semaphore, wake_time_ms, true));
}
return {};
@@ -207,7 +207,7 @@ namespace Kernel
if (m_state != State::Established)
return return_with_maybe_zero();
LockFreeGuard free(m_mutex);
TRY(Thread::current().block_or_eintr_indefinite(m_thread_blocker));
TRY(Thread::current().block_or_eintr_indefinite(m_semaphore));
}
const uint32_t to_recv = BAN::Math::min<uint32_t>(buffer.size(), m_recv_window.data_size);
@@ -249,7 +249,7 @@ namespace Kernel
if (m_send_window.data_size + message.size() <= m_send_window.buffer->size())
break;
LockFreeGuard free(m_mutex);
TRY(Thread::current().block_or_eintr_indefinite(m_thread_blocker));
TRY(Thread::current().block_or_eintr_indefinite(m_semaphore));
}
{
@@ -259,14 +259,14 @@ namespace Kernel
}
const uint32_t target_ack = m_send_window.start_seq + m_send_window.data_size;
m_thread_blocker.unblock();
m_semaphore.unblock();
while (m_send_window.current_ack < target_ack)
{
if (m_state != State::Established)
return return_with_maybe_zero();
LockFreeGuard free(m_mutex);
TRY(Thread::current().block_or_eintr_indefinite(m_thread_blocker));
TRY(Thread::current().block_or_eintr_indefinite(m_semaphore));
}
return message.size();
@@ -597,7 +597,7 @@ namespace Kernel
}
}
m_thread_blocker.unblock();
m_semaphore.unblock();
}
void TCPSocket::set_connection_as_closed()
@@ -743,11 +743,11 @@ namespace Kernel
}
}
m_thread_blocker.unblock();
m_thread_blocker.block_with_wake_time_ms(current_ms + retransmit_timeout_ms);
m_semaphore.unblock();
m_semaphore.block_with_wake_time(current_ms + retransmit_timeout_ms);
}
m_thread_blocker.unblock();
m_semaphore.unblock();
}
}

View File

@@ -70,7 +70,7 @@ namespace Kernel
m_packets.emplace(packet_info);
m_packet_total_size += payload.size();
m_packet_thread_blocker.unblock();
m_packet_semaphore.unblock();
}
BAN::ErrorOr<void> UDPSocket::bind_impl(const sockaddr* address, socklen_t address_len)
@@ -93,7 +93,7 @@ namespace Kernel
while (m_packets.empty())
{
m_packet_lock.unlock(state);
TRY(Thread::current().block_or_eintr_indefinite(m_packet_thread_blocker));
TRY(Thread::current().block_or_eintr_indefinite(m_packet_semaphore));
state = m_packet_lock.lock();
}

View File

@@ -73,7 +73,7 @@ namespace Kernel
return BAN::Error::from_errno(EINVAL);
while (connection_info.pending_connections.empty())
TRY(Thread::current().block_or_eintr_indefinite(connection_info.pending_thread_blocker));
TRY(Thread::current().block_or_eintr_indefinite(connection_info.pending_semaphore));
BAN::RefPtr<UnixDomainSocket> pending;
@@ -81,7 +81,7 @@ namespace Kernel
SpinLockGuard _(connection_info.pending_lock);
pending = connection_info.pending_connections.front();
connection_info.pending_connections.pop();
connection_info.pending_thread_blocker.unblock();
connection_info.pending_semaphore.unblock();
}
BAN::RefPtr<UnixDomainSocket> return_inode;
@@ -162,15 +162,15 @@ namespace Kernel
if (target_info.pending_connections.size() < target_info.pending_connections.capacity())
{
MUST(target_info.pending_connections.push(this));
target_info.pending_thread_blocker.unblock();
target_info.pending_semaphore.unblock();
break;
}
}
TRY(Thread::current().block_or_eintr_indefinite(target_info.pending_thread_blocker));
TRY(Thread::current().block_or_eintr_indefinite(target_info.pending_semaphore));
}
while (!connection_info.connection_done)
Processor::yield();
Scheduler::get().yield();
return {};
}
@@ -241,7 +241,7 @@ namespace Kernel
while (m_packet_sizes.full() || m_packet_size_total + packet.size() > s_packet_buffer_size)
{
m_packet_lock.unlock(state);
TRY(Thread::current().block_or_eintr_indefinite(m_packet_thread_blocker));
TRY(Thread::current().block_or_eintr_indefinite(m_packet_semaphore));
state = m_packet_lock.lock();
}
@@ -252,7 +252,7 @@ namespace Kernel
if (!is_streaming())
m_packet_sizes.push(packet.size());
m_packet_thread_blocker.unblock();
m_packet_semaphore.unblock();
m_packet_lock.unlock(state);
return {};
}
@@ -357,7 +357,7 @@ namespace Kernel
while (m_packet_size_total == 0)
{
m_packet_lock.unlock(state);
TRY(Thread::current().block_or_eintr_indefinite(m_packet_thread_blocker));
TRY(Thread::current().block_or_eintr_indefinite(m_packet_semaphore));
state = m_packet_lock.lock();
}
@@ -376,7 +376,7 @@ namespace Kernel
memmove(packet_buffer, packet_buffer + nread, m_packet_size_total - nread);
m_packet_size_total -= nread;
m_packet_thread_blocker.unblock();
m_packet_semaphore.unblock();
m_packet_lock.unlock(state);
return nread;

View File

@@ -219,7 +219,7 @@ namespace Kernel::PCI
dprintln("{}", res.error());
break;
default:
dprintln("unsupported serial bus controller (pci {2H}.{2H}.{2H})", pci_device.class_code(), pci_device.subclass(), pci_device.prog_if());
dprintln("unsupported serail bus controller (pci {2H}.{2H}.{2H})", pci_device.class_code(), pci_device.subclass(), pci_device.prog_if());
break;
}
break;

View File

@@ -82,13 +82,12 @@ namespace Kernel
void Process::register_to_scheduler()
{
// FIXME: Allow failing...
{
SpinLockGuard _(s_process_lock);
MUST(s_processes.push_back(this));
}
for (auto* thread : m_threads)
MUST(Processor::scheduler().add_thread(thread));
MUST(Scheduler::get().add_thread(thread));
}
Process* Process::create_kernel()
@@ -207,10 +206,10 @@ namespace Kernel
ProcFileSystem::get().on_process_delete(*this);
m_exit_status.exited = true;
m_exit_status.thread_blocker.unblock();
m_exit_status.semaphore.unblock();
while (m_exit_status.waiting > 0)
Processor::yield();
Scheduler::get().yield();
m_process_lock.lock();
@@ -251,6 +250,16 @@ namespace Kernel
m_exit_status.exit_code = __WGENEXITCODE(status, signal);
while (!m_threads.empty())
m_threads.front()->on_exit();
//for (auto* thread : m_threads)
// if (thread != &Thread::current())
// Scheduler::get().terminate_thread(thread);
//if (this == &Process::current())
//{
// m_threads.clear();
// Processor::set_interrupt_state(InterruptState::Disabled);
// Thread::current().setup_process_cleanup();
// Scheduler::get().yield();
//}
}
size_t Process::proc_meminfo(off_t offset, BAN::ByteSpan buffer) const
@@ -525,13 +534,13 @@ namespace Kernel
m_cmdline = BAN::move(str_argv);
m_environ = BAN::move(str_envp);
Processor::set_interrupt_state(InterruptState::Disabled);
asm volatile("cli");
}
m_has_called_exec = true;
m_threads.front()->setup_exec();
Processor::yield();
Scheduler::get().yield();
ASSERT_NOT_REACHED();
}
@@ -557,13 +566,7 @@ namespace Kernel
return BAN::Error::from_errno(ECHILD);
while (!target->m_exit_status.exited)
{
if (auto ret = Thread::current().block_or_eintr_indefinite(target->m_exit_status.thread_blocker); ret.is_error())
{
target->m_exit_status.waiting--;
return ret.release_error();
}
}
TRY(Thread::current().block_or_eintr_indefinite(target->m_exit_status.semaphore));
int exit_status = target->m_exit_status.exit_code;
target->m_exit_status.waiting--;
@@ -594,12 +597,12 @@ namespace Kernel
if (seconds == 0)
return 0;
const uint64_t wake_time_ms = SystemTimer::get().ms_since_boot() + (seconds * 1000);
SystemTimer::get().sleep_ms(seconds * 1000);
uint64_t wake_time = SystemTimer::get().ms_since_boot() + seconds * 1000;
Scheduler::get().set_current_thread_sleeping(wake_time);
const uint64_t current_ms = SystemTimer::get().ms_since_boot();
if (current_ms < wake_time_ms)
return BAN::Math::div_round_up<long>(wake_time_ms - current_ms, 1000);
uint64_t current_time = SystemTimer::get().ms_since_boot();
if (current_time < wake_time)
return BAN::Math::div_round_up<long>(wake_time - current_time, 1000);
return 0;
}
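Both versions of sys_sleep round the remaining time up to whole seconds, as POSIX sleep() requires. A minimal standalone sketch of that arithmetic (div_round_up is assumed to be plain ceiling division, matching BAN::Math::div_round_up):

#include <cstdint>

// Ceiling division; assumed equivalent of BAN::Math::div_round_up.
constexpr uint64_t div_round_up(uint64_t a, uint64_t b) { return (a + b - 1) / b; }

// Unslept whole seconds reported when a sleep is cut short:
// even 1 ms of remaining time must round up to a full second.
long remaining_seconds(uint64_t wake_time_ms, uint64_t current_ms)
{
	if (current_ms >= wake_time_ms)
		return 0;
	return static_cast<long>(div_round_up(wake_time_ms - current_ms, 1000));
}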
@@ -613,21 +616,23 @@ namespace Kernel
TRY(validate_pointer_access(rmtp, sizeof(timespec)));
}
const uint64_t sleep_ns = (rqtp->tv_sec * 1'000'000'000) + rqtp->tv_nsec;
if (sleep_ns == 0)
uint64_t sleep_ms = rqtp->tv_sec * 1000 + BAN::Math::div_round_up<uint64_t>(rqtp->tv_nsec, 1'000'000);
if (sleep_ms == 0)
return 0;
const uint64_t wake_time_ns = SystemTimer::get().ns_since_boot() + sleep_ns;
SystemTimer::get().sleep_ns(sleep_ns);
uint64_t wake_time_ms = SystemTimer::get().ms_since_boot() + sleep_ms;
const uint64_t current_ns = SystemTimer::get().ns_since_boot();
if (current_ns < wake_time_ns)
Scheduler::get().set_current_thread_sleeping(wake_time_ms);
uint64_t current_ms = SystemTimer::get().ms_since_boot();
if (current_ms < wake_time_ms)
{
if (rmtp)
{
const uint64_t remaining_ns = wake_time_ns - current_ns;
rmtp->tv_sec = remaining_ns / 1'000'000'000;
rmtp->tv_nsec = remaining_ns % 1'000'000'000;
uint64_t remaining_ms = wake_time_ms - current_ms;
rmtp->tv_sec = remaining_ms / 1000;
rmtp->tv_nsec = (remaining_ms % 1000) * 1'000'000;
}
return BAN::Error::from_errno(EINTR);
}
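The two sides of this hunk differ mainly in resolution (nanoseconds vs milliseconds). A standalone sketch of the millisecond variant's conversions, with the request rounded up so a sub-millisecond sleep is not silently dropped:

#include <cstdint>
#include <ctime>

constexpr uint64_t div_round_up_u64(uint64_t a, uint64_t b) { return (a + b - 1) / b; }

// timespec request -> sleep duration in ms, rounding the nanosecond part up.
uint64_t requested_sleep_ms(const timespec& rqtp)
{
	return static_cast<uint64_t>(rqtp.tv_sec) * 1000
		+ div_round_up_u64(rqtp.tv_nsec, 1'000'000);
}

// Remaining ms -> timespec written back through rmtp when returning EINTR.
timespec remaining_timespec(uint64_t remaining_ms)
{
	timespec ts {};
	ts.tv_sec  = static_cast<time_t>(remaining_ms / 1000);
	ts.tv_nsec = static_cast<long>((remaining_ms % 1000) * 1'000'000);
	return ts;
}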
@@ -1129,7 +1134,7 @@ namespace Kernel
break;
LockFreeGuard free(m_process_lock);
SystemTimer::get().sleep_ms(1);
SystemTimer::get().sleep(1);
}
if (arguments->readfds)
@@ -1336,7 +1341,7 @@ namespace Kernel
TRY(validate_pointer_access(args, sizeof(sys_mmap_t)));
}
if (args->prot != PROT_NONE && (args->prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)))
if (args->prot != PROT_NONE && args->prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
return BAN::Error::from_errno(EINVAL);
if (args->flags & MAP_FIXED)
@@ -1617,7 +1622,7 @@ namespace Kernel
{
process.add_pending_signal(signal);
// FIXME: This feels hacky
Processor::scheduler().unblock_thread(process.m_threads.front()->tid());
Scheduler::get().unblock_thread(process.m_threads.front()->tid());
}
return (pid > 0) ? BAN::Iteration::Break : BAN::Iteration::Continue;
}

View File

@@ -1,11 +1,6 @@
#include <kernel/InterruptController.h>
#include <kernel/Memory/kmalloc.h>
#include <kernel/Processor.h>
#include <kernel/Terminal/TerminalDriver.h>
#include <kernel/Thread.h>
#include <kernel/Timer/Timer.h>
extern Kernel::TerminalDriver* g_terminal_driver;
namespace Kernel
{
@@ -13,19 +8,10 @@ namespace Kernel
static constexpr uint32_t MSR_IA32_GS_BASE = 0xC0000101;
ProcessorID Processor::s_bsb_id { PROCESSOR_NONE };
BAN::Atomic<uint8_t> Processor::s_processor_count { 0 };
BAN::Atomic<bool> Processor::s_is_smp_enabled { false };
BAN::Atomic<bool> Processor::s_should_print_cpu_load { false };
static BAN::Atomic<uint8_t> s_processors_created { 0 };
// 32-bit milliseconds are definitely enough, as APs start on boot
static BAN::Atomic<uint32_t> s_first_ap_ready_ms { 0 };
static BAN::Array<Processor, 0xFF> s_processors;
static BAN::Array<ProcessorID, 0xFF> s_processor_ids { PROCESSOR_NONE };
ProcessorID Processor::read_processor_id()
static ProcessorID read_processor_id()
{
uint32_t id;
asm volatile(
@@ -35,18 +21,16 @@ namespace Kernel
: "=b"(id)
:: "eax", "ecx", "edx"
);
return ProcessorID(id);
return id;
}
Processor& Processor::create(ProcessorID id)
{
// bsb is the first processor
if (s_bsb_id == PROCESSOR_NONE && id == PROCESSOR_NONE)
if (s_bsb_id == PROCESSOR_NONE)
s_bsb_id = id = read_processor_id();
if (s_bsb_id == PROCESSOR_NONE || id == PROCESSOR_NONE || id.m_id >= s_processors.size())
Kernel::panic("Trying to initialize invalid processor {}", id.m_id);
auto& processor = s_processors[id.m_id];
auto& processor = s_processors[id];
ASSERT(processor.m_id == PROCESSOR_NONE);
processor.m_id = id;
@@ -60,27 +44,13 @@ namespace Kernel
processor.m_idt = IDT::create();
ASSERT(processor.m_idt);
processor.m_scheduler = MUST(Scheduler::create());
ASSERT(processor.m_scheduler);
SMPMessage* smp_storage = new SMPMessage[0x1000];
ASSERT(smp_storage);
for (size_t i = 0; i < 0xFFF; i++)
smp_storage[i].next = &smp_storage[i + 1];
smp_storage[0xFFF].next = nullptr;
processor.m_smp_pending = nullptr;
processor.m_smp_free = smp_storage;
s_processors_created++;
return processor;
}
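Processor::create preallocates a fixed pool of SMPMessage nodes and threads them into a singly linked free list, so sending a message never allocates. A standalone sketch of the same pool setup and its O(1) take/return operations (pool size and names are illustrative):

#include <cstddef>

struct Node { Node* next; /* message payload lives here */ };

constexpr size_t kPoolSize = 0x1000;

// Link a fixed array into a LIFO free list, as done with smp_storage above.
Node* build_free_list(Node* storage)
{
	for (size_t i = 0; i < kPoolSize - 1; i++)
		storage[i].next = &storage[i + 1];
	storage[kPoolSize - 1].next = nullptr;
	return storage; // head of the free list
}

// O(1) allocation: pop the head of the free list.
Node* take(Node*& free_list)
{
	Node* node = free_list;
	if (node)
		free_list = node->next;
	return node;
}

// O(1) release: push the node back onto the free list.
void give_back(Node*& free_list, Node* node)
{
	node->next = free_list;
	free_list = node;
}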
Processor& Processor::initialize()
{
auto id = read_processor_id();
auto& processor = s_processors[id.m_id];
auto& processor = s_processors[id];
ASSERT(processor.m_gdt);
processor.m_gdt->load();
@@ -102,302 +72,41 @@ namespace Kernel
return processor;
}
ProcessorID Processor::id_from_index(size_t index)
void Processor::allocate_idle_thread()
{
ASSERT(index < s_processor_count);
ASSERT(s_processor_ids[index] != PROCESSOR_NONE);
return s_processor_ids[index];
ASSERT(idle_thread() == nullptr);
auto* idle_thread = MUST(Thread::create_kernel([](void*) { for (;;) asm volatile("hlt"); }, nullptr, nullptr));
write_gs_ptr(offsetof(Processor, m_idle_thread), idle_thread);
}
void Processor::wait_until_processors_ready()
void Processor::enter_interrupt(InterruptStack* interrupt_stack, InterruptRegisters* interrupt_registers)
{
if (s_processors_created == 1)
{
ASSERT(current_is_bsb());
s_processor_count++;
s_processor_ids[0] = current_id();
ASSERT(get_interrupt_state() == InterruptState::Disabled);
ASSERT(read_gs_ptr(offsetof(Processor, m_interrupt_stack)) == nullptr);
write_gs_ptr(offsetof(Processor, m_interrupt_stack), interrupt_stack);
write_gs_ptr(offsetof(Processor, m_interrupt_registers), interrupt_registers);
}
// wait until bsb is ready
if (current_is_bsb())
void Processor::leave_interrupt()
{
s_processor_count = 1;
s_processor_ids[0] = current_id();
// single processor system
if (s_processors_created == 1)
return;
// wait until first AP is ready
const uint64_t timeout_ms = SystemTimer::get().ms_since_boot() + 1000;
while (s_first_ap_ready_ms == 0)
{
if (SystemTimer::get().ms_since_boot() >= timeout_ms)
{
dprintln("Could not initialize any APs :(");
return;
}
__builtin_ia32_pause();
}
}
else
{
// wait until bsb is ready, it shall get index 0
while (s_processor_count == 0)
__builtin_ia32_pause();
auto lookup_index = s_processor_count++;
ASSERT(s_processor_ids[lookup_index] == PROCESSOR_NONE);
s_processor_ids[lookup_index] = current_id();
uint32_t expected = 0;
s_first_ap_ready_ms.compare_exchange(expected, SystemTimer::get().ms_since_boot());
ASSERT(get_interrupt_state() == InterruptState::Disabled);
ASSERT(read_gs_ptr(offsetof(Processor, m_interrupt_stack)) != nullptr);
write_gs_ptr(offsetof(Processor, m_interrupt_stack), nullptr);
write_gs_ptr(offsetof(Processor, m_interrupt_registers), nullptr);
}
// wait until all processors are initialized
InterruptStack& Processor::get_interrupt_stack()
{
const uint32_t timeout_ms = s_first_ap_ready_ms + 1000;
while (s_processor_count < s_processors_created)
{
if (SystemTimer::get().ms_since_boot() >= timeout_ms)
{
if (current_is_bsb())
dprintln("Could not initialize {} processors :(", s_processors_created - s_processor_count);
break;
}
__builtin_ia32_pause();
}
ASSERT(get_interrupt_state() == InterruptState::Disabled);
ASSERT(read_gs_ptr(offsetof(Processor, m_interrupt_stack)));
return *read_gs_sized<InterruptStack*>(offsetof(Processor, m_interrupt_stack));
}
s_is_smp_enabled = true;
}
void Processor::handle_ipi()
InterruptRegisters& Processor::get_interrupt_registers()
{
handle_smp_messages();
}
template<typename F>
void with_atomic_lock(BAN::Atomic<bool>& lock, F callback)
{
bool expected = false;
while (!lock.compare_exchange(expected, true, BAN::MemoryOrder::memory_order_acquire))
{
__builtin_ia32_pause();
expected = false;
}
callback();
lock.store(false, BAN::MemoryOrder::memory_order_release);
}
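A usage sketch of the with_atomic_lock helper above, rewritten against std::atomic so it stands alone; BAN::Atomic is assumed to mirror these semantics. Note that `expected` must be reset after a failed compare-exchange, since the failure path overwrites it with the current value:

#include <atomic>
#include <vector>

std::atomic<bool> g_lock { false };
std::vector<int>  g_values;

template<typename F>
void with_spin_flag(std::atomic<bool>& lock, F callback)
{
	bool expected = false;
	while (!lock.compare_exchange_weak(expected, true, std::memory_order_acquire))
	{
		expected = false;           // failed CAS wrote the current value here
		__builtin_ia32_pause();     // spin hint (x86; GCC/Clang builtin)
	}
	callback();                     // runs with the flag held
	lock.store(false, std::memory_order_release);
}

void record(int value)
{
	with_spin_flag(g_lock, [&] { g_values.push_back(value); });
}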
void Processor::handle_smp_messages()
{
auto state = get_interrupt_state();
set_interrupt_state(InterruptState::Disabled);
auto processor_id = current_id();
auto& processor = s_processors[processor_id.m_id];
SMPMessage* pending = nullptr;
with_atomic_lock(processor.m_smp_pending_lock,
[&]()
{
pending = processor.m_smp_pending;
processor.m_smp_pending = nullptr;
}
);
bool should_preempt = false;
if (pending)
{
// reverse smp message queue from LIFO to FIFO
{
SMPMessage* reversed = nullptr;
for (SMPMessage* message = pending; message;)
{
SMPMessage* next = message->next;
message->next = reversed;
reversed = message;
message = next;
}
pending = reversed;
}
SMPMessage* last_handled = nullptr;
// handle messages
for (auto* message = pending; message; message = message->next)
{
switch (message->type)
{
case SMPMessage::Type::FlushTLB:
for (size_t i = 0; i < message->flush_tlb.page_count; i++)
asm volatile("invlpg (%0)" :: "r"(message->flush_tlb.vaddr + i * PAGE_SIZE) : "memory");
break;
case SMPMessage::Type::NewThread:
processor.m_scheduler->handle_new_thread_request(message->new_thread);
break;
case SMPMessage::Type::UnblockThread:
processor.m_scheduler->handle_unblock_request(message->unblock_thread);
break;
case SMPMessage::Type::SchedulerPreemption:
should_preempt = true;
break;
}
last_handled = message;
}
with_atomic_lock(processor.m_smp_free_lock,
[&]()
{
last_handled->next = processor.m_smp_free;
processor.m_smp_free = pending;
}
);
}
if (should_preempt)
processor.m_scheduler->preempt();
set_interrupt_state(state);
}
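Producers push onto m_smp_pending as a LIFO, so the handler above detaches the whole list and reverses it before processing to restore send order. The reversal, as a standalone sketch:

struct Message
{
	int      payload;
	Message* next;
};

// In-place singly-linked-list reversal: detached LIFO -> FIFO.
Message* lifo_to_fifo(Message* pending)
{
	Message* reversed = nullptr;
	while (pending)
	{
		Message* next = pending->next; // remember the rest of the list
		pending->next = reversed;      // re-point this node at the new head
		reversed = pending;
		pending = next;
	}
	return reversed;
}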
void Processor::send_smp_message(ProcessorID processor_id, const SMPMessage& message, bool send_ipi)
{
ASSERT(processor_id != current_id());
auto state = get_interrupt_state();
set_interrupt_state(InterruptState::Disabled);
auto& processor = s_processors[processor_id.m_id];
// take free message slot
SMPMessage* storage = nullptr;
with_atomic_lock(processor.m_smp_free_lock,
[&]()
{
storage = processor.m_smp_free;
ASSERT(storage && storage->next);
processor.m_smp_free = storage->next;
}
);
// write message
*storage = message;
// push message to pending queue
with_atomic_lock(processor.m_smp_pending_lock,
[&]()
{
storage->next = processor.m_smp_pending;
processor.m_smp_pending = storage;
}
);
if (send_ipi)
InterruptController::get().send_ipi(processor_id);
set_interrupt_state(state);
}
void Processor::broadcast_smp_message(const SMPMessage& message)
{
if (!is_smp_enabled())
return;
auto state = get_interrupt_state();
set_interrupt_state(InterruptState::Disabled);
for (size_t i = 0; i < Processor::count(); i++)
{
auto processor_id = s_processor_ids[i];
if (processor_id != current_id())
send_smp_message(processor_id, message, false);
}
InterruptController::get().broadcast_ipi();
set_interrupt_state(state);
}
void Processor::yield()
{
auto state = get_interrupt_state();
set_interrupt_state(InterruptState::Disabled);
auto& processor_info = s_processors[current_id().as_u32()];
{
constexpr uint64_t load_update_interval_ns = 1'000'000'000;
const uint64_t current_ns = SystemTimer::get().ns_since_boot();
if (scheduler().is_idle())
processor_info.m_idle_ns += current_ns - processor_info.m_start_ns;
if (current_ns >= processor_info.m_next_update_ns)
{
if (s_should_print_cpu_load && g_terminal_driver)
{
const uint64_t duration_ns = current_ns - processor_info.m_last_update_ns;
const uint64_t load_x1000 = 100'000 * (duration_ns - processor_info.m_idle_ns) / duration_ns;
uint32_t x = g_terminal_driver->width() - 16;
uint32_t y = current_id().as_u32();
const auto proc_putc =
[&x, y](char ch)
{
if (x < g_terminal_driver->width() && y < g_terminal_driver->height())
g_terminal_driver->putchar_at(ch, x++, y, TerminalColor::BRIGHT_WHITE, TerminalColor::BLACK);
};
BAN::Formatter::print(proc_putc, "CPU { 2}: { 3}.{3}%", current_id(), load_x1000 / 1000, load_x1000 % 1000);
}
processor_info.m_idle_ns = 0;
processor_info.m_last_update_ns = current_ns;
processor_info.m_next_update_ns += load_update_interval_ns;
}
}
#if ARCH(x86_64)
asm volatile(
"movq %%rsp, %%rcx;"
"movq %[load_sp], %%rsp;"
"int %[yield];"
"movq %%rcx, %%rsp;"
// NOTE: This is offset by 2 pointers since an interrupt without a PL change
// does not push SP and SS. This allows accessing the "whole" interrupt stack.
:: [load_sp]"r"(Processor::current_stack_top() - 2 * sizeof(uintptr_t)),
[yield]"i"(IRQ_VECTOR_BASE + IRQ_YIELD)
: "memory", "rcx"
);
#elif ARCH(i686)
asm volatile(
"movl %%esp, %%ecx;"
"movl %[load_sp], %%esp;"
"int %[yield];"
"movl %%ecx, %%esp;"
// NOTE: This is offset by 2 pointers since an interrupt without a PL change
// does not push SP and SS. This allows accessing the "whole" interrupt stack.
:: [load_sp]"r"(Processor::current_stack_top() - 2 * sizeof(uintptr_t)),
[yield]"i"(IRQ_VECTOR_BASE + IRQ_YIELD)
: "memory", "ecx"
);
#else
#error
#endif
processor_info.m_start_ns = SystemTimer::get().ns_since_boot();
Processor::set_interrupt_state(state);
ASSERT(get_interrupt_state() == InterruptState::Disabled);
ASSERT(read_gs_ptr(offsetof(Processor, m_interrupt_registers)));
return *read_gs_sized<InterruptRegisters*>(offsetof(Processor, m_interrupt_registers));
}
}

View File

@@ -1,715 +1,260 @@
#include <BAN/Optional.h>
#include <BAN/Sort.h>
#include <kernel/Arch.h>
#include <kernel/Attributes.h>
#include <kernel/GDT.h>
#include <kernel/InterruptController.h>
#include <kernel/Process.h>
#include <kernel/Scheduler.h>
#include <kernel/Thread.h>
#include <kernel/Timer/Timer.h>
#define DEBUG_SCHEDULER 0
#define SCHEDULER_ASSERT 1
#if SCHEDULER_ASSERT == 0
#undef ASSERT
#define ASSERT(...)
#endif
#define SCHEDULER_VERIFY_STACK 1
namespace Kernel
{
static constexpr uint64_t s_reschedule_interval_ns = 10'000'000;
static constexpr uint64_t s_load_balance_interval_ns = 1'000'000'000;
static BAN::Atomic<uint8_t> s_schedulers_initialized { 0 };
struct ProcessorInfo
{
uint64_t idle_time_ns { s_load_balance_interval_ns };
uint32_t max_load_threads { 0 };
};
static SpinLock s_processor_info_time_lock;
static BAN::Array<ProcessorInfo, 0xFF> s_processor_infos;
static BAN::Atomic<size_t> s_next_processor_index { 0 };
void SchedulerQueue::add_thread_to_back(Node* node)
{
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
node->next = nullptr;
node->prev = m_tail;
(m_tail ? m_tail->next : m_head) = node;
m_tail = node;
}
void SchedulerQueue::add_thread_with_wake_time(Node* node)
{
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
if (m_tail == nullptr || node->wake_time_ns >= m_tail->wake_time_ns)
return add_thread_to_back(node);
Node* next = m_head;
Node* prev = nullptr;
while (next && node->wake_time_ns > next->wake_time_ns)
{
prev = next;
next = next->next;
}
node->next = next;
node->prev = prev;
(next ? next->prev : m_tail) = node;
(prev ? prev->next : m_head) = node;
}
template<typename F>
SchedulerQueue::Node* SchedulerQueue::remove_with_condition(F callback)
{
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
for (Node* node = m_head; node; node = node->next)
{
if (!callback(node))
continue;
remove_node(node);
return node;
}
return nullptr;
}
void SchedulerQueue::remove_node(Node* node)
{
(node->prev ? node->prev->next : m_head) = node->next;
(node->next ? node->next->prev : m_tail) = node->prev;
node->prev = nullptr;
node->next = nullptr;
}
SchedulerQueue::Node* SchedulerQueue::front()
{
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
ASSERT(!empty());
return m_head;
}
SchedulerQueue::Node* SchedulerQueue::pop_front()
{
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
if (empty())
return nullptr;
Node* result = m_head;
m_head = m_head->next;
(m_head ? m_head->prev : m_tail) = nullptr;
result->next = nullptr;
return result;
}
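SchedulerQueue stores threads in an intrusive doubly linked list without sentinel nodes; the conditional-lvalue idiom `(node->prev ? node->prev->next : m_head) = ...` updates either a neighbour's pointer or the list's own head/tail. A self-contained sketch of that unlink:

struct Node { Node* prev; Node* next; };

struct Queue
{
	Node* head { nullptr };
	Node* tail { nullptr };

	// O(1) unlink without sentinels: each ternary selects which pointer
	// (a neighbour's link or the list's own head/tail) to rewrite.
	void remove(Node* node)
	{
		(node->prev ? node->prev->next : head) = node->next;
		(node->next ? node->next->prev : tail) = node->prev;
		node->prev = nullptr;
		node->next = nullptr;
	}
};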
BAN::ErrorOr<Scheduler*> Scheduler::create()
{
auto* scheduler = new Scheduler();
if (scheduler == nullptr)
return BAN::Error::from_errno(ENOMEM);
return scheduler;
}
static Scheduler* s_instance = nullptr;
BAN::ErrorOr<void> Scheduler::initialize()
{
m_idle_thread = TRY(Thread::create_kernel([](void*) { asm volatile("1: hlt; jmp 1b"); }, nullptr, nullptr));
ASSERT(m_idle_thread);
size_t processor_index = 0;
for (; processor_index < Processor::count(); processor_index++)
if (Processor::id_from_index(processor_index) == Processor::current_id())
break;
ASSERT(processor_index < Processor::count());
// each CPU does load balancing at a different time. This calculates the offset to the other CPUs
m_last_load_balance_ns = s_load_balance_interval_ns * processor_index / Processor::count();
m_idle_ns = -m_last_load_balance_ns;
s_schedulers_initialized++;
while (s_schedulers_initialized < Processor::count())
__builtin_ia32_pause();
ASSERT(s_instance == nullptr);
s_instance = new Scheduler();
ASSERT(s_instance);
Processor::allocate_idle_thread();
return {};
}
void Scheduler::add_current_to_most_loaded(SchedulerQueue* target_queue)
Scheduler& Scheduler::get()
{
ASSERT(s_instance);
return *s_instance;
}
void Scheduler::start()
{
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
ASSERT(!m_active_threads.empty());
bool has_current = false;
for (auto& info : m_most_loaded_threads)
{
if (info.node == m_current)
{
info.queue = target_queue;
has_current = true;
break;
}
// broadcast ipi (yield) for each processor
InterruptController::get().broadcast_ipi();
yield();
ASSERT_NOT_REACHED();
}
if (!has_current)
Thread& Scheduler::current_thread()
{
size_t index = 0;
for (; index < m_most_loaded_threads.size() - 1; index++)
if (m_most_loaded_threads[index].node == nullptr)
break;
m_most_loaded_threads[index].queue = target_queue;
m_most_loaded_threads[index].node = m_current;
auto* current = Processor::get_current_thread();
return current ? *current->thread : *Processor::idle_thread();
}
BAN::sort::sort(m_most_loaded_threads.begin(), m_most_loaded_threads.end(),
[](const ThreadInfo& a, const ThreadInfo& b) -> bool
pid_t Scheduler::current_tid()
{
if (a.node == nullptr || b.node == nullptr)
return a.node;
return a.node->time_used_ns > b.node->time_used_ns;
}
);
if (s_instance == nullptr)
return 0;
return Scheduler::get().current_thread().tid();
}
void Scheduler::update_most_loaded_node_queue(SchedulerQueue::Node* node, SchedulerQueue* target_queue)
void Scheduler::setup_next_thread()
{
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
ASSERT(m_lock.current_processor_has_lock());
for (auto& info : m_most_loaded_threads)
if (auto* current = Processor::get_current_thread())
{
if (info.node == node)
auto* thread = current->thread;
if (thread->state() == Thread::State::Terminated)
{
info.queue = target_queue;
break;
PageTable::kernel().load();
delete thread;
delete current;
}
}
}
void Scheduler::remove_node_from_most_loaded(SchedulerQueue::Node* node)
{
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
size_t i = 0;
for (; i < m_most_loaded_threads.size(); i++)
if (m_most_loaded_threads[i].node == node)
break;
for (; i < m_most_loaded_threads.size() - 1; i++)
m_most_loaded_threads[i] = m_most_loaded_threads[i + 1];
m_most_loaded_threads.back().node = nullptr;
m_most_loaded_threads.back().queue = nullptr;
}
void Scheduler::reschedule(InterruptStack* interrupt_stack, InterruptRegisters* interrupt_registers)
{
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
// If there are no other threads in the run queue, reschedule can be a no-op :)
if (m_run_queue.empty() && !m_current_will_block && current_thread().state() == Thread::State::Executing)
return;
if (m_current == nullptr)
m_idle_ns += SystemTimer::get().ns_since_boot() - m_idle_start_ns;
else
{
switch (m_current->thread->state())
// thread->state() can be NotStarted when calling exec or cleaning up process
if (thread->state() != Thread::State::NotStarted)
{
case Thread::State::Terminated:
remove_node_from_most_loaded(m_current);
PageTable::kernel().load();
delete m_current->thread;
delete m_current;
m_thread_count--;
break;
case Thread::State::Executing:
thread->interrupt_stack() = Processor::get_interrupt_stack();
thread->interrupt_registers() = Processor::get_interrupt_registers();
}
if (current->should_block)
{
const uint64_t current_ns = SystemTimer::get().ns_since_boot();
m_current->thread->interrupt_stack() = *interrupt_stack;
m_current->thread->interrupt_registers() = *interrupt_registers;
m_current->time_used_ns += current_ns - m_current->last_start_ns;
add_current_to_most_loaded(m_current_will_block ? &m_block_queue : &m_run_queue);
if (!m_current_will_block)
m_run_queue.add_thread_to_back(m_current);
current->should_block = false;
m_blocking_threads.add_with_wake_time(current);
}
else
{
m_current_will_block = false;
m_block_queue.add_thread_with_wake_time(m_current);
m_active_threads.push_back(current);
}
break;
}
case Thread::State::NotStarted:
ASSERT(!m_current_will_block);
m_current->time_used_ns = 0;
remove_node_from_most_loaded(m_current);
m_run_queue.add_thread_to_back(m_current);
break;
}
}
while ((m_current = m_run_queue.pop_front()))
SchedulerQueue::Node* node = nullptr;
while (!m_active_threads.empty())
{
if (m_current->thread->state() != Thread::State::Terminated)
node = m_active_threads.pop_front();
if (node->thread->state() != Thread::State::Terminated)
break;
remove_node_from_most_loaded(m_current);
PageTable::kernel().load();
delete m_current->thread;
delete m_current;
m_thread_count--;
delete node->thread;
delete node;
node = nullptr;
}
if (m_current == nullptr)
{
Processor::set_current_thread(node);
auto* thread = node ? node->thread : Processor::idle_thread();
if (thread->has_process())
thread->process().page_table().load();
else
PageTable::kernel().load();
*interrupt_stack = m_idle_thread->interrupt_stack();
*interrupt_registers = m_idle_thread->interrupt_registers();
m_idle_thread->m_state = Thread::State::Executing;
m_idle_start_ns = SystemTimer::get().ns_since_boot();
return;
}
update_most_loaded_node_queue(m_current, nullptr);
auto* thread = m_current->thread;
auto& page_table = thread->has_process() ? thread->process().page_table() : PageTable::kernel();
page_table.load();
if (thread->state() == Thread::State::NotStarted)
thread->m_state = Thread::State::Executing;
Processor::gdt().set_tss_stack(thread->kernel_stack_top());
*interrupt_stack = thread->interrupt_stack();
*interrupt_registers = thread->interrupt_registers();
m_current->last_start_ns = SystemTimer::get().ns_since_boot();
Processor::get_interrupt_stack() = thread->interrupt_stack();
Processor::get_interrupt_registers() = thread->interrupt_registers();
}
void Scheduler::reschedule_if_idle()
void Scheduler::timer_reschedule()
{
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
if (!m_current && !m_run_queue.empty())
Processor::yield();
{
SpinLockGuard _(m_lock);
m_blocking_threads.remove_with_wake_time(m_active_threads, SystemTimer::get().ms_since_boot());
}
void Scheduler::preempt()
{
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
if (Processor::is_smp_enabled())
do_load_balancing();
{
const uint64_t current_ns = SystemTimer::get().ns_since_boot();
while (!m_block_queue.empty() && current_ns >= m_block_queue.front()->wake_time_ns)
{
auto* node = m_block_queue.pop_front();
update_most_loaded_node_queue(node, &m_run_queue);
m_run_queue.add_thread_to_back(node);
}
// Broadcast IPI to all other processors for them
// to perform a reschedule
InterruptController::get().broadcast_ipi();
yield();
}
void Scheduler::yield()
{
const uint64_t current_ns = SystemTimer::get().ns_since_boot();
if (current_ns >= m_last_reschedule_ns + s_reschedule_interval_ns)
{
m_last_reschedule_ns = current_ns;
Processor::yield();
}
}
auto state = Processor::get_interrupt_state();
Processor::set_interrupt_state(InterruptState::Disabled);
ASSERT(!m_lock.current_processor_has_lock());
#if ARCH(x86_64)
asm volatile(
"movq %%rsp, %%rcx;"
"movq %[load_sp], %%rsp;"
"int %[yield];"
"movq %%rcx, %%rsp;"
// NOTE: This is offset by 2 pointers since an interrupt without a PL change
// does not push SP and SS. This allows accessing the "whole" interrupt stack.
:: [load_sp]"r"(Processor::current_stack_top() - 2 * sizeof(uintptr_t)),
[yield]"i"(IRQ_VECTOR_BASE + IRQ_YIELD)
: "memory", "rcx"
);
#elif ARCH(i686)
asm volatile(
"movl %%esp, %%ecx;"
"movl %[load_sp], %%esp;"
"int %[yield];"
"movl %%ecx, %%esp;"
// NOTE: This is offset by 2 pointers since an interrupt without a PL change
// does not push SP and SS. This allows accessing the "whole" interrupt stack.
:: [load_sp]"r"(Processor::current_stack_top() - 2 * sizeof(uintptr_t)),
[yield]"i"(IRQ_VECTOR_BASE + IRQ_YIELD)
: "memory", "ecx"
);
#else
#error
#endif
Processor::set_interrupt_state(state);
}
void Scheduler::timer_interrupt()
void Scheduler::irq_reschedule()
{
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
// FIXME: all processors should use their LAPIC for preemption
if (Processor::is_smp_enabled())
{
ASSERT(Processor::current_is_bsb());
Processor::broadcast_smp_message({
.type = Processor::SMPMessage::Type::SchedulerPreemption,
.scheduler_preemption = 0 // dummy value
});
SpinLockGuard _(m_lock);
setup_next_thread();
}
preempt();
}
void Scheduler::handle_unblock_request(const UnblockRequest& request)
void Scheduler::reschedule_if_idling()
{
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
switch (request.type)
{
case UnblockRequest::Type::ThreadBlocker:
do_unblock(request.blocker);
break;
case UnblockRequest::Type::ThreadID:
do_unblock(request.tid);
break;
default:
ASSERT_NOT_REACHED();
}
}
void Scheduler::handle_new_thread_request(const NewThreadRequest& request)
{
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
if (request.blocked)
m_block_queue.add_thread_with_wake_time(request.node);
else
m_run_queue.add_thread_to_back(request.node);
}
bool Scheduler::do_unblock(ThreadBlocker* blocker)
{
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
// FIXME: This could _easily_ be O(1)
bool did_unblock = false;
if (m_current && m_current->blocker == blocker && m_current_will_block)
{
m_current_will_block = false;
did_unblock = true;
}
SchedulerQueue::Node* match;
while ((match = m_block_queue.remove_with_condition([blocker](const auto* node) { return node->blocker == blocker; })))
{
dprintln_if(DEBUG_SCHEDULER, "CPU {}: unblock blocker {} (tid {})", Processor::current_id(), blocker, match->thread->tid());
update_most_loaded_node_queue(match, &m_run_queue);
m_run_queue.add_thread_to_back(match);
did_unblock = true;
}
return did_unblock;
}
bool Scheduler::do_unblock(pid_t tid)
{
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
// FIXME: This could _easily_ be O(1)
if (m_current && m_current->thread->tid() == tid && m_current_will_block)
{
m_current_will_block = false;
return true;
}
auto* match = m_block_queue.remove_with_condition([tid](const auto* node) { return node->thread->tid() == tid; });
if (match == nullptr)
return false;
dprintln_if(DEBUG_SCHEDULER, "CPU {}: unblock tid {}", Processor::current_id(), tid);
update_most_loaded_node_queue(match, &m_run_queue);
m_run_queue.add_thread_to_back(match);
return true;
}
ProcessorID Scheduler::find_least_loaded_processor() const
{
ProcessorID least_loaded_id = Processor::current_id();
uint64_t most_idle_ns = m_idle_ns;
uint32_t least_max_load_threads = static_cast<uint32_t>(-1);
for (uint8_t i = 0; i < Processor::count(); i++)
{
auto processor_id = Processor::id_from_index(i);
if (processor_id == Processor::current_id())
continue;
const auto& info = s_processor_infos[i];
if (info.idle_time_ns < most_idle_ns || info.max_load_threads > least_max_load_threads)
continue;
least_loaded_id = processor_id;
most_idle_ns = info.idle_time_ns;
least_max_load_threads = info.max_load_threads;
}
return least_loaded_id;
}
void Scheduler::do_load_balancing()
{
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
const uint64_t current_ns = SystemTimer::get().ns_since_boot();
if (current_ns < m_last_load_balance_ns + s_load_balance_interval_ns)
SpinLockGuard _(m_lock);
if (Processor::get_current_thread())
return;
if (m_current == nullptr)
{
m_idle_ns += current_ns - m_idle_start_ns;
m_idle_start_ns = current_ns;
}
else
{
m_current->time_used_ns += current_ns - m_current->last_start_ns;
m_current->last_start_ns = current_ns;
add_current_to_most_loaded(nullptr);
}
if constexpr(DEBUG_SCHEDULER)
{
const uint64_t duration_ns = current_ns - m_last_load_balance_ns;
const uint64_t processing_ns = duration_ns - m_idle_ns;
{
const uint64_t load_percent_x1000 = BAN::Math::div_round_up<uint64_t>(processing_ns * 100'000, duration_ns);
dprintln("CPU {}: { 2}.{3}% ({} threads)", Processor::current_id(), load_percent_x1000 / 1000, load_percent_x1000 % 1000, m_thread_count);
}
if (m_current)
{
const char* name = "unknown";
if (m_current->thread->has_process() && m_current->thread->process().is_userspace() && m_current->thread->process().userspace_info().argv)
name = m_current->thread->process().userspace_info().argv[0];
const uint64_t load_percent_x1000 = BAN::Math::div_round_up<uint64_t>(m_current->time_used_ns * 100'000, processing_ns);
dprintln(" tid { 2}: { 3}.{3}% <{}> current", m_current->thread->tid(), load_percent_x1000 / 1000, load_percent_x1000 % 1000, name);
}
m_run_queue.remove_with_condition(
[&](SchedulerQueue::Node* node)
{
const uint64_t load_percent_x1000 = BAN::Math::div_round_up<uint64_t>(node->time_used_ns * 100'000, processing_ns);
dprintln(" tid { 2}: { 3}.{3}% active", node->thread->tid(), load_percent_x1000 / 1000, load_percent_x1000 % 1000);
return false;
}
);
m_block_queue.remove_with_condition(
[&](SchedulerQueue::Node* node)
{
const uint64_t load_percent_x1000 = BAN::Math::div_round_up<uint64_t>(node->time_used_ns * 100'000, processing_ns);
dprintln(" tid { 2}: { 3}.{3}% blocked", node->thread->tid(), load_percent_x1000 / 1000, load_percent_x1000 % 1000);
return false;
}
);
}
if (!s_processor_info_time_lock.try_lock_interrupts_disabled())
{
dprintln_if(DEBUG_SCHEDULER, "Load balancing cannot keep up");
if (m_active_threads.empty())
return;
}
if (m_idle_ns == 0 && m_should_calculate_max_load_threads)
{
const auto& most_loaded_thread = m_most_loaded_threads.front();
if (most_loaded_thread.node == nullptr || most_loaded_thread.node->time_used_ns == 0)
s_processor_infos[Processor::current_id().as_u32()].max_load_threads = 0;
else
{
const uint64_t duration_ns = current_ns - m_last_load_balance_ns;
const uint64_t max_thread_load_x1000 = 1000 * m_most_loaded_threads.front().node->time_used_ns / duration_ns;
const uint64_t max_load_thread_count = ((2000 / max_thread_load_x1000) + 1) / 2;
s_processor_infos[Processor::current_id().as_u32()].max_load_threads = max_load_thread_count;
}
}
constexpr auto absolute_difference_u64 = [](uint64_t a, uint64_t b) { return (a < b) ? (b - a) : (a - b); };
for (size_t i = 1; i < m_most_loaded_threads.size(); i++)
{
auto& thread_info = m_most_loaded_threads[i];
if (thread_info.node == nullptr)
break;
if (thread_info.node == m_current || thread_info.queue == nullptr)
continue;
auto least_loaded_id = find_least_loaded_processor();
if (least_loaded_id == Processor::current_id())
break;
auto& most_idle_info = s_processor_infos[least_loaded_id.as_u32()];
auto& my_info = s_processor_infos[Processor::current_id().as_u32()];
if (m_idle_ns == 0)
{
if (my_info.max_load_threads == 0)
break;
if (most_idle_info.idle_time_ns == 0)
{
if (most_idle_info.max_load_threads + 1 > my_info.max_load_threads - 1)
break;
my_info.max_load_threads -= 1;
most_idle_info.max_load_threads += 1;
dprintln_if(DEBUG_SCHEDULER, "CPU {}: sending tid {} to CPU {} (max load)", Processor::current_id(), thread_info.node->thread->tid(), least_loaded_id);
}
else
{
my_info.max_load_threads -= 1;
most_idle_info.idle_time_ns = 0;
most_idle_info.max_load_threads = 1;
dprintln_if(DEBUG_SCHEDULER, "CPU {}: sending tid {} to CPU {}", Processor::current_id(), thread_info.node->thread->tid(), least_loaded_id);
}
}
else
{
const uint64_t my_current_proc_ns = s_load_balance_interval_ns - BAN::Math::min(s_load_balance_interval_ns, m_idle_ns);
const uint64_t other_current_proc_ns = s_load_balance_interval_ns - BAN::Math::min(s_load_balance_interval_ns, most_idle_info.idle_time_ns);
const uint64_t current_proc_diff_ns = absolute_difference_u64(my_current_proc_ns, other_current_proc_ns);
const uint64_t my_new_proc_ns = my_current_proc_ns - BAN::Math::min(thread_info.node->time_used_ns, my_current_proc_ns);
const uint64_t other_new_proc_ns = other_current_proc_ns + thread_info.node->time_used_ns;
const uint64_t new_proc_diff_ns = absolute_difference_u64(my_new_proc_ns, other_new_proc_ns);
// require a 10% decrease between CPU loads to send a thread to the other CPU
if (new_proc_diff_ns >= current_proc_diff_ns || (100 * (current_proc_diff_ns - new_proc_diff_ns) / current_proc_diff_ns) < 10)
continue;
most_idle_info.idle_time_ns -= BAN::Math::min(thread_info.node->time_used_ns, most_idle_info.idle_time_ns);
m_idle_ns += thread_info.node->time_used_ns;
dprintln_if(DEBUG_SCHEDULER, "CPU {}: sending tid {} to CPU {}", Processor::current_id(), thread_info.node->thread->tid(), least_loaded_id);
}
thread_info.node->time_used_ns = 0;
{
auto& my_queue = (thread_info.queue == &m_run_queue) ? m_run_queue : m_block_queue;
my_queue.remove_node(thread_info.node);
m_thread_count--;
}
Processor::send_smp_message(least_loaded_id, {
.type = Processor::SMPMessage::Type::NewThread,
.new_thread = {
.node = thread_info.node,
.blocked = thread_info.queue == &m_block_queue
}
});
thread_info.node = nullptr;
thread_info.queue = nullptr;
if (m_idle_ns == 0)
break;
}
s_processor_infos[Processor::current_id().as_u32()].idle_time_ns = m_idle_ns;
s_processor_info_time_lock.unlock(InterruptState::Disabled);
if (m_current)
m_current->time_used_ns = 0;
for (auto& thread_info : m_most_loaded_threads)
thread_info = {};
m_run_queue .remove_with_condition([&](SchedulerQueue::Node* node) { node->time_used_ns = 0; return false; });
m_block_queue.remove_with_condition([&](SchedulerQueue::Node* node) { node->time_used_ns = 0; return false; });
m_idle_ns = 0;
m_should_calculate_max_load_threads = true;
m_last_load_balance_ns += s_load_balance_interval_ns;
yield();
}
BAN::ErrorOr<void> Scheduler::add_thread(Thread* thread)
{
auto* new_node = new SchedulerQueue::Node(thread);
if (new_node == nullptr)
auto* node = new SchedulerQueue::Node(thread);
if (node == nullptr)
return BAN::Error::from_errno(ENOMEM);
const size_t processor_index = s_next_processor_index++ % Processor::count();
const auto processor_id = Processor::id_from_index(processor_index);
if (processor_id == Processor::current_id())
{
auto state = Processor::get_interrupt_state();
Processor::set_interrupt_state(InterruptState::Disabled);
m_run_queue.add_thread_to_back(new_node);
m_thread_count++;
Processor::set_interrupt_state(state);
}
else
{
Processor::send_smp_message(processor_id, {
.type = Processor::SMPMessage::Type::NewThread,
.new_thread = {
.node = new_node,
.blocked = false
}
});
}
SpinLockGuard _(m_lock);
m_active_threads.push_back(node);
return {};
}
void Scheduler::block_current_thread(ThreadBlocker* blocker, uint64_t wake_time_ns)
void Scheduler::terminate_thread(Thread* thread)
{
auto state = Processor::get_interrupt_state();
Processor::set_interrupt_state(InterruptState::Disabled);
auto state = m_lock.lock();
m_current->blocker = blocker;
m_current->wake_time_ns = wake_time_ns;
m_current_will_block = true;
Processor::yield();
ASSERT(thread->state() == Thread::State::Executing);
thread->m_state = Thread::State::Terminated;
thread->interrupt_stack().sp = Processor::current_stack_top();
m_lock.unlock(InterruptState::Disabled);
// actual deletion will be done while rescheduling
if (&current_thread() == thread)
{
yield();
ASSERT_NOT_REACHED();
}
Processor::set_interrupt_state(state);
}
void Scheduler::unblock_threads(ThreadBlocker* blocker)
void Scheduler::set_current_thread_sleeping_impl(Semaphore* semaphore, uint64_t wake_time)
{
auto state = Processor::get_interrupt_state();
Processor::set_interrupt_state(InterruptState::Disabled);
auto state = m_lock.lock();
do_unblock(blocker);
auto* current = Processor::get_current_thread();
current->semaphore = semaphore;
current->wake_time = wake_time;
current->should_block = true;
Processor::broadcast_smp_message({
.type = Processor::SMPMessage::Type::UnblockThread,
.unblock_thread = {
.type = UnblockRequest::Type::ThreadBlocker,
.blocker = blocker
}
});
m_lock.unlock(InterruptState::Disabled);
yield();
Processor::set_interrupt_state(state);
}
void Scheduler::set_current_thread_sleeping(uint64_t wake_time)
{
set_current_thread_sleeping_impl(nullptr, wake_time);
}
void Scheduler::block_current_thread(Semaphore* semaphore, uint64_t wake_time)
{
set_current_thread_sleeping_impl(semaphore, wake_time);
}
void Scheduler::unblock_threads(Semaphore* semaphore)
{
SpinLockGuard _(m_lock);
m_blocking_threads.remove_with_condition(m_active_threads, [&](auto* node) { return node->semaphore == semaphore; });
}
void Scheduler::unblock_thread(pid_t tid)
{
auto state = Processor::get_interrupt_state();
Processor::set_interrupt_state(InterruptState::Disabled);
if (!do_unblock(tid))
{
Processor::broadcast_smp_message({
.type = Processor::SMPMessage::Type::UnblockThread,
.unblock_thread = {
.type = UnblockRequest::Type::ThreadID,
.tid = tid
}
});
}
Processor::set_interrupt_state(state);
}
Thread& Scheduler::current_thread()
{
if (m_current)
return *m_current->thread;
return *m_idle_thread;
}
Thread& Scheduler::idle_thread()
{
return *m_idle_thread;
}
pid_t Scheduler::current_tid() const
{
return m_current ? m_current->thread->tid() : 0;
}
bool Scheduler::is_idle() const
{
return m_current == nullptr;
SpinLockGuard _(m_lock);
m_blocking_threads.remove_with_condition(m_active_threads, [&](auto* node) { return node->thread->tid() == tid; });
}
}

View File

@@ -0,0 +1,28 @@
#include <kernel/Scheduler.h>
#include <kernel/Semaphore.h>
#include <kernel/Timer/Timer.h>
namespace Kernel
{
void Semaphore::block_indefinite()
{
Scheduler::get().block_current_thread(this, ~(uint64_t)0);
}
void Semaphore::block_with_timeout(uint64_t timeout_ms)
{
Scheduler::get().block_current_thread(this, SystemTimer::get().ms_since_boot() + timeout_ms);
}
void Semaphore::block_with_wake_time(uint64_t wake_time)
{
Scheduler::get().block_current_thread(this, wake_time);
}
void Semaphore::unblock()
{
Scheduler::get().unblock_threads(this);
}
}
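Nearly every call site in this diff uses the same shape around Semaphore: drop the spinlock, block, re-take the lock, and re-check the condition, because unblock() wakes every waiter. A schematic sketch against the kernel's own types (the SpinLock header path is an assumption; this only compiles inside the kernel tree):

#include <kernel/Lock/SpinLock.h>
#include <kernel/Semaphore.h>

namespace Kernel
{
	template<typename Queue>
	void wait_for_item(SpinLock& lock, Semaphore& semaphore, Queue& queue)
	{
		auto state = lock.lock();
		while (queue.empty())
		{
			lock.unlock(state);           // never sleep holding a spinlock
			semaphore.block_indefinite(); // woken by a producer's unblock()
			state = lock.lock();          // re-take and re-check the predicate
		}
		// ... consume queue.front() under the lock ...
		lock.unlock(state);
	}
}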

View File

@@ -8,7 +8,7 @@
namespace Kernel
{
static constexpr uint64_t s_ata_timeout_ms = 1000;
static constexpr uint64_t s_ata_timeout = 1000;
static void start_cmd(volatile HBAPortMemorySpace* port)
{
@@ -118,9 +118,9 @@ namespace Kernel
command.c = 1;
command.command = ATA_COMMAND_IDENTIFY;
const uint64_t timeout_ms = SystemTimer::get().ms_since_boot() + s_ata_timeout_ms;
uint64_t timeout = SystemTimer::get().ms_since_boot() + s_ata_timeout;
while (m_port->tfd & (ATA_STATUS_BSY | ATA_STATUS_DRQ))
if (SystemTimer::get().ms_since_boot() >= timeout_ms)
if (SystemTimer::get().ms_since_boot() >= timeout)
return BAN::Error::from_errno(ETIMEDOUT);
m_port->ci = 1 << slot.value();
@@ -158,17 +158,17 @@ namespace Kernel
{
static constexpr uint64_t poll_timeout_ms = 10;
const auto start_time_ms = SystemTimer::get().ms_since_boot();
auto start_time = SystemTimer::get().ms_since_boot();
while (SystemTimer::get().ms_since_boot() < start_time_ms + poll_timeout_ms)
while (SystemTimer::get().ms_since_boot() < start_time + poll_timeout_ms)
if (!(m_port->ci & (1 << command_slot)))
return {};
// FIXME: This should actually block once ThreadBlocker supports blocking with a timeout.
// FIXME: This should actually block once semaphores support blocking with a timeout.
// This doesn't allow the scheduler to go properly idle.
while (SystemTimer::get().ms_since_boot() < start_time_ms + s_ata_timeout_ms)
while (SystemTimer::get().ms_since_boot() < start_time + s_ata_timeout)
{
Processor::yield();
Scheduler::get().yield();
if (!(m_port->ci & (1 << command_slot)))
return {};
}

View File

@@ -67,7 +67,9 @@ namespace Kernel
static void select_delay()
{
SystemTimer::get().sleep_ns(400);
auto time = SystemTimer::get().ns_since_boot();
while (SystemTimer::get().ns_since_boot() < time + 400)
continue;
}
void ATABus::select_device(bool secondary)
@@ -104,7 +106,7 @@ namespace Kernel
io_write(ATA_PORT_CONTROL, ATA_CONTROL_nIEN);
io_write(ATA_PORT_COMMAND, ATA_COMMAND_IDENTIFY);
SystemTimer::get().sleep_ms(1);
SystemTimer::get().sleep(1);
// No device on port
if (io_read(ATA_PORT_STATUS) == 0)
@@ -128,7 +130,7 @@ namespace Kernel
}
io_write(ATA_PORT_COMMAND, ATA_COMMAND_IDENTIFY_PACKET);
SystemTimer::get().sleep_ms(1);
SystemTimer::get().sleep(1);
if (auto res = wait(true); res.is_error())
{
@@ -147,17 +149,13 @@ namespace Kernel
ASSERT(!m_has_got_irq);
if (io_read(ATA_PORT_STATUS) & ATA_STATUS_ERR)
dprintln("ATA Error: {}", error());
m_has_got_irq.store(true);
m_has_got_irq = true;
}
void ATABus::block_until_irq()
{
bool expected { true };
while (!m_has_got_irq.compare_exchange(expected, false))
{
Processor::pause();
expected = true;
}
while (!__sync_bool_compare_and_swap(&m_has_got_irq, true, false))
__builtin_ia32_pause();
}
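block_until_irq pairs with the IRQ handler through a single boolean: the handler sets it, and the waiter atomically swaps it back to false before proceeding, so one IRQ releases exactly one wait. A standalone sketch with std::atomic (the kernel's BAN::Atomic and the __sync builtin are assumed equivalent):

#include <atomic>

std::atomic<bool> g_got_irq { false };

// Called from the interrupt handler.
void on_irq()
{
	g_got_irq.store(true, std::memory_order_release);
}

// Busy-wait until the handler has fired, consuming the flag on the way out.
void block_until_irq()
{
	bool expected = true;
	// CAS true -> false; on failure `expected` is overwritten and must be reset.
	while (!g_got_irq.compare_exchange_weak(expected, false, std::memory_order_acquire))
	{
		expected = true;
		__builtin_ia32_pause(); // spin hint (x86; GCC/Clang builtin)
	}
}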
uint8_t ATABus::io_read(uint16_t port)

View File

@@ -1,6 +1,6 @@
#include <kernel/Lock/LockGuard.h>
#include <kernel/Scheduler.h>
#include <kernel/Storage/NVMe/Queue.h>
#include <kernel/Thread.h>
#include <kernel/Timer/Timer.h>
namespace Kernel
@@ -44,7 +44,7 @@ namespace Kernel
m_doorbell.cq_head = m_cq_head;
m_thread_blocker.unblock();
m_semaphore.unblock();
}
uint16_t NVMeQueue::submit_command(NVMe::SubmissionQueueEntry& sqe)
@@ -66,15 +66,15 @@ namespace Kernel
m_doorbell.sq_tail = m_sq_tail;
}
const uint64_t start_time_ms = SystemTimer::get().ms_since_boot();
while (!(m_done_mask & cid_mask) && SystemTimer::get().ms_since_boot() < start_time_ms + s_nvme_command_poll_timeout_ms)
const uint64_t start_time = SystemTimer::get().ms_since_boot();
while (!(m_done_mask & cid_mask) && SystemTimer::get().ms_since_boot() < start_time + s_nvme_command_poll_timeout_ms)
continue;
// FIXME: There is a possible race condition if the done mask is set before
// the scheduler has blocked the current thread.
// EINTR should also be handled here.
while (!(m_done_mask & cid_mask) && SystemTimer::get().ms_since_boot() < start_time_ms + s_nvme_command_timeout_ms)
m_thread_blocker.block_with_wake_time_ms(start_time_ms + s_nvme_command_timeout_ms);
while (!(m_done_mask & cid_mask) && SystemTimer::get().ms_since_boot() < start_time + s_nvme_command_timeout_ms)
Scheduler::get().block_current_thread(&m_semaphore, start_time + s_nvme_command_timeout_ms);
if (m_done_mask & cid_mask)
{
@@ -93,7 +93,7 @@ namespace Kernel
while (~m_used_mask == 0)
{
m_lock.unlock(state);
m_thread_blocker.block_with_timeout_ms(s_nvme_command_timeout_ms);
m_semaphore.block_with_timeout(s_nvme_command_timeout_ms);
state = m_lock.lock();
}
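submit_command above waits in two phases: a short busy-poll that catches fast completions cheaply, then a blocking wait with a timeout for the slow ones. A schematic sketch of that shape, using the kernel APIs as they appear in this diff (the helper and its durations are illustrative):

#include <kernel/Semaphore.h>
#include <kernel/Timer/Timer.h>

namespace Kernel
{
	// Returns true once `done()` reports completion, false on timeout.
	template<typename Done>
	bool poll_then_block(Done done, Semaphore& semaphore,
	                     uint64_t poll_ms, uint64_t timeout_ms)
	{
		const uint64_t start_ms = SystemTimer::get().ms_since_boot();

		// Phase 1: spin briefly; fast commands finish here without sleeping.
		while (!done() && SystemTimer::get().ms_since_boot() < start_ms + poll_ms)
			continue;

		// Phase 2: block until completion or the hard timeout.
		while (!done() && SystemTimer::get().ms_since_boot() < start_ms + timeout_ms)
			semaphore.block_with_wake_time(start_ms + timeout_ms);

		return done();
	}
}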

View File

@@ -57,7 +57,7 @@ namespace Kernel
if ((flags & TTY_FLAG_ENABLE_INPUT) && !m_tty_ctrl.receive_input)
{
m_tty_ctrl.receive_input = true;
m_tty_ctrl.thread_blocker.unblock();
m_tty_ctrl.semaphore.unblock();
}
if (flags & TTY_FLAG_ENABLE_OUTPUT)
m_tty_ctrl.draw_graphics = true;
@@ -94,7 +94,7 @@ namespace Kernel
while (true)
{
while (!TTY::current()->m_tty_ctrl.receive_input)
TTY::current()->m_tty_ctrl.thread_blocker.block_indefinite();
TTY::current()->m_tty_ctrl.semaphore.block_indefinite();
LibInput::RawKeyEvent event;
size_t read = MUST(inode->read(0, BAN::ByteSpan::from(event)));
@@ -237,7 +237,7 @@ namespace Kernel
if (ch == '\x04' && m_termios.canonical)
{
m_output.flush = true;
m_output.thread_blocker.unblock();
m_output.semaphore.unblock();
return;
}
@@ -279,7 +279,7 @@ namespace Kernel
if (ch == '\n' || !m_termios.canonical)
{
m_output.flush = true;
m_output.thread_blocker.unblock();
m_output.semaphore.unblock();
}
}
@@ -338,7 +338,7 @@ namespace Kernel
while (!m_output.flush)
{
LockFreeGuard _(m_mutex);
TRY(Thread::current().block_or_eintr_indefinite(m_output.thread_blocker));
TRY(Thread::current().block_or_eintr_indefinite(m_output.semaphore));
}
if (m_output.bytes == 0)
@@ -356,7 +356,7 @@ namespace Kernel
if (m_output.bytes == 0)
m_output.flush = false;
m_output.thread_blocker.unblock();
m_output.semaphore.unblock();
return to_copy;
}

View File

@@ -120,12 +120,7 @@ namespace Kernel
Thread& Thread::current()
{
return Processor::scheduler().current_thread();
}
pid_t Thread::current_tid()
{
return Processor::scheduler().current_tid();
return Scheduler::get().current_thread();
}
Process& Thread::process()
@@ -401,36 +396,36 @@ namespace Kernel
{
m_signal_pending_mask |= mask;
if (this != &Thread::current())
Processor::scheduler().unblock_thread(tid());
Scheduler::get().unblock_thread(tid());
return true;
}
return false;
}
BAN::ErrorOr<void> Thread::block_or_eintr_indefinite(ThreadBlocker& thread_blocker)
BAN::ErrorOr<void> Thread::block_or_eintr_indefinite(Semaphore& semaphore)
{
if (is_interrupted_by_signal())
return BAN::Error::from_errno(EINTR);
thread_blocker.block_indefinite();
semaphore.block_indefinite();
if (is_interrupted_by_signal())
return BAN::Error::from_errno(EINTR);
return {};
}
BAN::ErrorOr<void> Thread::block_or_eintr_or_timeout_ns(ThreadBlocker& thread_blocker, uint64_t timeout_ns, bool etimedout)
BAN::ErrorOr<void> Thread::block_or_eintr_or_timeout(Semaphore& semaphore, uint64_t timeout_ms, bool etimedout)
{
const uint64_t wake_time_ns = SystemTimer::get().ns_since_boot() + timeout_ns;
return block_or_eintr_or_waketime_ns(thread_blocker, wake_time_ns, etimedout);
uint64_t wake_time_ms = SystemTimer::get().ms_since_boot() + timeout_ms;
return block_or_eintr_or_waketime(semaphore, wake_time_ms, etimedout);
}
BAN::ErrorOr<void> Thread::block_or_eintr_or_waketime_ns(ThreadBlocker& thread_blocker, uint64_t wake_time_ns, bool etimedout)
BAN::ErrorOr<void> Thread::block_or_eintr_or_waketime(Semaphore& semaphore, uint64_t wake_time_ms, bool etimedout)
{
if (is_interrupted_by_signal())
return BAN::Error::from_errno(EINTR);
thread_blocker.block_with_timeout_ns(wake_time_ns);
semaphore.block_with_wake_time(wake_time_ms);
if (is_interrupted_by_signal())
return BAN::Error::from_errno(EINTR);
if (etimedout && SystemTimer::get().ms_since_boot() >= wake_time_ns)
if (etimedout && SystemTimer::get().ms_since_boot() >= wake_time_ms)
return BAN::Error::from_errno(ETIMEDOUT);
return {};
}
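
The pair of helpers above separates three outcomes of a blocking wait: a normal wakeup, interruption by a pending signal (EINTR), and expiry of an absolute deadline (ETIMEDOUT). A userspace analogue of that decision logic, assuming std::condition_variable and simulated signal state rather than the kernel's API:

#include <cerrno>
#include <chrono>
#include <condition_variable>
#include <mutex>

// Returns 0 on a normal wakeup, EINTR if the (simulated) signal flag is set,
// ETIMEDOUT if the absolute deadline passed first -- mirroring the shape of
// block_or_eintr_or_waketime above.
int wait_eintr_or_timeout(std::condition_variable& cv, std::mutex& m,
                          const bool& woken, const bool& signal_pending,
                          std::chrono::steady_clock::time_point deadline)
{
	std::unique_lock lock(m);
	if (signal_pending)
		return EINTR; // checked before sleeping, as in the kernel helper
	cv.wait_until(lock, deadline, [&] { return woken || signal_pending; });
	if (signal_pending)
		return EINTR; // a signal arrived while blocked
	if (!woken && std::chrono::steady_clock::now() >= deadline)
		return ETIMEDOUT; // deadline expired without a wakeup
	return 0;
}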
@ -449,12 +444,15 @@ namespace Kernel
{
Processor::set_interrupt_state(InterruptState::Disabled);
setup_process_cleanup();
Processor::yield();
ASSERT_NOT_REACHED();
Scheduler::get().yield();
}
else
Scheduler::get().terminate_thread(this);
}
else
{
Scheduler::get().terminate_thread(this);
}
m_state = State::Terminated;
Processor::yield();
ASSERT_NOT_REACHED();
}

View File

@ -1,28 +0,0 @@
#include <kernel/Processor.h>
#include <kernel/ThreadBlocker.h>
#include <kernel/Timer/Timer.h>
namespace Kernel
{
void ThreadBlocker::block_indefinite()
{
Processor::scheduler().block_current_thread(this, ~static_cast<uint64_t>(0));
}
void ThreadBlocker::block_with_timeout_ns(uint64_t timeout_ns)
{
Processor::scheduler().block_current_thread(this, SystemTimer::get().ns_since_boot() + timeout_ns);
}
void ThreadBlocker::block_with_wake_time_ns(uint64_t wake_time_ns)
{
Processor::scheduler().block_current_thread(this, wake_time_ns);
}
void ThreadBlocker::unblock()
{
Processor::scheduler().unblock_threads(this);
}
}
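
The file above (removed on the right-hand side) shows that ThreadBlocker is a thin forwarding layer: every call resolves to block_current_thread with an absolute wake time in nanoseconds, with ~0 standing in for "never". Outside the kernel the same three entry points can be approximated with a condition variable; a sketch under that assumption, not the kernel implementation:

#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <mutex>

class ThreadBlockerAnalogue
{
public:
	void block_indefinite()
	{
		std::unique_lock lock(m_mutex);
		const uint64_t seen = m_generation;
		m_cv.wait(lock, [&] { return m_generation != seen; });
	}

	void block_with_timeout_ns(uint64_t timeout_ns)
	{
		block_until(std::chrono::steady_clock::now()
			+ std::chrono::nanoseconds(timeout_ns));
	}

	// The kernel passes nanoseconds since boot; a steady_clock time_point
	// plays that role here.
	void block_with_wake_time(std::chrono::steady_clock::time_point wake_time)
	{
		block_until(wake_time);
	}

	// Wakes every current waiter, like unblock_threads() above; the
	// generation counter prevents a waiter from missing a wakeup that
	// happens just before it sleeps.
	void unblock()
	{
		{
			std::lock_guard lock(m_mutex);
			m_generation++;
		}
		m_cv.notify_all();
	}

private:
	void block_until(std::chrono::steady_clock::time_point deadline)
	{
		std::unique_lock lock(m_mutex);
		const uint64_t seen = m_generation;
		m_cv.wait_until(lock, deadline, [&] { return m_generation != seen; });
	}

	std::mutex m_mutex;
	std::condition_variable m_cv;
	uint64_t m_generation = 0;
};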

View File

@ -4,7 +4,7 @@
#include <kernel/InterruptController.h>
#include <kernel/Memory/PageTable.h>
#include <kernel/MMIO.h>
#include <kernel/Processor.h>
#include <kernel/Scheduler.h>
#include <kernel/Timer/HPET.h>
#define HPET_PERIOD_MAX 0x05F5E100
@ -272,7 +272,7 @@ namespace Kernel
m_last_ticks = current_ticks;
}
Processor::scheduler().timer_interrupt();
Scheduler::get().timer_reschedule();
}
uint64_t HPET::ms_since_boot() const

View File

@ -1,7 +1,7 @@
#include <kernel/IDT.h>
#include <kernel/InterruptController.h>
#include <kernel/IO.h>
#include <kernel/Processor.h>
#include <kernel/Scheduler.h>
#include <kernel/Timer/PIT.h>
#define PIT_IRQ 0
@ -54,32 +54,23 @@ namespace Kernel
void PIT::handle_irq()
{
{
SpinLockGuard _(m_lock);
m_system_time++;
}
Processor::scheduler().timer_interrupt();
}
uint64_t PIT::read_counter() const
{
SpinLockGuard _(m_lock);
return m_system_time;
m_system_time = m_system_time + 1;
Kernel::Scheduler::get().timer_reschedule();
}
uint64_t PIT::ms_since_boot() const
{
return read_counter() * (MS_PER_S / TICKS_PER_SECOND);
return m_system_time * (MS_PER_S / TICKS_PER_SECOND);
}
uint64_t PIT::ns_since_boot() const
{
return read_counter() * (NS_PER_S / TICKS_PER_SECOND);
return m_system_time * (NS_PER_S / TICKS_PER_SECOND);
}
timespec PIT::time_since_boot() const
{
uint64_t ticks = read_counter();
uint64_t ticks = m_system_time;
return timespec {
.tv_sec = ticks / TICKS_PER_SECOND,
.tv_nsec = (long)((ticks % TICKS_PER_SECOND) * (NS_PER_S / TICKS_PER_SECOND))
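
The timespec conversion above is plain integer arithmetic: whole seconds are ticks divided by the tick rate, and the remainder scales to nanoseconds. A worked standalone example, assuming a 1000 Hz tick rate for illustration (the driver's actual TICKS_PER_SECOND is configured elsewhere):

#include <cstdint>
#include <cstdio>
#include <ctime>

// Tick rate assumed for this example only.
constexpr uint64_t TICKS_PER_SECOND = 1000;
constexpr uint64_t NS_PER_S = 1'000'000'000;

timespec ticks_to_timespec(uint64_t ticks)
{
	return timespec {
		.tv_sec = static_cast<time_t>(ticks / TICKS_PER_SECOND),
		.tv_nsec = static_cast<long>((ticks % TICKS_PER_SECOND)
			* (NS_PER_S / TICKS_PER_SECOND)),
	};
}

int main()
{
	// 123456 ticks at 1000 Hz -> 123 s plus 456 ticks of 1'000'000 ns each
	timespec ts = ticks_to_timespec(123456);
	std::printf("%lld s, %ld ns\n", (long long)ts.tv_sec, ts.tv_nsec);
}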

View File

@ -69,17 +69,15 @@ namespace Kernel
return m_timer->time_since_boot();
}
void SystemTimer::sleep_ns(uint64_t ns) const
void SystemTimer::sleep(uint64_t ms) const
{
if (ns == 0)
if (ms == 0)
return;
const uint64_t wake_time_ns = ns_since_boot() + ns;
Processor::scheduler().block_current_thread(nullptr, wake_time_ns);
//const uint64_t current_time_ns = ns_since_boot();
//if (current_time_ns < wake_time_ns)
// dwarnln("sleep woke {} ms too soon", BAN::Math::div_round_up<uint64_t>(wake_time_ns - current_time_ns, 1'000'000));
uint64_t wake_time = ms_since_boot() + ms;
Scheduler::get().set_current_thread_sleeping(wake_time);
uint64_t current_time = ms_since_boot();
if (current_time < wake_time)
dwarnln("sleep woke {} ms too soon", wake_time - current_time);
}
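
Both versions of the sleep above turn a relative duration into an absolute wake time before blocking, which is what makes the "woke too soon" check meaningful and keeps repeated sleeps from drifting. The same idea in portable C++, as a sketch rather than the kernel code:

#include <chrono>
#include <cstdio>
#include <thread>

int main()
{
	using clock = std::chrono::steady_clock;

	// Compute the absolute deadline once, then block until it -- analogous
	// to wake_time = now + duration; block_current_thread(nullptr, wake_time).
	const auto wake_time = clock::now() + std::chrono::milliseconds(50);
	std::this_thread::sleep_until(wake_time);

	// The early-wakeup check from the hunk above.
	if (clock::now() < wake_time)
		std::puts("sleep woke too soon");
}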
timespec SystemTimer::real_time() const

View File

@ -250,7 +250,7 @@ namespace Kernel
bool expected { true };
while (!m_port_changed.compare_exchange(expected, false))
{
m_port_thread_blocker.block_with_timeout_ms(100);
m_port_semaphore.block_with_timeout(100);
expected = true;
}
}
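
The loop above consumes the "port changed" flag atomically: compare_exchange succeeds only when the flag is true and resets it to false in the same step, so a single event is never observed twice. A standalone sketch of that consume-and-reset idiom with std::atomic (names invented):

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

std::atomic<bool> port_changed { false };

void wait_for_port_change()
{
	bool expected = true;
	// Succeeds only if the flag was true, atomically resetting it to false.
	while (!port_changed.compare_exchange_weak(expected, false))
	{
		expected = true; // the failed exchange overwrote it with false
		std::this_thread::sleep_for(std::chrono::milliseconds(100));
	}
	std::puts("port change consumed");
}

int main()
{
	std::thread waiter(wait_for_port_change);
	std::this_thread::sleep_for(std::chrono::milliseconds(250));
	port_changed = true; // event-handler side, as in the second hunk above
	waiter.join();
}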
@ -482,7 +482,7 @@ namespace Kernel
break;
}
m_port_changed = true;
m_port_thread_blocker.unblock();
m_port_semaphore.unblock();
break;
}
case XHCI::TRBType::BandwidthRequestEvent:

View File

@ -35,7 +35,6 @@ struct ParsedCommandLine
{
bool force_pic = false;
bool disable_serial = false;
bool disable_smp = false;
BAN::StringView console = "tty0"_sv;
BAN::StringView root;
};
@ -72,8 +71,6 @@ static void parse_command_line()
cmdline.force_pic = true;
else if (argument == "noserial")
cmdline.disable_serial = true;
else if (argument == "nosmp")
cmdline.disable_smp = true;
else if (argument.size() > 5 && argument.substring(0, 5) == "root=")
cmdline.root = argument.substring(5);
else if (argument.size() > 8 && argument.substring(0, 8) == "console=")
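
The dispatch above matches bare flags by equality and prefixed arguments by a length check plus substring compare. The same structure in standalone form, using std::string_view in place of BAN::StringView (the force_pic trigger is elided in the hunk, so its flag name below is assumed):

#include <cstdio>
#include <string_view>

struct ParsedCommandLine
{
	bool force_pic = false;
	bool disable_serial = false;
	std::string_view console = "tty0";
	std::string_view root;
};

void parse_argument(ParsedCommandLine& cmdline, std::string_view argument)
{
	if (argument == "noapic") // flag name assumed for illustration
		cmdline.force_pic = true;
	else if (argument == "noserial")
		cmdline.disable_serial = true;
	else if (argument.size() > 5 && argument.substr(0, 5) == "root=")
		cmdline.root = argument.substr(5);
	else if (argument.size() > 8 && argument.substr(0, 8) == "console=")
		cmdline.console = argument.substr(8);
}

int main()
{
	ParsedCommandLine cmdline;
	parse_argument(cmdline, "root=/dev/sda2");
	std::printf("root=%.*s\n", (int)cmdline.root.size(), cmdline.root.data());
}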
@ -108,7 +105,7 @@ extern "C" void kernel_main(uint32_t boot_magic, uint32_t boot_info)
parse_boot_info(boot_magic, boot_info);
dprintln("boot info parsed");
Processor::create(PROCESSOR_NONE);
Processor::create(0);
Processor::initialize();
dprintln("BSP initialized");
@ -143,7 +140,6 @@ extern "C" void kernel_main(uint32_t boot_magic, uint32_t boot_info)
if (g_terminal_driver)
dprintln("Framebuffer terminal initialized");
if (!cmdline.disable_smp)
InterruptController::get().initialize_multiprocessor();
ProcFileSystem::initialize();
@ -167,11 +163,12 @@ extern "C" void kernel_main(uint32_t boot_magic, uint32_t boot_info)
Random::initialize();
dprintln("RNG initialized");
Processor::wait_until_processors_ready();
MUST(Processor::scheduler().initialize());
MUST(Scheduler::initialize());
dprintln("Scheduler initialized");
Scheduler& scheduler = Scheduler::get();
Process::create_kernel(init2, nullptr);
Processor::yield();
scheduler.start();
ASSERT_NOT_REACHED();
}
@ -232,11 +229,14 @@ extern "C" void ap_main()
Processor::initialize();
PageTable::kernel().initial_load();
Processor::allocate_idle_thread();
InterruptController::get().enable();
Processor::wait_until_processors_ready();
MUST(Processor::scheduler().initialize());
dprintln("ap{} initialized", Processor::current_id());
asm volatile("sti; 1: hlt; jmp 1b");
// wait until the scheduler has started and we get an IRQ to reschedule
Processor::set_interrupt_state(InterruptState::Enabled);
while (true)
asm volatile("hlt");
ASSERT_NOT_REACHED();
}

View File

@ -40,8 +40,8 @@ int tolower_l(int, locale_t);
int toupper(int);
int toupper_l(int, locale_t);
#define _toupper(val) toupper(val)
#define _tolower(val) tolower(val)
#define _toupper(val) ::toupper(val)
#define _tolower(val) ::tolower(val)
__END_DECLS
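
The :: prefix matters when these macros are expanded in a scope that declares its own toupper or tolower: the qualified form always resolves to the global libc function, while the unqualified form is captured by whatever is in scope. A contrived illustration:

#include <ctype.h>
#include <cstdio>

#define TOUPPER_QUALIFIED(val) ::toupper(val)
#define TOUPPER_UNQUALIFIED(val) toupper(val)

namespace shadowing
{
	int toupper(int) { return '?'; } // local overload that shadows libc's

	void demo()
	{
		std::printf("%c\n", TOUPPER_QUALIFIED('a'));   // 'A' via ::toupper
		std::printf("%c\n", TOUPPER_UNQUALIFIED('a')); // '?' via the overload
	}
}

int main() { shadowing::demo(); }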

View File

@ -15,7 +15,7 @@ add_executable($PROGRAM_NAME \${SOURCES})
banan_link_library($PROGRAM_NAME ban)
banan_link_library($PROGRAM_NAME libc)
install(TARGETS $PROGRAM_NAME OPTIONAL)
install(TARGETS $PROGRAM_NAME)
EOF
cat > $PROGRAM_NAME/main.cpp << EOF

View File

@ -6,4 +6,4 @@ add_executable(loadfont ${SOURCES})
banan_link_library(loadfont ban)
banan_link_library(loadfont libc)
install(TARGETS loadfont OPTIONAL)
install(TARGETS loadfont)