Compare commits


12 Commits

Author      SHA1        Date                        Message
Bananymous  23a2f8b903  2024-03-07 17:01:17 +02:00  Kernel: Cleanup multiprocessor startup by removing magic numbers
Bananymous  29fd682672  2024-03-07 16:05:29 +02:00  Kernel: Store current processor pointer in IA32_GS_BASE
                                                    This allows easier access to processors fields
Bananymous  efed67cbd0  2024-03-06 16:01:52 +02:00  BAN: Remove unnecessary default constructor from Array
Bananymous  6234a5bc0b  2024-03-06 16:00:19 +02:00  Kernel: Move multiprocessor initialize after framebuffer is created
                                                    This allows getting output before multiprocessor is started.
Bananymous  54f64e7618  2024-03-06 02:19:59 +02:00  Kernel: Move current page table to Processor
                                                    APs can now map kernel page table and print current time!
Bananymous  f0105cb7fb  2024-03-06 00:47:02 +02:00  Kernel: Move Interruptable from InterruptController.h to its own file
Bananymous  76b0f80169  2024-03-06 00:45:54 +02:00  Kernel: Move IDT to Processor
Bananymous  f84df175ce  2024-03-06 00:36:09 +02:00  Kernel: Save BSB id
Bananymous  58aca68726  2024-03-06 00:35:45 +02:00  Kernel: Move GDT to Processor
Bananymous  8670364f44  2024-03-06 00:01:06 +02:00  BAN: Remove unnecessary include from Array.h
Bananymous  418bc54f2b  2024-03-04 22:36:41 +02:00  Kernel: Move SpinLock definition to header and fix Scheduler locking
                                                    This patch allows inlining of spinlocks :)
Bananymous  9c36d7c338  2024-03-04 11:41:54 +02:00  BAN/Kernel: Rework assertion/panic system
                                                    BAN/Assert.h does not need any includes meaning it can be included anywhere without problems.
49 changed files with 647 additions and 471 deletions

BAN/BAN/Assert.cpp (new file)

@@ -0,0 +1,22 @@
+#include <BAN/Assert.h>
+
+#if __is_kernel
+
+#include <kernel/Panic.h>
+
+[[noreturn]] void __ban_assertion_failed(const char* location, const char* msg)
+{
+	Kernel::panic_impl(location, msg);
+}
+
+#else
+
+#include <BAN/Debug.h>
+
+[[noreturn]] void __ban_assertion_failed(const char* location, const char* msg)
+{
+	derrorln("{}: {}", location, msg);
+	__builtin_trap();
+}
+
+#endif


@@ -3,6 +3,7 @@ cmake_minimum_required(VERSION 3.26)
 project(BAN CXX)
 
 set(BAN_SOURCES
+	BAN/Assert.cpp
 	BAN/New.cpp
 	BAN/String.cpp
 	BAN/StringView.cpp


@@ -1,6 +1,5 @@
 #pragma once
 
-#include <BAN/Errors.h>
 #include <BAN/Iterators.h>
 #include <BAN/Span.h>
 
@@ -19,7 +18,7 @@ namespace BAN
 		using const_iterator = ConstIteratorSimple<T, Array>;
 
 	public:
-		Array();
+		Array() = default;
 		Array(const T&);
 
 		iterator begin() { return iterator(m_data); }
@@ -44,16 +43,9 @@ namespace BAN
 		T* data() { return m_data; }
 
 	private:
-		T m_data[S];
+		T m_data[S] {};
 	};
 
-	template<typename T, size_t S>
-	Array<T, S>::Array()
-	{
-		for (size_type i = 0; i < S; i++)
-			m_data[i] = T();
-	}
-
 	template<typename T, size_t S>
 	Array<T, S>::Array(const T& value)
 	{
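A quick aside on the change above: `T m_data[S] {}` value-initializes every element, which is why the hand-written default constructor and its loop could be dropped. A minimal stand-alone illustration (the MiniArray type below is hypothetical, not part of the diff):

#include <cstddef>
#include <cstdio>

template<typename T, std::size_t S>
struct MiniArray
{
	// Brace-initializing the member array value-initializes each element:
	// scalars become zero, class types get their default constructor.
	T m_data[S] {};
};

int main()
{
	MiniArray<int, 4> a;
	std::printf("%d %d %d %d\n", a.m_data[0], a.m_data[1], a.m_data[2], a.m_data[3]); // prints: 0 0 0 0
	return 0;
}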


@@ -1,33 +1,13 @@
 #pragma once
 
-#include <BAN/Traits.h>
+#define __ban_assert_stringify_helper(s) #s
+#define __ban_assert_stringify(s) __ban_assert_stringify_helper(s)
 
-#if defined(__is_kernel)
-
-#include <kernel/Panic.h>
-
-#define ASSERT(cond) \
-	do { \
-		if (!(cond)) \
-			Kernel::panic("ASSERT(" #cond ") failed"); \
-	} while (false)
+#define ASSERT(cond) \
+	(__builtin_expect(!(cond), 0) \
+		? __ban_assertion_failed(__FILE__ ":" __ban_assert_stringify(__LINE__), "ASSERT(" #cond ") failed") \
+		: (void)0)
 
-#define __ASSERT_BIN_OP(lhs, rhs, name, op) \
-	do { \
-		auto&& _lhs = (lhs); \
-		auto&& _rhs = (rhs); \
-		if (!(_lhs op _rhs)) \
-			Kernel::panic(name "(" #lhs ", " #rhs ") ({} " #op " {}) failed", _lhs, _rhs); \
-	} while (false)
+#define ASSERT_NOT_REACHED() ASSERT(false)
 
-#define ASSERT_LT(lhs, rhs)   __ASSERT_BIN_OP(lhs, rhs, "ASSERT_LT",  <)
-#define ASSERT_LTE(lhs, rhs)  __ASSERT_BIN_OP(lhs, rhs, "ASSERT_LTE", <=)
-#define ASSERT_GT(lhs, rhs)   __ASSERT_BIN_OP(lhs, rhs, "ASSERT_GT",  >)
-#define ASSERT_GTE(lhs, rhs)  __ASSERT_BIN_OP(lhs, rhs, "ASSERT_GTE", >=)
-#define ASSERT_EQ(lhs, rhs)   __ASSERT_BIN_OP(lhs, rhs, "ASSERT_EQ",  ==)
-#define ASSERT_NEQ(lhs, rhs)  __ASSERT_BIN_OP(lhs, rhs, "ASSERT_NEQ", !=)
-#define ASSERT_NOT_REACHED() Kernel::panic("ASSERT_NOT_REACHED() failed")
-
-#else
-
-#include <assert.h>
-#define ASSERT(cond) assert((cond) && "ASSERT("#cond") failed")
-#define ASSERT_NOT_REACHED() do { assert(false && "ASSERT_NOT_REACHED() failed"); __builtin_unreachable(); } while (false)
-
-#endif
+[[noreturn]] void __ban_assertion_failed(const char* location, const char* msg);
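To make the reworked macro easier to read: ASSERT now builds its location string at preprocessing time and calls an out-of-line handler, so the header itself needs no includes. A hedged sketch of a hypothetical call site and its rough expansion (the function and line number below are illustrative):

#include <BAN/Assert.h>

// Hypothetical caller; the expansion shown in the comment is approximate.
int checked_divide(int a, int b)
{
	// ASSERT(b != 0) on, say, line 7 of Divide.cpp expands roughly to:
	//   (__builtin_expect(!(b != 0), 0)
	//       ? __ban_assertion_failed("Divide.cpp:7", "ASSERT(b != 0) failed")
	//       : (void)0);
	// The definition of __ban_assertion_failed() lives in BAN/Assert.cpp,
	// which panics in the kernel build and traps in the userspace build.
	ASSERT(b != 0);
	return a / b;
}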


@@ -1,6 +1,7 @@
 #pragma once
 
 #include <BAN/Assert.h>
+#include <BAN/Move.h>
 #include <BAN/PlacementNew.h>
 #include <stdint.h>


@@ -1,8 +1,10 @@
 #pragma once
 
 #if __is_kernel
-#error "This is userspace only file"
-#endif
+
+#include <kernel/Debug.h>
+
+#else
 
 #include <BAN/Formatter.h>
 #include <stdio.h>
@@ -29,3 +31,5 @@
 		dprintln(__VA_ARGS__); \
 		BAN::Formatter::print(__debug_putchar, "\e[m"); \
 	} while(false)
+
+#endif


@@ -1,4 +1,6 @@
 #pragma once
 
+#include <stddef.h>
+
 inline void* operator new(size_t, void* addr)   { return addr; }
 inline void* operator new[](size_t, void* addr) { return addr; }


@@ -40,9 +40,9 @@ set(KERNEL_SOURCES
 	kernel/Input/PS2/Keyboard.cpp
 	kernel/Input/PS2/Keymap.cpp
 	kernel/Input/PS2/Mouse.cpp
+	kernel/Interruptable.cpp
 	kernel/InterruptController.cpp
 	kernel/kernel.cpp
-	kernel/Lock/SpinLock.cpp
 	kernel/Memory/DMARegion.cpp
 	kernel/Memory/FileBackedRegion.cpp
 	kernel/Memory/Heap.cpp
@@ -136,6 +136,7 @@ set(LAI_SOURCES
 )
 
 set(BAN_SOURCES
+	../BAN/BAN/Assert.cpp
 	../BAN/BAN/New.cpp
 	../BAN/BAN/String.cpp
 	../BAN/BAN/StringView.cpp


@ -1,68 +1,32 @@
#include <BAN/Array.h>
#include <kernel/GDT.h> #include <kernel/GDT.h>
#include <kernel/Debug.h>
#include <string.h> #include <string.h>
namespace Kernel::GDT namespace Kernel
{ {
struct TaskStateSegment GDT* GDT::create()
{ {
uint32_t reserved1; auto* gdt = new GDT();
uint64_t rsp0; ASSERT(gdt);
uint64_t rsp1;
uint64_t rsp2;
uint64_t reserved2;
uint64_t ist1;
uint64_t ist2;
uint64_t ist3;
uint64_t ist4;
uint64_t ist5;
uint64_t ist6;
uint64_t ist7;
uint64_t reserved3;
uint16_t reserved4;
uint16_t iopb;
} __attribute__((packed));
union SegmentDescriptor gdt->write_entry(0x00, 0x00000000, 0x00000, 0x00, 0x0); // null
{ gdt->write_entry(0x08, 0x00000000, 0xFFFFF, 0x9A, 0xA); // kernel code
struct gdt->write_entry(0x10, 0x00000000, 0xFFFFF, 0x92, 0xC); // kernel data
{ gdt->write_entry(0x18, 0x00000000, 0xFFFFF, 0xFA, 0xA); // user code
uint16_t limit1; gdt->write_entry(0x20, 0x00000000, 0xFFFFF, 0xF2, 0xC); // user data
uint16_t base1; gdt->write_tss();
uint8_t base2;
uint8_t access;
uint8_t limit2 : 4;
uint8_t flags : 4;
uint8_t base3;
} __attribute__((packed));
struct return gdt;
{ }
uint32_t low;
uint32_t high;
} __attribute__((packed));
} __attribute__((packed)); void GDT::write_entry(uint8_t offset, uint32_t base, uint32_t limit, uint8_t access, uint8_t flags)
struct GDTR
{
uint16_t size;
uint64_t address;
} __attribute__((packed));
static constexpr uint16_t s_tss_offset = 0x28;
static TaskStateSegment s_tss;
static BAN::Array<SegmentDescriptor, 7> s_gdt; // null, kernel code, kernel data, user code, user data, tss low, tss high
static GDTR s_gdtr;
static void write_entry(uint8_t offset, uint32_t base, uint32_t limit, uint8_t access, uint8_t flags)
{ {
ASSERT(offset % sizeof(SegmentDescriptor) == 0); ASSERT(offset % sizeof(SegmentDescriptor) == 0);
uint8_t idx = offset / sizeof(SegmentDescriptor);
SegmentDescriptor& desc = s_gdt[offset / sizeof(SegmentDescriptor)]; auto& desc = m_gdt[idx];
desc.base1 = (base >> 0) & 0xFFFF; desc.base1 = (base >> 0) & 0xFFFF;
desc.base2 = (base >> 16) & 0xFF; desc.base2 = (base >> 16) & 0xFF;
desc.base3 = (base >> 24) & 0xFF; desc.base3 = (base >> 24) & 0xFF;
@ -75,49 +39,18 @@ namespace Kernel::GDT
desc.flags = flags & 0x0F; desc.flags = flags & 0x0F;
} }
static void write_tss() void GDT::write_tss()
{ {
memset(&s_tss, 0x00, sizeof(TaskStateSegment)); memset(&m_tss, 0x00, sizeof(TaskStateSegment));
s_tss.iopb = sizeof(TaskStateSegment); m_tss.iopb = sizeof(TaskStateSegment);
uint64_t base = (uint64_t)&s_tss; uint64_t base = reinterpret_cast<uint64_t>(&m_tss);
write_entry(s_tss_offset, (uint32_t)base, sizeof(TaskStateSegment), 0x89, 0x0); write_entry(0x28, (uint32_t)base, sizeof(TaskStateSegment), 0x89, 0x0);
SegmentDescriptor& desc = s_gdt[s_tss_offset / sizeof(SegmentDescriptor) + 1]; auto& desc = m_gdt[0x30 / sizeof(SegmentDescriptor)];
desc.low = base >> 32; desc.low = base >> 32;
desc.high = 0; desc.high = 0;
} }
void set_tss_stack(uintptr_t rsp)
{
s_tss.rsp0 = rsp;
}
static void flush_gdt()
{
asm volatile("lgdt %0" :: "m"(s_gdtr));
}
static void flush_tss()
{
asm volatile("ltr %0" :: "m"(s_tss_offset));
}
void initialize()
{
s_gdtr.address = (uint64_t)&s_gdt;
s_gdtr.size = s_gdt.size() * sizeof(SegmentDescriptor) - 1;
write_entry(0x00, 0x00000000, 0x00000, 0x00, 0x0); // null
write_entry(0x08, 0x00000000, 0xFFFFF, 0x9A, 0xA); // kernel code
write_entry(0x10, 0x00000000, 0xFFFFF, 0x92, 0xC); // kernel data
write_entry(0x18, 0x00000000, 0xFFFFF, 0xFA, 0xA); // user code
write_entry(0x20, 0x00000000, 0xFFFFF, 0xF2, 0xC); // user data
write_tss();
flush_gdt();
flush_tss();
}
} }


@ -12,7 +12,7 @@
#define ISR_LIST_X X(0) X(1) X(2) X(3) X(4) X(5) X(6) X(7) X(8) X(9) X(10) X(11) X(12) X(13) X(14) X(15) X(16) X(17) X(18) X(19) X(20) X(21) X(22) X(23) X(24) X(25) X(26) X(27) X(28) X(29) X(30) X(31) #define ISR_LIST_X X(0) X(1) X(2) X(3) X(4) X(5) X(6) X(7) X(8) X(9) X(10) X(11) X(12) X(13) X(14) X(15) X(16) X(17) X(18) X(19) X(20) X(21) X(22) X(23) X(24) X(25) X(26) X(27) X(28) X(29) X(30) X(31)
#define IRQ_LIST_X X(0) X(1) X(2) X(3) X(4) X(5) X(6) X(7) X(8) X(9) X(10) X(11) X(12) X(13) X(14) X(15) X(16) X(17) X(18) X(19) X(20) X(21) X(22) X(23) X(24) X(25) X(26) X(27) X(28) X(29) X(30) X(31) #define IRQ_LIST_X X(0) X(1) X(2) X(3) X(4) X(5) X(6) X(7) X(8) X(9) X(10) X(11) X(12) X(13) X(14) X(15) X(16) X(17) X(18) X(19) X(20) X(21) X(22) X(23) X(24) X(25) X(26) X(27) X(28) X(29) X(30) X(31)
namespace Kernel::IDT namespace Kernel
{ {
struct Registers struct Registers
@ -42,26 +42,6 @@ namespace Kernel::IDT
uint64_t rax; uint64_t rax;
}; };
struct GateDescriptor
{
uint16_t offset1;
uint16_t selector;
uint8_t IST;
uint8_t flags;
uint16_t offset2;
uint32_t offset3;
uint32_t reserved;
} __attribute__((packed));
struct IDTR
{
uint16_t size;
uint64_t offset;
} __attribute__((packed));
static IDTR s_idtr;
static GateDescriptor* s_idt = nullptr;
#define X(num) 1 + #define X(num) 1 +
static BAN::Array<Interruptable*, IRQ_LIST_X 0> s_interruptables; static BAN::Array<Interruptable*, IRQ_LIST_X 0> s_interruptables;
#undef X #undef X
@ -163,6 +143,13 @@ namespace Kernel::IDT
extern "C" void cpp_isr_handler(uint64_t isr, uint64_t error, InterruptStack& interrupt_stack, const Registers* regs) extern "C" void cpp_isr_handler(uint64_t isr, uint64_t error, InterruptStack& interrupt_stack, const Registers* regs)
{ {
if (g_paniced)
{
// FIXME: tell other processors kernel panic has occured
dprintln("Processor {} halted", Processor::current_id());
asm volatile("cli; 1: hlt; jmp 1b");
}
#if __enable_sse #if __enable_sse
bool from_userspace = (interrupt_stack.cs & 0b11) == 0b11; bool from_userspace = (interrupt_stack.cs & 0b11) == 0b11;
if (from_userspace) if (from_userspace)
@ -334,14 +321,9 @@ done:
ASSERT(Thread::current().state() != Thread::State::Terminated); ASSERT(Thread::current().state() != Thread::State::Terminated);
} }
static void flush_idt() void IDT::register_interrupt_handler(uint8_t index, void (*handler)())
{ {
asm volatile("lidt %0"::"m"(s_idtr)); auto& descriptor = m_idt[index];
}
static void register_interrupt_handler(uint8_t index, void(*handler)())
{
GateDescriptor& descriptor = s_idt[index];
descriptor.offset1 = (uint16_t)((uint64_t)handler >> 0); descriptor.offset1 = (uint16_t)((uint64_t)handler >> 0);
descriptor.offset2 = (uint16_t)((uint64_t)handler >> 16); descriptor.offset2 = (uint16_t)((uint64_t)handler >> 16);
descriptor.offset3 = (uint32_t)((uint64_t)handler >> 32); descriptor.offset3 = (uint32_t)((uint64_t)handler >> 32);
@ -351,13 +333,13 @@ done:
descriptor.flags = 0x8E; descriptor.flags = 0x8E;
} }
static void register_syscall_handler(uint8_t index, void(*handler)()) void IDT::register_syscall_handler(uint8_t index, void (*handler)())
{ {
register_interrupt_handler(index, handler); register_interrupt_handler(index, handler);
s_idt[index].flags = 0xEE; m_idt[index].flags = 0xEE;
} }
void register_irq_handler(uint8_t irq, Interruptable* interruptable) void IDT::register_irq_handler(uint8_t irq, Interruptable* interruptable)
{ {
if (irq > s_interruptables.size()) if (irq > s_interruptables.size())
Kernel::panic("Trying to assign handler for irq {} while only {} are supported", irq, s_interruptables.size()); Kernel::panic("Trying to assign handler for irq {} while only {} are supported", irq, s_interruptables.size());
@ -374,34 +356,36 @@ done:
extern "C" void syscall_asm(); extern "C" void syscall_asm();
void initialize() IDT* IDT::create(bool is_bsb)
{ {
s_idt = (GateDescriptor*)kmalloc(0x100 * sizeof(GateDescriptor)); auto* idt = new IDT();
ASSERT(s_idt); ASSERT(idt);
memset(s_idt, 0x00, 0x100 * sizeof(GateDescriptor));
s_idtr.offset = (uint64_t)s_idt; memset(idt->m_idt.data(), 0x00, 0x100 * sizeof(GateDescriptor));
s_idtr.size = 0x100 * sizeof(GateDescriptor) - 1;
#define X(num) register_interrupt_handler(num, isr ## num); #define X(num) idt->register_interrupt_handler(num, isr ## num);
ISR_LIST_X ISR_LIST_X
#undef X #undef X
#define X(num) register_interrupt_handler(IRQ_VECTOR_BASE + num, irq ## num); // FIXME: distribute IRQs more evenly?
#define X(num) idt->register_interrupt_handler(IRQ_VECTOR_BASE + num, irq ## num);
if (is_bsb)
{
IRQ_LIST_X IRQ_LIST_X
}
#undef X #undef X
register_syscall_handler(0x80, syscall_asm); idt->register_syscall_handler(0x80, syscall_asm);
flush_idt(); return idt;
} }
[[noreturn]] void force_triple_fault() [[noreturn]] void IDT::force_triple_fault()
{ {
// load 0 sized IDT and trigger an interrupt to force triple fault // load 0 sized IDT and trigger an interrupt to force triple fault
asm volatile("cli"); Processor::set_interrupt_state(InterruptState::Disabled);
s_idtr.size = 0; Processor::idt().m_idtr.size = 0;
flush_idt(); Processor::idt().load();
asm volatile("int $0x00"); asm volatile("int $0x00");
ASSERT_NOT_REACHED(); ASSERT_NOT_REACHED();
} }


@ -20,7 +20,6 @@ namespace Kernel
RecursiveSpinLock PageTable::s_fast_page_lock; RecursiveSpinLock PageTable::s_fast_page_lock;
static PageTable* s_kernel = nullptr; static PageTable* s_kernel = nullptr;
static PageTable* s_current = nullptr;
static bool s_has_nxe = false; static bool s_has_nxe = false;
static bool s_has_pge = false; static bool s_has_pge = false;
@ -71,40 +70,51 @@ namespace Kernel
void PageTable::initialize() void PageTable::initialize()
{ {
if (CPUID::has_nxe()) if (CPUID::has_nxe())
{
asm volatile(
"movl $0xC0000080, %ecx;"
"rdmsr;"
"orl $0x800, %eax;"
"wrmsr"
);
s_has_nxe = true; s_has_nxe = true;
}
uint32_t ecx, edx; if (CPUID::has_pge())
CPUID::get_features(ecx, edx);
if (edx & CPUID::EDX_PGE)
{
asm volatile(
"movq %cr4, %rax;"
"orq $0x80, %rax;"
"movq %rax, %cr4;"
);
s_has_pge = true; s_has_pge = true;
}
// enable write protect to kernel
asm volatile(
"movq %cr0, %rax;"
"orq $0x10000, %rax;"
"movq %rax, %cr0;"
);
ASSERT(s_kernel == nullptr); ASSERT(s_kernel == nullptr);
s_kernel = new PageTable(); s_kernel = new PageTable();
ASSERT(s_kernel); ASSERT(s_kernel);
s_kernel->initialize_kernel(); s_kernel->initialize_kernel();
s_kernel->load(); s_kernel->initial_load();
}
void PageTable::initial_load()
{
if (s_has_nxe)
{
asm volatile(
"movl $0xC0000080, %%ecx;"
"rdmsr;"
"orl $0x800, %%eax;"
"wrmsr"
::: "eax", "ecx", "edx", "memory"
);
}
if (s_has_pge)
{
asm volatile(
"movq %%cr4, %%rax;"
"orq $0x80, %%rax;"
"movq %%rax, %%cr4;"
::: "rax"
);
}
// enable write protect
asm volatile(
"movq %%cr0, %%rax;"
"orq $0x10000, %%rax;"
"movq %%rax, %%cr0;"
::: "rax"
);
load();
} }
PageTable& PageTable::kernel() PageTable& PageTable::kernel()
@ -113,12 +123,6 @@ namespace Kernel
return *s_kernel; return *s_kernel;
} }
PageTable& PageTable::current()
{
ASSERT(s_current);
return *s_current;
}
bool PageTable::is_valid_pointer(uintptr_t pointer) bool PageTable::is_valid_pointer(uintptr_t pointer)
{ {
if (!is_canonical(pointer)) if (!is_canonical(pointer))
@ -208,7 +212,7 @@ namespace Kernel
void PageTable::map_fast_page(paddr_t paddr) void PageTable::map_fast_page(paddr_t paddr)
{ {
ASSERT(s_kernel); ASSERT(s_kernel);
ASSERT_NEQ(paddr, 0); ASSERT(paddr);
SpinLockGuard _(s_fast_page_lock); SpinLockGuard _(s_fast_page_lock);
@ -308,7 +312,7 @@ namespace Kernel
{ {
SpinLockGuard _(m_lock); SpinLockGuard _(m_lock);
asm volatile("movq %0, %%cr3" :: "r"(m_highest_paging_struct)); asm volatile("movq %0, %%cr3" :: "r"(m_highest_paging_struct));
s_current = this; Processor::set_current_page_table(this);
} }
void PageTable::invalidate(vaddr_t vaddr) void PageTable::invalidate(vaddr_t vaddr)
@ -322,7 +326,7 @@ namespace Kernel
ASSERT(vaddr); ASSERT(vaddr);
ASSERT(vaddr != fast_page()); ASSERT(vaddr != fast_page());
if (vaddr >= KERNEL_OFFSET) if (vaddr >= KERNEL_OFFSET)
ASSERT_GTE(vaddr, (vaddr_t)g_kernel_start); ASSERT(vaddr >= (vaddr_t)g_kernel_start);
if ((vaddr >= KERNEL_OFFSET) != (this == s_kernel)) if ((vaddr >= KERNEL_OFFSET) != (this == s_kernel))
Kernel::panic("unmapping {8H}, kernel: {}", vaddr, this == s_kernel); Kernel::panic("unmapping {8H}, kernel: {}", vaddr, this == s_kernel);
@ -367,8 +371,6 @@ namespace Kernel
{ {
ASSERT(vaddr); ASSERT(vaddr);
ASSERT(vaddr != fast_page()); ASSERT(vaddr != fast_page());
if (vaddr >= KERNEL_OFFSET && s_current)
ASSERT_GTE(vaddr, (vaddr_t)g_kernel_start);
if ((vaddr >= KERNEL_OFFSET) != (this == s_kernel)) if ((vaddr >= KERNEL_OFFSET) != (this == s_kernel))
Kernel::panic("mapping {8H} to {8H}, kernel: {}", paddr, vaddr, this == s_kernel); Kernel::panic("mapping {8H} to {8H}, kernel: {}", paddr, vaddr, this == s_kernel);
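For reference, the magic values used by initial_load() above correspond to these architectural bits. This is a hedged summary: the constant names below are illustrative and not from the source, but the numeric values match the instructions in the diff.

#include <stdint.h>

// Bits toggled by PageTable::initial_load() above (x86_64):
constexpr uint32_t MSR_EFER = 0xC0000080; // the "movl $0xC0000080, %ecx" target (extended feature enable register)
constexpr uint32_t EFER_NXE = 1u << 11;   // 0x800: enable no-execute page protection
constexpr uint64_t CR4_PGE  = 1u << 7;    // 0x80: enable global pages (kept across CR3 reloads)
constexpr uint64_t CR0_WP   = 1u << 16;   // 0x10000: honor read-only pages even in ring 0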


@@ -158,13 +158,6 @@ enable_sse:
 	movl %eax, %cr4
 	ret
 
-initialize_lapic_id:
-	movl $1, %eax
-	cpuid
-	shrl $24, %ebx
-	movw %bx, %gs
-	ret
-
 initialize_paging:
 	# enable PAE
 	movl %cr4, %ecx
@@ -198,7 +191,6 @@ _start:
 	movl %ebx, V2P(bootloader_info)
 
 	movl $V2P(boot_stack_top), %esp
 
-	call initialize_lapic_id
 	call check_requirements
 	call enable_sse
@@ -278,7 +270,6 @@ ap_protected_mode:
 	movl ap_stack_ptr, %esp
 	movb $1, V2P(g_ap_stack_loaded)
 
-	call V2P(initialize_lapic_id)
 	call V2P(enable_sse)


@@ -78,5 +78,6 @@ namespace CPUID
 	void get_features(uint32_t& ecx, uint32_t& edx);
 	bool is_64_bit();
 	bool has_nxe();
+	bool has_pge();
 
 }


@ -1,16 +1,101 @@
#pragma once #pragma once
#include <BAN/Array.h>
#include <BAN/NoCopyMove.h>
#include <stdint.h> #include <stdint.h>
namespace Kernel::GDT namespace Kernel
{ {
struct TaskStateSegment
{
uint32_t reserved1;
uint64_t rsp0;
uint64_t rsp1;
uint64_t rsp2;
uint64_t reserved2;
uint64_t ist1;
uint64_t ist2;
uint64_t ist3;
uint64_t ist4;
uint64_t ist5;
uint64_t ist6;
uint64_t ist7;
uint64_t reserved3;
uint16_t reserved4;
uint16_t iopb;
} __attribute__((packed));
union SegmentDescriptor
{
struct
{
uint16_t limit1;
uint16_t base1;
uint8_t base2;
uint8_t access;
uint8_t limit2 : 4;
uint8_t flags : 4;
uint8_t base3;
} __attribute__((packed));
struct
{
uint32_t low;
uint32_t high;
} __attribute__((packed));
} __attribute__((packed));
struct GDTR
{
uint16_t size;
uint64_t address;
} __attribute__((packed));
class GDT
{
BAN_NON_COPYABLE(GDT);
BAN_NON_MOVABLE(GDT);
public:
static GDT* create();
void load() { flush_gdt(); flush_tss(); }
static constexpr inline bool is_user_segment(uint8_t segment) static constexpr inline bool is_user_segment(uint8_t segment)
{ {
return (segment & 3) == 3; return (segment & 3) == 3;
} }
void initialize(); void set_tss_stack(uintptr_t rsp)
void set_tss_stack(uintptr_t); {
m_tss.rsp0 = rsp;
}
private:
GDT() = default;
void write_entry(uint8_t offset, uint32_t base, uint32_t limit, uint8_t access, uint8_t flags);
void write_tss();
void flush_gdt()
{
asm volatile("lgdt %0" :: "m"(m_gdtr) : "memory");
}
void flush_tss()
{
asm volatile("ltr %0" :: "rm"((uint16_t)0x28) : "memory");
}
private:
BAN::Array<SegmentDescriptor, 7> m_gdt; // null, kernel code, kernel data, user code, user data, tss low, tss high
TaskStateSegment m_tss;
const GDTR m_gdtr {
.size = m_gdt.size() * sizeof(SegmentDescriptor) - 1,
.address = reinterpret_cast<uint64_t>(m_gdt.data())
};
};
} }


@ -1,13 +1,62 @@
#pragma once #pragma once
#include <BAN/Array.h>
#include <BAN/NoCopyMove.h>
#include <kernel/Interruptable.h>
#include <stdint.h> #include <stdint.h>
constexpr uint8_t IRQ_VECTOR_BASE = 0x20; constexpr uint8_t IRQ_VECTOR_BASE = 0x20;
namespace Kernel::IDT namespace Kernel
{ {
void initialize(); struct GateDescriptor
[[noreturn]] void force_triple_fault(); {
uint16_t offset1;
uint16_t selector;
uint8_t IST;
uint8_t flags;
uint16_t offset2;
uint32_t offset3;
uint32_t reserved;
} __attribute__((packed));
struct IDTR
{
uint16_t size;
uint64_t offset;
} __attribute__((packed));
class IDT
{
BAN_NON_COPYABLE(IDT);
BAN_NON_MOVABLE(IDT);
public:
static IDT* create(bool is_bsb);
[[noreturn]] static void force_triple_fault();
void register_irq_handler(uint8_t irq, Interruptable* interruptable);
void load()
{
asm volatile("lidt %0" :: "m"(m_idtr) : "memory");
}
private:
IDT() = default;
void register_interrupt_handler(uint8_t index, void (*handler)());
void register_syscall_handler(uint8_t index, void (*handler)());
private:
BAN::Array<GateDescriptor, 0x100> m_idt;
IDTR m_idtr {
.size = m_idt.size() * sizeof(GateDescriptor) - 1,
.offset = reinterpret_cast<uint64_t>(m_idt.data())
};
};
} }


@@ -1,7 +1,7 @@
 #pragma once
 
 #include <kernel/Input/PS2/Controller.h>
-#include <kernel/InterruptController.h>
+#include <kernel/Interruptable.h>
 
 namespace Kernel::Input
 {


@@ -8,23 +8,6 @@
 namespace Kernel
 {
 
-	class Interruptable
-	{
-	public:
-		void set_irq(int irq);
-		void enable_interrupt();
-		void disable_interrupt();
-
-		virtual void handle_irq() = 0;
-
-	protected:
-		Interruptable() = default;
-		~Interruptable() {}
-
-	private:
-		int m_irq { -1 };
-	};
-
 	class InterruptController
 	{
 	public:


@@ -0,0 +1,23 @@
+#pragma once
+
+namespace Kernel
+{
+
+	class Interruptable
+	{
+	public:
+		void set_irq(int irq);
+		void enable_interrupt();
+		void disable_interrupt();
+
+		virtual void handle_irq() = 0;
+
+	protected:
+		Interruptable() = default;
+		~Interruptable() {}
+
+	private:
+		int m_irq { -1 };
+	};
+
+}


@ -21,12 +21,12 @@ namespace Kernel
{ {
auto tid = Scheduler::current_tid(); auto tid = Scheduler::current_tid();
if (tid == m_locker) if (tid == m_locker)
ASSERT_GT(m_lock_depth, 0); ASSERT(m_lock_depth > 0);
else else
{ {
while (!m_locker.compare_exchange(-1, tid)) while (!m_locker.compare_exchange(-1, tid))
Scheduler::get().reschedule(); Scheduler::get().reschedule();
ASSERT_EQ(m_lock_depth, 0); ASSERT(m_lock_depth == 0);
} }
m_lock_depth++; m_lock_depth++;
} }
@ -35,20 +35,20 @@ namespace Kernel
{ {
auto tid = Scheduler::current_tid(); auto tid = Scheduler::current_tid();
if (tid == m_locker) if (tid == m_locker)
ASSERT_GT(m_lock_depth, 0); ASSERT(m_lock_depth > 0);
else else
{ {
if (!m_locker.compare_exchange(-1, tid)) if (!m_locker.compare_exchange(-1, tid))
return false; return false;
ASSERT_EQ(m_lock_depth, 0); ASSERT(m_lock_depth == 0);
} }
m_lock_depth++; m_lock_depth++;
} }
void unlock() void unlock()
{ {
ASSERT_EQ(m_locker.load(), Scheduler::current_tid()); ASSERT(m_locker == Scheduler::current_tid());
ASSERT_GT(m_lock_depth, 0); ASSERT(m_lock_depth > 0);
if (--m_lock_depth == 0) if (--m_lock_depth == 0)
m_locker = -1; m_locker = -1;
} }
@ -74,7 +74,7 @@ namespace Kernel
{ {
auto tid = Scheduler::current_tid(); auto tid = Scheduler::current_tid();
if (tid == m_locker) if (tid == m_locker)
ASSERT_GT(m_lock_depth, 0); ASSERT(m_lock_depth > 0);
else else
{ {
bool has_priority = tid ? !Thread::current().is_userspace() : true; bool has_priority = tid ? !Thread::current().is_userspace() : true;
@ -82,7 +82,7 @@ namespace Kernel
m_queue_length++; m_queue_length++;
while (!(has_priority || m_queue_length == 0) || !m_locker.compare_exchange(-1, tid)) while (!(has_priority || m_queue_length == 0) || !m_locker.compare_exchange(-1, tid))
Scheduler::get().reschedule(); Scheduler::get().reschedule();
ASSERT_EQ(m_lock_depth, 0); ASSERT(m_lock_depth == 0);
} }
m_lock_depth++; m_lock_depth++;
} }
@ -91,7 +91,7 @@ namespace Kernel
{ {
auto tid = Scheduler::current_tid(); auto tid = Scheduler::current_tid();
if (tid == m_locker) if (tid == m_locker)
ASSERT_GT(m_lock_depth, 0); ASSERT(m_lock_depth > 0);
else else
{ {
bool has_priority = tid ? !Thread::current().is_userspace() : true; bool has_priority = tid ? !Thread::current().is_userspace() : true;
@ -99,7 +99,7 @@ namespace Kernel
return false; return false;
if (has_priority) if (has_priority)
m_queue_length++; m_queue_length++;
ASSERT_EQ(m_lock_depth, 0); ASSERT(m_lock_depth == 0);
} }
m_lock_depth++; m_lock_depth++;
} }
@ -107,8 +107,8 @@ namespace Kernel
void unlock() void unlock()
{ {
auto tid = Scheduler::current_tid(); auto tid = Scheduler::current_tid();
ASSERT_EQ(m_locker.load(), tid); ASSERT(m_locker == tid);
ASSERT_GT(m_lock_depth, 0); ASSERT(m_lock_depth > 0);
if (--m_lock_depth == 0) if (--m_lock_depth == 0)
{ {
bool has_priority = tid ? !Thread::current().is_userspace() : true; bool has_priority = tid ? !Thread::current().is_userspace() : true;


@ -1,5 +1,6 @@
#pragma once #pragma once
#include <BAN/Assert.h>
#include <BAN/Atomic.h> #include <BAN/Atomic.h>
#include <BAN/NoCopyMove.h> #include <BAN/NoCopyMove.h>
#include <kernel/Processor.h> #include <kernel/Processor.h>
@ -17,8 +18,32 @@ namespace Kernel
public: public:
SpinLock() = default; SpinLock() = default;
InterruptState lock(); InterruptState lock()
void unlock(InterruptState state); {
auto state = Processor::get_interrupt_state();
Processor::set_interrupt_state(InterruptState::Disabled);
auto id = Processor::current_id();
ASSERT(m_locker != id);
while (!m_locker.compare_exchange(PROCESSOR_NONE, id, BAN::MemoryOrder::memory_order_acquire))
__builtin_ia32_pause();
return state;
}
void unlock(InterruptState state)
{
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
ASSERT(m_locker == Processor::current_id());
m_locker.store(PROCESSOR_NONE, BAN::MemoryOrder::memory_order_release);
Processor::set_interrupt_state(state);
}
bool current_processor_has_lock() const
{
return m_locker == Processor::current_id();
}
private: private:
BAN::Atomic<ProcessorID> m_locker { PROCESSOR_NONE }; BAN::Atomic<ProcessorID> m_locker { PROCESSOR_NONE };
@ -32,45 +57,44 @@ namespace Kernel
public: public:
RecursiveSpinLock() = default; RecursiveSpinLock() = default;
InterruptState lock();
void unlock(InterruptState state);
private:
BAN::Atomic<ProcessorID> m_locker { PROCESSOR_NONE };
uint32_t m_lock_depth { 0 };
};
class SpinLockUnsafe
{
BAN_NON_COPYABLE(SpinLockUnsafe);
BAN_NON_MOVABLE(SpinLockUnsafe);
public:
SpinLockUnsafe() = default;
InterruptState lock() InterruptState lock()
{ {
auto id = Processor::current_id();
auto state = Processor::get_interrupt_state(); auto state = Processor::get_interrupt_state();
Processor::set_interrupt_state(InterruptState::Disabled); Processor::set_interrupt_state(InterruptState::Disabled);
auto id = Processor::current_id();
if (m_locker == id)
ASSERT(m_lock_depth > 0);
else
{
while (!m_locker.compare_exchange(PROCESSOR_NONE, id, BAN::MemoryOrder::memory_order_acquire)) while (!m_locker.compare_exchange(PROCESSOR_NONE, id, BAN::MemoryOrder::memory_order_acquire))
__builtin_ia32_pause(); __builtin_ia32_pause();
ASSERT(m_lock_depth == 0);
}
m_lock_depth++;
return state; return state;
} }
void unlock(InterruptState state) void unlock(InterruptState state)
{ {
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
ASSERT(m_locker == Processor::current_id());
ASSERT(m_lock_depth > 0);
if (--m_lock_depth == 0)
m_locker.store(PROCESSOR_NONE, BAN::MemoryOrder::memory_order_release); m_locker.store(PROCESSOR_NONE, BAN::MemoryOrder::memory_order_release);
Processor::set_interrupt_state(state); Processor::set_interrupt_state(state);
} }
bool is_locked() const { return m_locker != PROCESSOR_NONE; } bool current_processor_has_lock() const
{
return m_locker == Processor::current_id();
}
private: private:
BAN::Atomic<ProcessorID> m_locker { PROCESSOR_NONE }; BAN::Atomic<ProcessorID> m_locker { PROCESSOR_NONE };
uint32_t m_lock_depth { 0 };
}; };
template<typename Lock> template<typename Lock>
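A short usage sketch for the now-inline SpinLock: lock() disables interrupts and returns the previous InterruptState, unlock() restores it, and SpinLockGuard (declared further down in this header and used elsewhere in this changeset, e.g. in E1000) pairs the two automatically. The guarded counter class below is hypothetical:

#include <stdint.h>

#include <kernel/Lock/SpinLock.h>

namespace Kernel
{

	// Hypothetical example class, not part of the changeset.
	class IrqSafeCounter
	{
	public:
		void increment()
		{
			// The guard calls m_lock.lock() (disabling interrupts and spinning
			// with an acquire compare-exchange), then restores the saved
			// InterruptState via m_lock.unlock() when it goes out of scope.
			SpinLockGuard _(m_lock);
			m_count++;
		}

	private:
		SpinLock m_lock;
		uint64_t m_count { 0 };
	};

}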


@@ -40,7 +40,7 @@ namespace Kernel
 		static void initialize();
 
 		static PageTable& kernel();
-		static PageTable& current();
+		static PageTable& current() { return *reinterpret_cast<PageTable*>(Processor::get_current_page_table()); }
 
 		static constexpr vaddr_t fast_page() { return KERNEL_OFFSET; }
@@ -109,6 +109,7 @@ namespace Kernel
 		vaddr_t reserve_free_contiguous_pages(size_t page_count, vaddr_t first_address, vaddr_t last_address = UINTPTR_MAX);
 
 		void load();
+		void initial_load();
 
 		InterruptState lock() const { return m_lock.lock(); }
 		void unlock(InterruptState state) const { m_lock.unlock(state); }


@@ -1,7 +1,7 @@
 #pragma once
 
 #include <BAN/UniqPtr.h>
-#include <kernel/InterruptController.h>
+#include <kernel/Interruptable.h>
 #include <kernel/Memory/DMARegion.h>
 #include <kernel/Networking/E1000/Definitions.h>
 #include <kernel/Networking/NetworkInterface.h>


@@ -2,28 +2,29 @@
 
 #include <kernel/Debug.h>
 
-#define panic(...) detail::panic_impl(__FILE__, __LINE__, __VA_ARGS__)
+#define __panic_stringify_helper(s) #s
+#define __panic_stringify(s) __panic_stringify_helper(s)
+
+#define panic(...) panic_impl(__FILE__ ":" __panic_stringify(__LINE__), __VA_ARGS__)
 
-namespace Kernel::detail
+namespace Kernel
 {
 
-	extern bool g_paniced;
+	extern volatile bool g_paniced;
 
 	template<typename... Args>
 	__attribute__((__noreturn__))
-	static void panic_impl(const char* file, int line, const char* message, Args&&... args)
+	static void panic_impl(const char* location, const char* message, Args&&... args)
 	{
 		asm volatile("cli");
-		derrorln("Kernel panic at {}:{}", file, line);
+		derrorln("Kernel panic at {}", location);
 		derrorln(message, BAN::forward<Args>(args)...);
 		if (!g_paniced)
 		{
 			g_paniced = true;
 			Debug::dump_stack_trace();
 		}
-		for (;;)
-			asm volatile("hlt");
-		__builtin_unreachable();
+		asm volatile("ud2");
 	}
 
 }
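The stringify helper pair above is what lets panic() pass a single compile-time "file:line" string instead of separate file and line arguments. A minimal, stand-alone demonstration of the same preprocessor trick (the names here are hypothetical):

#include <cstdio>

#define my_stringify_helper(s) #s
#define my_stringify(s) my_stringify_helper(s)

// __FILE__ ":" my_stringify(__LINE__) is pasted into one string literal by the
// preprocessor, e.g. "demo.cpp:10", so no runtime formatting is needed.
#define WHERE() (__FILE__ ":" my_stringify(__LINE__))

int main()
{
	std::puts(WHERE());
	return 0;
}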


@ -1,7 +1,10 @@
#pragma once #pragma once
#include <BAN/ForwardList.h> #include <BAN/ForwardList.h>
#include <kernel/Arch.h> #include <kernel/Arch.h>
#include <kernel/GDT.h>
#include <kernel/IDT.h>
namespace Kernel namespace Kernel
{ {
@ -19,20 +22,16 @@ namespace Kernel
class Processor class Processor
{ {
BAN_NON_COPYABLE(Processor); BAN_NON_COPYABLE(Processor);
BAN_NON_MOVABLE(Processor);
public: public:
static Processor& create(ProcessorID id); static Processor& create(ProcessorID id);
static Processor& initialize();
static ProcessorID current_id() static ProcessorID current_id() { return read_gs_sized<ProcessorID>(offsetof(Processor, m_id)); }
{
uint16_t id;
asm volatile("movw %%gs, %0" : "=rm"(id));
return id;
}
static Processor& get(ProcessorID); static ProcessorID bsb_id() { return s_bsb_id; }
static bool current_is_bsb() { return current_id() == bsb_id(); }
static Processor& current() { return get(current_id()); }
static void set_interrupt_state(InterruptState state) static void set_interrupt_state(InterruptState state)
{ {
@ -51,23 +50,71 @@ namespace Kernel
return InterruptState::Disabled; return InterruptState::Disabled;
}; };
static uintptr_t current_stack_bottom() { return reinterpret_cast<uintptr_t>(read_gs_ptr(offsetof(Processor, m_stack))); }
static uintptr_t current_stack_top() { return current_stack_bottom() + s_stack_size; }
uintptr_t stack_bottom() const { return reinterpret_cast<uintptr_t>(m_stack); } uintptr_t stack_bottom() const { return reinterpret_cast<uintptr_t>(m_stack); }
uintptr_t stack_top() const { return stack_bottom() + m_stack_size; } uintptr_t stack_top() const { return stack_bottom() + s_stack_size; }
static GDT& gdt() { return *reinterpret_cast<GDT*>(read_gs_ptr(offsetof(Processor, m_gdt))); }
static IDT& idt() { return *reinterpret_cast<IDT*>(read_gs_ptr(offsetof(Processor, m_idt))); }
static void* get_current_page_table() { return read_gs_ptr(offsetof(Processor, m_current_page_table)); }
static void set_current_page_table(void* page_table) { write_gs_ptr(offsetof(Processor, m_current_page_table), page_table); }
private: private:
Processor() = default; Processor() = default;
Processor(Processor&& other) ~Processor() { ASSERT_NOT_REACHED(); }
template<typename T>
static T read_gs_sized(uintptr_t offset) requires(sizeof(T) <= 8)
{ {
m_stack = other.m_stack; #define __ASM_INPUT(operation) operation " %%gs:(%[offset]), %[result]" : [result]"=rm"(result) : [offset]"rm"(offset)
other.m_stack = nullptr; T result;
if constexpr(sizeof(T) == 8)
asm volatile(__ASM_INPUT("movq"));
if constexpr(sizeof(T) == 4)
asm volatile(__ASM_INPUT("movl"));
if constexpr(sizeof(T) == 2)
asm volatile(__ASM_INPUT("movw"));
if constexpr(sizeof(T) == 1)
asm volatile(__ASM_INPUT("movb"));
return result;
#undef __ASM_INPUT
} }
~Processor();
template<typename T>
static void write_gs_sized(uintptr_t offset, T value) requires(sizeof(T) <= 8)
{
#define __ASM_INPUT(operation) operation " %[value], %%gs:(%[offset])" :: [value]"rm"(value), [offset]"rm"(offset) : "memory"
if constexpr(sizeof(T) == 8)
asm volatile(__ASM_INPUT("movq"));
if constexpr(sizeof(T) == 4)
asm volatile(__ASM_INPUT("movl"));
if constexpr(sizeof(T) == 2)
asm volatile(__ASM_INPUT("movw"));
if constexpr(sizeof(T) == 1)
asm volatile(__ASM_INPUT("movb"));
#undef __ASM_INPUT
}
static void* read_gs_ptr(uintptr_t offset) { return read_gs_sized<void*>(offset); }
static void write_gs_ptr(uintptr_t offset, void* value) { write_gs_sized<void*>(offset, value); }
private: private:
void* m_stack { nullptr }; static ProcessorID s_bsb_id;
static constexpr size_t m_stack_size { 4096 };
friend class BAN::Vector<Processor>; ProcessorID m_id { PROCESSOR_NONE };
static constexpr size_t s_stack_size { 4096 };
void* m_stack { nullptr };
GDT* m_gdt { nullptr };
IDT* m_idt { nullptr };
void* m_current_page_table { nullptr };
friend class BAN::Array<Processor, 0xFF>;
}; };
#else #else
#error #error
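The read_gs_sized/write_gs_sized helpers above rely on Processor::initialize() (in Processor.cpp later in this diff) having written the address of the per-CPU Processor object into the IA32_GS_BASE MSR; after that, any member can be fetched with a single %gs-relative move. A simplified, hypothetical illustration of the same pattern outside the Processor class:

#include <stdint.h>

// Hypothetical per-CPU block; assumes IA32_GS_BASE already points at the
// current core's instance (as Processor::initialize() arranges for Processor).
struct PerCpu
{
	uint32_t id;
	void*    current_page_table;
};

static inline uint32_t current_cpu_id()
{
	const uintptr_t offset = __builtin_offsetof(PerCpu, id);
	uint32_t id;
	// "movl %gs:(%reg), %out" reads 4 bytes at GS base + offset, i.e. the id
	// field of whichever PerCpu block this core's GS base points to.
	asm volatile("movl %%gs:(%[offset]), %[result]"
		: [result]"=r"(id)
		: [offset]"r"(offset));
	return id;
}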


@@ -62,7 +62,7 @@ namespace Kernel
 			Semaphore* semaphore;
 		};
 
-		SpinLockUnsafe m_lock;
+		SpinLock m_lock;
 
 		Thread* m_idle_thread { nullptr };
 		BAN::LinkedList<SchedulerThread> m_active_threads;


@@ -2,7 +2,7 @@
 
 #include <BAN/Array.h>
 #include <BAN/RefPtr.h>
-#include <kernel/InterruptController.h>
+#include <kernel/Interruptable.h>
 #include <kernel/Memory/DMARegion.h>
 #include <kernel/PCI.h>
 #include <kernel/Storage/ATA/AHCI/Definitions.h>


@@ -3,7 +3,7 @@
 #include <BAN/ByteSpan.h>
 #include <BAN/RefPtr.h>
 #include <BAN/Vector.h>
-#include <kernel/InterruptController.h>
+#include <kernel/Interruptable.h>
 #include <kernel/Lock/Mutex.h>
 
 namespace Kernel


@@ -2,7 +2,7 @@
 
 #include <BAN/UniqPtr.h>
 #include <BAN/Vector.h>
-#include <kernel/InterruptController.h>
+#include <kernel/Interruptable.h>
 #include <kernel/Lock/Mutex.h>
 #include <kernel/Memory/DMARegion.h>
 #include <kernel/Semaphore.h>


@@ -2,7 +2,7 @@
 
 #include <BAN/CircularQueue.h>
 #include <BAN/Errors.h>
-#include <kernel/InterruptController.h>
+#include <kernel/Interruptable.h>
 #include <kernel/Terminal/TTY.h>
 
 namespace Kernel


@@ -1,6 +1,6 @@
 #pragma once
 
-#include <kernel/InterruptController.h>
+#include <kernel/Interruptable.h>
 #include <kernel/Lock/SpinLock.h>
 #include <kernel/Timer/Timer.h>


@@ -1,6 +1,6 @@
 #pragma once
 
-#include <kernel/InterruptController.h>
+#include <kernel/Interruptable.h>
 #include <kernel/Timer/Timer.h>
 
 namespace Kernel


@ -31,6 +31,35 @@ extern volatile uint8_t g_ap_stack_loaded[];
namespace Kernel namespace Kernel
{ {
enum ICR_LO : uint32_t
{
ICR_LO_reserved_mask = 0xFFF32000,
ICR_LO_delivery_mode_fixed = 0b000 << 8,
ICR_LO_delivery_mode_lowest_priority = 0b001 << 8,
ICR_LO_delivery_mode_smi = 0b010 << 8,
ICR_LO_delivery_mode_nmi = 0b100 << 8,
ICR_LO_delivery_mode_init = 0b101 << 8,
ICR_LO_delivery_mode_start_up = 0b110 << 8,
ICR_LO_destination_mode_physical = 0 << 11,
ICR_LO_destination_mode_logical = 1 << 11,
ICR_LO_delivery_status_idle = 0 << 12,
ICR_LO_delivery_status_send_pending = 1 << 12,
ICR_LO_level_deassert = 0 << 14,
ICR_LO_level_assert = 1 << 14,
ICR_LO_trigger_mode_edge = 0 << 15,
ICR_LO_trigger_mode_level = 1 << 15,
ICR_LO_destination_shorthand_none = 0b00 << 18,
ICR_LO_destination_shorthand_self = 0b01 << 18,
ICR_LO_destination_shorthand_all_including_self = 0b10 << 18,
ICR_LO_destination_shorthand_all_excluding_self = 0b11 << 18,
};
struct MADT : public Kernel::ACPI::SDTHeader struct MADT : public Kernel::ACPI::SDTHeader
{ {
uint32_t local_apic; uint32_t local_apic;
@ -208,7 +237,7 @@ namespace Kernel
write_to_local_apic(LAPIC_ICR_HI_REG, (read_from_local_apic(LAPIC_ICR_HI_REG) & 0x00FFFFFF) | (processor << 24)); write_to_local_apic(LAPIC_ICR_HI_REG, (read_from_local_apic(LAPIC_ICR_HI_REG) & 0x00FFFFFF) | (processor << 24));
write_to_local_apic(LAPIC_ICR_LO_REG, data); write_to_local_apic(LAPIC_ICR_LO_REG, data);
udelay(ud); udelay(ud);
while (read_from_local_apic(LAPIC_ICR_LO_REG) & (1 << 12)) while ((read_from_local_apic(LAPIC_ICR_LO_REG) & ICR_LO_delivery_status_send_pending) == ICR_LO_delivery_status_send_pending)
__builtin_ia32_pause(); __builtin_ia32_pause();
}; };
@ -232,31 +261,55 @@ namespace Kernel
dprintln("Trying to enable processor (lapic id {})", processor.apic_id); dprintln("Trying to enable processor (lapic id {})", processor.apic_id);
Kernel::Processor::create(processor.processor_id); auto& proc = Kernel::Processor::create(processor.apic_id);
PageTable::with_fast_page((paddr_t)g_ap_init_addr, [&] { PageTable::with_fast_page((paddr_t)g_ap_init_addr, [&] {
PageTable::fast_page_as_sized<uint32_t>(2) = V2P(Kernel::Processor::get(processor.processor_id).stack_top()); PageTable::fast_page_as_sized<uint32_t>(2) = V2P(proc.stack_top());
}); });
*g_ap_stack_loaded = 0; *g_ap_stack_loaded = 0;
write_to_local_apic(LAPIC_ERROR_REG, 0x00); write_to_local_apic(LAPIC_ERROR_REG, 0x00);
send_ipi(processor.processor_id, (read_from_local_apic(LAPIC_ICR_LO_REG) & 0xFFF00000) | 0x0000C500, 0);
send_ipi(processor.processor_id, (read_from_local_apic(LAPIC_ICR_LO_REG) & 0xFFF0F800) | 0x00008500, 0); // send INIT IPI
send_ipi(processor.apic_id,
(read_from_local_apic(LAPIC_ICR_LO_REG) & ICR_LO_reserved_mask)
| ICR_LO_delivery_mode_init
| ICR_LO_destination_mode_physical
| ICR_LO_level_assert
| ICR_LO_trigger_mode_edge
| ICR_LO_destination_shorthand_none
, 0
);
// TODO: If we are on processor predating Pentium, we need to send deassert
udelay(10 * 1000); udelay(10 * 1000);
for (int i = 0; i < 2; i++) for (int i = 0; i < 2; i++)
{ {
write_to_local_apic(LAPIC_ERROR_REG, 0x00); write_to_local_apic(LAPIC_ERROR_REG, 0x00);
send_ipi(processor.processor_id, (read_from_local_apic(LAPIC_ICR_LO_REG) & 0xFFF0F800) | 0x00000600 | ap_init_page, 200);
// send 2 SETUP IPIs with 200 us delay
send_ipi(processor.apic_id,
(read_from_local_apic(LAPIC_ICR_LO_REG) & ICR_LO_reserved_mask)
| ICR_LO_delivery_mode_start_up
| ICR_LO_destination_mode_physical
| ICR_LO_level_assert
| ICR_LO_trigger_mode_edge
| ICR_LO_destination_shorthand_none
| ap_init_page
, 200
);
} }
// give processor upto 100 * 100 us (10 ms to boot) // give processor upto 100 * 100 us + 200 us to boot
for (int i = 0; *g_ap_stack_loaded == 0 && i < 100; i++) for (int i = 0; *g_ap_stack_loaded == 0 && i < 100; i++)
udelay(100); udelay(100);
} }
*g_ap_startup_done = 1; *g_ap_startup_done = 1;
// give processors 100 us time to increment running count
udelay(100);
dprintln("{} processors started", *g_ap_running_count); dprintln("{} processors started", *g_ap_running_count);
} }


@@ -50,6 +50,13 @@ namespace CPUID
 		return buffer[3] & (1 << 20);
 	}
 
+	bool has_pge()
+	{
+		uint32_t ecx, edx;
+		get_features(ecx, edx);
+		return edx & CPUID::EDX_PGE;
+	}
+
 	const char* feature_string_ecx(uint32_t feat)
 	{
 		switch (feat)


@@ -128,9 +128,9 @@ namespace Kernel
 		auto inode_location = find_inode(ino);
 		PageTable::with_fast_page(inode_location.paddr, [&] {
 			auto& inode_info = PageTable::fast_page_as_sized<TmpInodeInfo>(inode_location.index);
-			ASSERT_EQ(inode_info.nlink, 0);
+			ASSERT(inode_info.nlink == 0);
 			for (auto paddr : inode_info.block)
-				ASSERT_EQ(paddr, 0);
+				ASSERT(paddr == 0);
 			inode_info = {};
 		});
 		ASSERT(!m_inode_cache.contains(ino));
@@ -166,8 +166,8 @@ namespace Kernel
 	{
 		LockGuard _(m_mutex);
 
-		ASSERT_GTE(ino, first_inode);
-		ASSERT_LT(ino, max_inodes);
+		ASSERT(ino >= first_inode);
+		ASSERT(ino < max_inodes);
 
 		constexpr size_t inodes_per_page = PAGE_SIZE / sizeof(TmpInodeInfo);
@@ -220,7 +220,7 @@ namespace Kernel
 	{
 		LockGuard _(m_mutex);
 
-		ASSERT_GT(index, 0);
+		ASSERT(index > 0);
 
 		return find_indirect(m_data_pages, index - first_data_page, 3);
 	}


@@ -1,6 +1,9 @@
+#include <kernel/Debug.h>
 #include <kernel/Input/KeyEvent.h>
 #include <kernel/Input/PS2/Keymap.h>
 
+#include <string.h>
+
 namespace Kernel::Input
 {


@@ -8,29 +8,8 @@
 namespace Kernel
 {
 
-	namespace IDT { void register_irq_handler(uint8_t irq, Interruptable*); }
-
 	static InterruptController* s_instance = nullptr;
 
-	void Interruptable::set_irq(int irq)
-	{
-		if (m_irq != -1)
-			IDT::register_irq_handler(m_irq, nullptr);
-		m_irq = irq;
-		IDT::register_irq_handler(irq, this);
-	}
-
-	void Interruptable::enable_interrupt()
-	{
-		ASSERT(m_irq != -1);
-		InterruptController::get().enable_irq(m_irq);
-	}
-
-	void Interruptable::disable_interrupt()
-	{
-		ASSERT_NOT_REACHED();
-	}
-
 	InterruptController& InterruptController::get()
 	{
 		ASSERT(s_instance);


@@ -0,0 +1,28 @@
+#include <kernel/IDT.h>
+#include <kernel/Interruptable.h>
+#include <kernel/InterruptController.h>
+#include <kernel/Processor.h>
+
+namespace Kernel
+{
+
+	void Interruptable::set_irq(int irq)
+	{
+		if (m_irq != -1)
+			Processor::idt().register_irq_handler(m_irq, nullptr);
+		m_irq = irq;
+		Processor::idt().register_irq_handler(irq, this);
+	}
+
+	void Interruptable::enable_interrupt()
+	{
+		ASSERT(m_irq != -1);
+		InterruptController::get().enable_irq(m_irq);
+	}
+
+	void Interruptable::disable_interrupt()
+	{
+		ASSERT_NOT_REACHED();
+	}
+
+}


@ -1,61 +0,0 @@
#include <kernel/InterruptController.h>
#include <kernel/Lock/SpinLock.h>
#include <kernel/Scheduler.h>
// FIXME: try to move these to header
namespace Kernel
{
InterruptState SpinLock::lock()
{
auto id = Processor::current_id();
ASSERT_NEQ(m_locker.load(), id);
auto state = Processor::get_interrupt_state();
Processor::set_interrupt_state(InterruptState::Disabled);
while (!m_locker.compare_exchange(PROCESSOR_NONE, id, BAN::MemoryOrder::memory_order_acquire))
__builtin_ia32_pause();
return state;
}
void SpinLock::unlock(InterruptState state)
{
ASSERT_EQ(m_locker.load(), Processor::current_id());
m_locker.store(PROCESSOR_NONE, BAN::MemoryOrder::memory_order_release);
Processor::set_interrupt_state(state);
}
InterruptState RecursiveSpinLock::lock()
{
auto id = Processor::current_id();
auto state = Processor::get_interrupt_state();
Processor::set_interrupt_state(InterruptState::Disabled);
if (id == m_locker)
ASSERT_GT(m_lock_depth, 0);
else
{
while (!m_locker.compare_exchange(PROCESSOR_NONE, id, BAN::MemoryOrder::memory_order_acquire))
__builtin_ia32_pause();
ASSERT_EQ(m_lock_depth, 0);
}
m_lock_depth++;
return state;
}
void RecursiveSpinLock::unlock(InterruptState state)
{
ASSERT_EQ(m_locker.load(), Processor::current_id());
ASSERT_GT(m_lock_depth, 0);
if (--m_lock_depth == 0)
m_locker.store(PROCESSOR_NONE, BAN::MemoryOrder::memory_order_release);
Processor::set_interrupt_state(state);
}
}


@@ -129,7 +129,7 @@ namespace Kernel
 		size_t file_offset = m_offset + (vaddr - m_vaddr);
 		size_t bytes = BAN::Math::min<size_t>(m_size - file_offset, PAGE_SIZE);
 
-		ASSERT_EQ(&PageTable::current(), &m_page_table);
+		ASSERT(&PageTable::current() == &m_page_table);
 		auto read_ret = m_inode->read(file_offset, BAN::ByteSpan((uint8_t*)vaddr, bytes));
 
 		if (read_ret.is_error())


@@ -158,9 +158,9 @@ namespace Kernel
 			return;
 
 		// Verify no overflow
-		ASSERT_LTE(bytes, size());
-		ASSERT_LTE(offset, size());
-		ASSERT_LTE(offset, size() - bytes);
+		ASSERT(bytes <= size());
+		ASSERT(offset <= size());
+		ASSERT(offset <= size() - bytes);
 
 		if (&PageTable::current() == &m_page_table || &PageTable::kernel() == &m_page_table)
 		{


@@ -259,7 +259,7 @@ namespace Kernel
 	BAN::ErrorOr<void> E1000::send_bytes(BAN::MACAddress destination, EtherType protocol, BAN::ConstByteSpan buffer)
 	{
-		ASSERT_LTE(buffer.size() + sizeof(EthernetHeader), E1000_TX_BUFFER_SIZE);
+		ASSERT(buffer.size() + sizeof(EthernetHeader) <= E1000_TX_BUFFER_SIZE);
 
 		SpinLockGuard _(m_lock);
@@ -299,7 +299,7 @@ namespace Kernel
 			auto& descriptor = reinterpret_cast<volatile e1000_rx_desc*>(m_rx_descriptor_region->vaddr())[rx_current];
 			if (!(descriptor.status & 1))
 				break;
-			ASSERT_LTE((uint16_t)descriptor.length, E1000_RX_BUFFER_SIZE);
+			ASSERT(descriptor.length <= E1000_RX_BUFFER_SIZE);
 
 			NetworkManager::get().on_receive(*this, BAN::ConstByteSpan {
 				reinterpret_cast<const uint8_t*>(m_rx_buffer_region->vaddr() + rx_current * E1000_RX_BUFFER_SIZE),


@@ -455,7 +455,7 @@ namespace Kernel
 			if (m_send_window.data_size > 0 && m_send_window.current_ack - m_send_window.has_ghost_byte > m_send_window.start_seq)
 			{
 				uint32_t acknowledged_bytes = m_send_window.current_ack - m_send_window.start_seq - m_send_window.has_ghost_byte;
-				ASSERT_LTE(acknowledged_bytes, m_send_window.data_size);
+				ASSERT(acknowledged_bytes <= m_send_window.data_size);
 
 				m_send_window.data_size -= acknowledged_bytes;
 				m_send_window.start_seq += acknowledged_bytes;


@@ -1,6 +1,6 @@
 #include <kernel/Panic.h>
 
-namespace Kernel::detail
+namespace Kernel
 {
-	bool g_paniced = false;
+	volatile bool g_paniced = false;
 }


@ -1,34 +1,70 @@
#include <BAN/Vector.h> #include <kernel/Memory/kmalloc.h>
#include <kernel/Processor.h> #include <kernel/Processor.h>
#include <kernel/Debug.h>
namespace Kernel namespace Kernel
{ {
static BAN::Vector<Processor> s_processors; static constexpr uint32_t MSR_IA32_GS_BASE = 0xC0000101;
ProcessorID Processor::s_bsb_id { PROCESSOR_NONE };
static BAN::Array<Processor, 0xFF> s_processors;
static ProcessorID read_processor_id()
{
uint8_t id;
asm volatile(
"movl $1, %%eax;"
"cpuid;"
"shrl $24, %%ebx;"
"movb %%bl, %0;"
: "=rm"(id)
:: "eax", "ebx", "ecx", "edx"
);
return id;
}
Processor& Processor::create(ProcessorID id) Processor& Processor::create(ProcessorID id)
{ {
while (s_processors.size() <= id) // bsb is the first processor
MUST(s_processors.emplace_back()); if (s_bsb_id == PROCESSOR_NONE)
s_bsb_id = id = read_processor_id();
auto& processor = s_processors[id]; auto& processor = s_processors[id];
if (processor.m_stack == nullptr)
{ ASSERT(processor.m_id == PROCESSOR_NONE);
processor.m_stack = kmalloc(m_stack_size, 4096, true); processor.m_id = id;
processor.m_stack = kmalloc(s_stack_size, 4096, true);
ASSERT(processor.m_stack); ASSERT(processor.m_stack);
}
processor.m_gdt = GDT::create();
ASSERT(processor.m_gdt);
processor.m_idt = IDT::create(id == s_bsb_id);
ASSERT(processor.m_idt);
return processor; return processor;
} }
Processor::~Processor() Processor& Processor::initialize()
{ {
if (m_stack) auto id = read_processor_id();
kfree(m_stack); auto& processor = s_processors[id];
m_stack = nullptr;
}
Processor& Processor::get(ProcessorID id) // set gs base to pointer to this processor
{ uint64_t ptr = reinterpret_cast<uint64_t>(&processor);
return s_processors[id]; asm volatile("wrmsr" :: "d"(ptr >> 32), "a"(ptr), "c"(MSR_IA32_GS_BASE));
ASSERT(processor.m_gdt);
processor.gdt().load();
ASSERT(processor.m_idt);
processor.idt().load();
return processor;
} }
} }


@ -18,7 +18,7 @@ namespace Kernel
ALWAYS_INLINE static void load_temp_stack() ALWAYS_INLINE static void load_temp_stack()
{ {
asm volatile("movq %0, %%rsp" :: "rm"(Processor::current().stack_top())); asm volatile("movq %0, %%rsp" :: "rm"(Processor::current_stack_top()));
} }
BAN::ErrorOr<void> Scheduler::initialize() BAN::ErrorOr<void> Scheduler::initialize()
@ -64,7 +64,7 @@ namespace Kernel
auto state = m_lock.lock(); auto state = m_lock.lock();
wake_threads(); wake_threads();
if (save_current_thread()) if (save_current_thread())
return m_lock.unlock(state); return Processor::set_interrupt_state(state);
advance_current_thread(); advance_current_thread();
execute_current_thread_locked(); execute_current_thread_locked();
ASSERT_NOT_REACHED(); ASSERT_NOT_REACHED();
@ -86,7 +86,7 @@ namespace Kernel
if (m_active_threads.empty() || &current_thread() != m_idle_thread) if (m_active_threads.empty() || &current_thread() != m_idle_thread)
return m_lock.unlock(state); return m_lock.unlock(state);
if (save_current_thread()) if (save_current_thread())
return m_lock.unlock(state); return Processor::set_interrupt_state(state);
m_current_thread = m_active_threads.begin(); m_current_thread = m_active_threads.begin();
execute_current_thread_locked(); execute_current_thread_locked();
ASSERT_NOT_REACHED(); ASSERT_NOT_REACHED();
@ -94,7 +94,7 @@ namespace Kernel
void Scheduler::wake_threads() void Scheduler::wake_threads()
{ {
ASSERT(m_lock.is_locked()); ASSERT(m_lock.current_processor_has_lock());
uint64_t current_time = SystemTimer::get().ms_since_boot(); uint64_t current_time = SystemTimer::get().ms_since_boot();
while (!m_sleeping_threads.empty() && m_sleeping_threads.front().wake_time <= current_time) while (!m_sleeping_threads.empty() && m_sleeping_threads.front().wake_time <= current_time)
@ -124,7 +124,7 @@ namespace Kernel
void Scheduler::advance_current_thread() void Scheduler::advance_current_thread()
{ {
ASSERT(m_lock.is_locked()); ASSERT(m_lock.current_processor_has_lock());
if (m_active_threads.empty()) if (m_active_threads.empty())
{ {
@ -137,7 +137,7 @@ namespace Kernel
void Scheduler::remove_and_advance_current_thread() void Scheduler::remove_and_advance_current_thread()
{ {
ASSERT(m_lock.is_locked()); ASSERT(m_lock.current_processor_has_lock());
ASSERT(m_current_thread); ASSERT(m_current_thread);
@ -158,7 +158,7 @@ namespace Kernel
// after getting the rsp // after getting the rsp
ALWAYS_INLINE bool Scheduler::save_current_thread() ALWAYS_INLINE bool Scheduler::save_current_thread()
{ {
ASSERT(m_lock.is_locked()); ASSERT(m_lock.current_processor_has_lock());
uintptr_t rsp, rip; uintptr_t rsp, rip;
push_callee_saved(); push_callee_saved();
@ -209,7 +209,7 @@ namespace Kernel
void Scheduler::execute_current_thread_locked() void Scheduler::execute_current_thread_locked()
{ {
ASSERT(m_lock.is_locked()); ASSERT(m_lock.current_processor_has_lock());
load_temp_stack(); load_temp_stack();
PageTable::kernel().load(); PageTable::kernel().load();
execute_current_thread_stack_loaded(); execute_current_thread_stack_loaded();
@ -218,12 +218,12 @@ namespace Kernel
NEVER_INLINE void Scheduler::execute_current_thread_stack_loaded() NEVER_INLINE void Scheduler::execute_current_thread_stack_loaded()
{ {
ASSERT(m_lock.is_locked()); ASSERT(m_lock.current_processor_has_lock());
#if SCHEDULER_VERIFY_STACK #if SCHEDULER_VERIFY_STACK
vaddr_t rsp; vaddr_t rsp;
read_rsp(rsp); read_rsp(rsp);
ASSERT(Processor::current().stack_bottom() <= rsp && rsp <= Processor::current().stack_top()); ASSERT(Processor::current_stack_bottom() <= rsp && rsp <= Processor::current_stack_top());
ASSERT(&PageTable::current() == &PageTable::kernel()); ASSERT(&PageTable::current() == &PageTable::kernel());
#endif #endif
@ -256,7 +256,7 @@ namespace Kernel
if (current->has_process()) if (current->has_process())
{ {
current->process().page_table().load(); current->process().page_table().load();
GDT::set_tss_stack(current->interrupt_stack_base() + current->interrupt_stack_size()); Processor::gdt().set_tss_stack(current->interrupt_stack_base() + current->interrupt_stack_size());
} }
else else
PageTable::kernel().load(); PageTable::kernel().load();
@ -281,7 +281,7 @@ namespace Kernel
void Scheduler::set_current_thread_sleeping_impl(uint64_t wake_time) void Scheduler::set_current_thread_sleeping_impl(uint64_t wake_time)
{ {
ASSERT(m_lock.is_locked()); ASSERT(m_lock.current_processor_has_lock());
if (save_current_thread()) if (save_current_thread())
return; return;
@ -307,16 +307,18 @@ namespace Kernel
void Scheduler::set_current_thread_sleeping(uint64_t wake_time) void Scheduler::set_current_thread_sleeping(uint64_t wake_time)
{ {
SpinLockGuard _(m_lock); auto state = m_lock.lock();
m_current_thread->semaphore = nullptr; m_current_thread->semaphore = nullptr;
set_current_thread_sleeping_impl(wake_time); set_current_thread_sleeping_impl(wake_time);
Processor::set_interrupt_state(state);
} }
void Scheduler::block_current_thread(Semaphore* semaphore, uint64_t wake_time) void Scheduler::block_current_thread(Semaphore* semaphore, uint64_t wake_time)
{ {
SpinLockGuard _(m_lock); auto state = m_lock.lock();
m_current_thread->semaphore = semaphore; m_current_thread->semaphore = semaphore;
set_current_thread_sleeping_impl(wake_time); set_current_thread_sleeping_impl(wake_time);
Processor::set_interrupt_state(state);
} }
void Scheduler::unblock_threads(Semaphore* semaphore) void Scheduler::unblock_threads(Semaphore* semaphore)


@@ -1,4 +1,5 @@
 #include <kernel/FS/DevFS/FileSystem.h>
+#include <kernel/InterruptController.h>
 #include <kernel/Storage/ATA/AHCI/Controller.h>
 #include <kernel/Storage/ATA/ATABus.h>
 #include <kernel/Storage/ATA/ATAController.h>


@@ -170,7 +170,7 @@ namespace Kernel
 		// Signal mask is inherited
 
 		// Setup stack for returning
-		ASSERT_EQ(m_rsp % PAGE_SIZE, 0u);
+		ASSERT(m_rsp % PAGE_SIZE == 0);
 		PageTable::with_fast_page(process().page_table().physical_address_of(m_rsp - PAGE_SIZE), [&] {
 			uintptr_t rsp = PageTable::fast_page() + PAGE_SIZE;
 			write_to_stack(rsp, nullptr); // alignment
@@ -199,7 +199,7 @@ namespace Kernel
 		m_signal_pending_mask = 0;
 		m_signal_block_mask = ~0ull;
 
-		ASSERT_EQ(m_rsp % PAGE_SIZE, 0u);
+		ASSERT(m_rsp % PAGE_SIZE == 0);
 		PageTable::with_fast_page(process().page_table().physical_address_of(m_rsp - PAGE_SIZE), [&] {
 			uintptr_t rsp = PageTable::fast_page() + PAGE_SIZE;
 			write_to_stack(rsp, nullptr); // alignment


@@ -102,16 +102,12 @@ extern "C" void kernel_main(uint32_t boot_magic, uint32_t boot_info)
 	parse_boot_info(boot_magic, boot_info);
 	dprintln("boot info parsed");
 
-	Processor::create(Processor::current_id());
+	Processor::create(0);
+	Processor::initialize();
 	dprintln("BSP initialized");
 
-	GDT::initialize();
-	dprintln("GDT initialized");
-
-	IDT::initialize();
-	dprintln("IDT initialized");
-
 	PageTable::initialize();
+	PageTable::kernel().initial_load();
 	dprintln("PageTable initialized");
 
 	Heap::initialize();
@@ -129,8 +125,6 @@
 	SystemTimer::initialize(cmdline.force_pic);
 	dprintln("Timers initialized");
 
-	InterruptController::get().initialize_multiprocessor();
-
 	DevFileSystem::initialize();
 	dprintln("devfs initialized");
@@ -143,6 +137,8 @@
 	if (g_terminal_driver)
 		dprintln("Framebuffer terminal initialized");
 
+	InterruptController::get().initialize_multiprocessor();
+
 	ProcFileSystem::initialize();
 	dprintln("procfs initialized");
@@ -215,8 +211,11 @@ extern "C" void ap_main()
 {
 	using namespace Kernel;
 
-	dprintln("hello from processor {}", Processor::current_id());
+	Processor::initialize();
+	PageTable::kernel().initial_load();
+
+	dprintln("ap{} initialized", Processor::current_id());
 
 	for (;;)
-		asm volatile("");
+		asm volatile("hlt");
 }


@@ -30,6 +30,8 @@ set(LIBC_SOURCES
 	unistd.cpp
 	math.S
 	icxxabi.cpp
+
+	../BAN/BAN/Assert.cpp
 )
 
 add_custom_target(libc-headers