Kernel: Remove old GDT, IDT and MMU code from i386

It will be easier to just rewrite them
Bananymous 2024-03-22 12:47:34 +02:00
parent 7ce0370b6a
commit 26ed689d30
4 changed files with 0 additions and 647 deletions


@@ -117,9 +117,6 @@ elseif("${BANAN_ARCH}" STREQUAL "i386")
set(KERNEL_SOURCES
${KERNEL_SOURCES}
arch/i386/boot.S
arch/i386/GDT.cpp
arch/i386/IDT.cpp
arch/i386/MMU.cpp
arch/i386/SpinLock.S
arch/i386/Thread.S
)


@@ -1,147 +0,0 @@
#include <BAN/Assert.h>
#include <kernel/GDT.h>
#include <string.h>
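// Top of the boot stack, defined elsewhere (presumably in boot.S); the zero-length
// array lets the symbol's address be taken without allocating storage here.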
extern "C" uintptr_t g_boot_stack_top[0];
namespace Kernel::GDT
{
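// Legacy 32-bit Task State Segment. Only ss0/esp0 are used here, to tell the CPU
// which stack to switch to when entering ring 0.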
struct TaskStateSegment
{
uint16_t link;
uint16_t reserved1;
uint32_t esp0;
uint16_t ss0;
uint16_t reserved2;
uint32_t esp1;
uint16_t ss1;
uint16_t reserved3;
uint32_t esp2;
uint16_t ss2;
uint16_t reserved4;
uint32_t cr3;
uint32_t eip;
uint32_t eflags;
uint32_t eax;
uint32_t ecx;
uint32_t edx;
uint32_t ebx;
uint32_t esp;
uint32_t ebp;
uint32_t esi;
uint32_t edi;
uint16_t es;
uint16_t reserved5;
uint16_t cs;
uint16_t reserved6;
uint16_t ss;
uint16_t reserved7;
uint16_t ds;
uint16_t reserved8;
uint16_t fs;
uint16_t reserved9;
uint16_t gs;
uint16_t reserved10;
uint16_t ldtr;
uint16_t reserved11;
uint16_t reserved12;
uint16_t iopb;
uint32_t ssp;
} __attribute__((packed));
union SegmentDescriptor
{
struct
{
uint16_t limit1;
uint16_t base1;
uint8_t base2;
uint8_t access;
uint8_t limit2 : 4;
uint8_t flags : 4;
uint8_t base3;
} __attribute__((packed));
struct
{
uint32_t low;
uint32_t high;
} __attribute__((packed));
} __attribute__((packed));
struct GDTR
{
uint16_t size;
uint32_t address;
} __attribute__((packed));
static TaskStateSegment* s_tss = nullptr;
static SegmentDescriptor* s_gdt = nullptr;
static GDTR s_gdtr;
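// Split the 32-bit base and 20-bit limit across the descriptor's base1/base2/base3
// and limit1/limit2 fields.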
static void write_entry(uint8_t offset, uint32_t base, uint32_t limit, uint8_t access, uint8_t flags)
{
SegmentDescriptor& desc = *(SegmentDescriptor*)((uintptr_t)s_gdt + offset);
desc.base1 = base;
desc.base2 = base >> 16;
desc.base3 = base >> 24;
desc.limit1 = limit;
desc.limit2 = limit >> 16;
desc.access = access;
desc.flags = flags;
}
static void write_tss(uint8_t offset)
{
s_tss = new TaskStateSegment();
ASSERT(s_tss);
memset(s_tss, 0x00, sizeof(TaskStateSegment));
s_tss->ss0 = 0x10;
s_tss->esp0 = (uintptr_t)g_boot_stack_top;
write_entry(offset, (uint32_t)s_tss, sizeof(TaskStateSegment), 0x89, 0x0);
}
void set_tss_stack(uintptr_t esp)
{
s_tss->esp0 = esp;
}
static void flush_gdt()
{
asm volatile("lgdt %0" :: "m"(s_gdtr));
}
extern "C" void flush_tss(uint16_t offset)
{
asm volatile("ltr %0" :: "m"(offset));
}
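// Flat 4 GiB segmentation: a null descriptor followed by kernel code/data,
// user code/data, and the TSS.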
void initialize()
{
constexpr uint32_t descriptor_count = 6;
s_gdt = new SegmentDescriptor[descriptor_count];
ASSERT(s_gdt);
s_gdtr.address = (uint64_t)s_gdt;
s_gdtr.size = descriptor_count * sizeof(SegmentDescriptor) - 1;
write_entry(0x00, 0x00000000, 0x00000, 0x00, 0x0); // null
write_entry(0x08, 0x00000000, 0xFFFFF, 0x9A, 0xC); // kernel code
write_entry(0x10, 0x00000000, 0xFFFFF, 0x92, 0xC); // kernel data
write_entry(0x18, 0x00000000, 0xFFFFF, 0xFA, 0xC); // user code
write_entry(0x20, 0x00000000, 0xFFFFF, 0xF2, 0xC); // user data
write_tss(0x28);
flush_gdt();
flush_tss(0x28);
}
}


@@ -1,270 +0,0 @@
#include <BAN/Errors.h>
#include <kernel/IDT.h>
#include <kernel/InterruptController.h>
#include <kernel/Memory/kmalloc.h>
#include <kernel/Panic.h>
#include <kernel/Scheduler.h>
#define INTERRUPT_HANDLER____(i, msg) \
static void interrupt ## i () \
{ \
uint32_t eax, ebx, ecx, edx; \
uint32_t esp, ebp; \
uint32_t cr0, cr2, cr3, cr4; \
asm volatile("":"=a"(eax),"=b"(ebx),"=c"(ecx),"=d"(edx)); \
asm volatile("movl %%esp, %%eax":"=a"(esp)); \
asm volatile("movl %%ebp, %%eax":"=a"(ebp)); \
asm volatile("movl %%cr0, %%eax":"=a"(cr0)); \
asm volatile("movl %%cr2, %%eax":"=a"(cr2)); \
asm volatile("movl %%cr3, %%eax":"=a"(cr3)); \
asm volatile("movl %%cr4, %%eax":"=a"(cr4)); \
Kernel::panic(msg "\r\nRegister dump\r\n" \
"eax=0x{8H}, ebx=0x{8H}, ecx=0x{8H}, edx=0x{8H}\r\n" \
"esp=0x{8H}, ebp=0x{8H}\r\n" \
"CR0=0x{8H}, CR2=0x{8H}, CR3=0x{8H}, CR4=0x{8H}\r\n", \
eax, ebx, ecx, edx, esp, ebp, cr0, cr2, cr3, cr4); \
}
#define INTERRUPT_HANDLER_ERR(i, msg) \
static void interrupt ## i () \
{ \
uint32_t eax, ebx, ecx, edx; \
uint32_t esp, ebp; \
uint32_t cr0, cr2, cr3, cr4; \
uint32_t error_code; \
asm volatile("":"=a"(eax),"=b"(ebx),"=c"(ecx),"=d"(edx)); \
asm volatile("movl %%esp, %%eax":"=a"(esp)); \
asm volatile("movl %%ebp, %%eax":"=a"(ebp)); \
asm volatile("movl %%cr0, %%eax":"=a"(cr0)); \
asm volatile("movl %%cr2, %%eax":"=a"(cr2)); \
asm volatile("movl %%cr3, %%eax":"=a"(cr3)); \
asm volatile("movl %%cr4, %%eax":"=a"(cr4)); \
asm volatile("popl %%eax":"=a"(error_code)); \
Kernel::panic(msg " (error code: 0x{8H})\r\n" \
"Register dump\r\n" \
"eax=0x{8H}, ebx=0x{8H}, ecx=0x{8H}, edx=0x{8H}\r\n" \
"esp=0x{8H}, ebp=0x{8H}\r\n" \
"CR0=0x{8H}, CR2=0x{8H}, CR3=0x{8H}, CR4=0x{8H}\r\n", \
eax, ebx, ecx, edx, esp, ebp, cr0, cr2, cr3, cr4, error_code); \
}
#define REGISTER_HANDLER(i) register_interrupt_handler(i, interrupt ## i)
namespace IDT
{
struct GateDescriptor
{
uint16_t offset1;
uint16_t selector;
uint8_t reserved : 5;
uint8_t zero1 : 3;
uint8_t type : 4;
uint8_t zero2 : 1;
uint8_t DPL : 2;
uint8_t present : 1;
uint16_t offset2;
} __attribute__((packed));
struct IDTR
{
uint16_t size;
void* offset;
} __attribute__((packed));
static IDTR s_idtr;
static GateDescriptor* s_idt = nullptr;
static void(*s_irq_handlers[16])() { nullptr };
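// Exception handlers for vectors 0x00-0x1F. Vectors that push an error code use
// the _ERR variant, which also pops that code for the panic message.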
INTERRUPT_HANDLER____(0x00, "Division Error")
INTERRUPT_HANDLER____(0x01, "Debug")
INTERRUPT_HANDLER____(0x02, "Non-maskable Interrupt")
INTERRUPT_HANDLER____(0x03, "Breakpoint")
INTERRUPT_HANDLER____(0x04, "Overflow")
INTERRUPT_HANDLER____(0x05, "Bound Range Exception")
INTERRUPT_HANDLER____(0x06, "Invalid Opcode")
INTERRUPT_HANDLER____(0x07, "Device Not Available")
INTERRUPT_HANDLER_ERR(0x08, "Double Fault")
INTERRUPT_HANDLER____(0x09, "Coprocessor Segment Overrun")
INTERRUPT_HANDLER_ERR(0x0A, "Invalid TSS")
INTERRUPT_HANDLER_ERR(0x0B, "Segment Not Present")
INTERRUPT_HANDLER_ERR(0x0C, "Stack-Segment Fault")
INTERRUPT_HANDLER_ERR(0x0D, "General Protection Fault")
INTERRUPT_HANDLER_ERR(0x0E, "Page Fault")
INTERRUPT_HANDLER____(0x0F, "Unknown Exception 0x0F")
INTERRUPT_HANDLER____(0x10, "x87 Floating-Point Exception")
INTERRUPT_HANDLER_ERR(0x11, "Alignment Check")
INTERRUPT_HANDLER____(0x12, "Machine Check")
INTERRUPT_HANDLER____(0x13, "SIMD Floating-Point Exception")
INTERRUPT_HANDLER____(0x14, "Virtualization Exception")
INTERRUPT_HANDLER_ERR(0x15, "Control Protection Exception")
INTERRUPT_HANDLER____(0x16, "Unknown Exception 0x16")
INTERRUPT_HANDLER____(0x17, "Unknown Exception 0x17")
INTERRUPT_HANDLER____(0x18, "Unknown Exception 0x18")
INTERRUPT_HANDLER____(0x19, "Unknown Exception 0x19")
INTERRUPT_HANDLER____(0x1A, "Unknown Exception 0x1A")
INTERRUPT_HANDLER____(0x1B, "Unknown Exception 0x1B")
INTERRUPT_HANDLER____(0x1C, "Hypervisor Injection Exception")
INTERRUPT_HANDLER_ERR(0x1D, "VMM Communication Exception")
INTERRUPT_HANDLER_ERR(0x1E, "Security Exception")
INTERRUPT_HANDLER____(0x1F, "Unkown Exception 0x1F")
extern "C" void handle_irq()
{
uint8_t irq;
for (uint32_t i = 0; i < 16; i++)
{
if (InterruptController::get().is_in_service(i))
{
irq = i;
goto found;
}
}
dprintln("Spurious irq");
return;
found:
if (s_irq_handlers[irq])
s_irq_handlers[irq]();
else
dprintln("no handler for irq 0x{2H}\n", irq);
// NOTE: Scheduler sends PIT eoi's
if (irq != PIT_IRQ)
InterruptController::get().eoi(irq);
Kernel::Scheduler::get().reschedule_if_idling();
}
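// Common IRQ entry: saves the general registers, loads the kernel data segment
// (copied from ss) into ds/es, calls handle_irq, then restores state and iret's.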
extern "C" void handle_irq_common();
asm(
".globl handle_irq_common;"
"handle_irq_common:"
"pusha;"
"pushw %ds;"
"pushw %es;"
"pushw %ss;"
"pushw %ss;"
"popw %ds;"
"popw %es;"
"call handle_irq;"
"popw %es;"
"popw %ds;"
"popa;"
"iret;"
);
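// int 0x80 entry: saves registers and segments like the IRQ stub, pushes the
// syscall number (eax) and arguments (ebx, ecx, edx) for cpp_syscall_handler,
// and returns its result to user space in eax.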
extern "C" void syscall_asm();
asm(
".global syscall_asm;"
"syscall_asm:"
"pusha;"
"pushw %ds;"
"pushw %es;"
"pushw %ss;"
"pushw %ss;"
"popw %ds;"
"popw %es;"
"pushl %edx;"
"pushl %ecx;"
"pushl %ebx;"
"pushl %eax;"
"call cpp_syscall_handler;"
"addl $16, %esp;"
"popw %es;"
"popw %ds;"
// NOTE: following instructions are same as in 'popa', except we skip eax
// since it holds the return value of the syscall.
"popl %edi;"
"popl %esi;"
"popl %ebp;"
"addl $4, %esp;"
"popl %ebx;"
"popl %edx;"
"popl %ecx;"
"addl $4, %esp;"
"iret;"
);
static void flush_idt()
{
asm volatile("lidt %0"::"m"(s_idtr));
}
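// Install a 32-bit interrupt gate (type 0xE) in the kernel code segment
// (selector 0x08) for the given vector.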
static void register_interrupt_handler(uint8_t index, void(*f)())
{
GateDescriptor& descriptor = s_idt[index];
descriptor.offset1 = (uint32_t)f & 0xFFFF;
descriptor.selector = 0x08;
descriptor.type = 0xE;
descriptor.DPL = 0;
descriptor.present = 1;
descriptor.offset2 = (uint32_t)f >> 16;
}
void register_irq_handler(uint8_t irq, void(*f)())
{
s_irq_handlers[irq] = f;
register_interrupt_handler(IRQ_VECTOR_BASE + irq, handle_irq_common);
flush_idt();
}
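// Syscall gates are installed with DPL 3 so user mode can invoke them with 'int'.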
void register_syscall_handler(uint8_t offset, void(*handler)())
{
register_interrupt_handler(offset, handler);
s_idt[offset].DPL = 3;
}
void initialize()
{
constexpr size_t idt_size = 0x100 * sizeof(GateDescriptor);
s_idt = (GateDescriptor*)kmalloc(idt_size);
ASSERT(s_idt);
memset(s_idt, 0x00, idt_size);
s_idtr.offset = s_idt;
s_idtr.size = idt_size - 1;
REGISTER_HANDLER(0x00);
REGISTER_HANDLER(0x01);
REGISTER_HANDLER(0x02);
REGISTER_HANDLER(0x03);
REGISTER_HANDLER(0x04);
REGISTER_HANDLER(0x05);
REGISTER_HANDLER(0x06);
REGISTER_HANDLER(0x07);
REGISTER_HANDLER(0x08);
REGISTER_HANDLER(0x09);
REGISTER_HANDLER(0x0A);
REGISTER_HANDLER(0x0B);
REGISTER_HANDLER(0x0C);
REGISTER_HANDLER(0x0D);
REGISTER_HANDLER(0x0E);
REGISTER_HANDLER(0x0F);
REGISTER_HANDLER(0x10);
REGISTER_HANDLER(0x11);
REGISTER_HANDLER(0x12);
REGISTER_HANDLER(0x13);
REGISTER_HANDLER(0x14);
REGISTER_HANDLER(0x15);
REGISTER_HANDLER(0x16);
REGISTER_HANDLER(0x17);
REGISTER_HANDLER(0x18);
REGISTER_HANDLER(0x19);
REGISTER_HANDLER(0x1A);
REGISTER_HANDLER(0x1B);
REGISTER_HANDLER(0x1C);
REGISTER_HANDLER(0x1D);
REGISTER_HANDLER(0x1E);
REGISTER_HANDLER(0x1F);
register_syscall_handler(0x80, syscall_asm);
flush_idt();
}
}


@@ -1,227 +0,0 @@
#include <BAN/Errors.h>
#include <kernel/Debug.h>
#include <kernel/Memory/MMU.h>
#include <kernel/Memory/kmalloc.h>
#include <string.h>
#define MMU_DEBUG_PRINT 0
// bits 31-12 set
#define PAGE_MASK 0xfffff000
#define FLAGS_MASK 0x00000fff
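// The paging structures below follow the i386 PAE layout: a 4-entry
// page-directory-pointer table, with 512-entry page directories and
// page tables of 64-bit entries.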
namespace Kernel
{
static MMU* s_instance = nullptr;
void MMU::initialize()
{
ASSERT(s_instance == nullptr);
s_instance = new MMU();
ASSERT(s_instance);
s_instance->initialize_kernel();
s_instance->load();
}
MMU& MMU::get()
{
ASSERT(s_instance);
return *s_instance;
}
static uint64_t* allocate_page_aligned_page()
{
uint64_t* page = (uint64_t*)kmalloc(PAGE_SIZE, PAGE_SIZE);
ASSERT(page);
ASSERT(((uintptr_t)page % PAGE_SIZE) == 0);
memset(page, 0, PAGE_SIZE);
return page;
}
void MMU::initialize_kernel()
{
m_highest_paging_struct = (uint64_t*)kmalloc(sizeof(uint64_t) * 4, 32);
ASSERT(m_highest_paging_struct);
ASSERT(((uintptr_t)m_highest_paging_struct % 32) == 0);
// allocate all page directories
for (int i = 0; i < 4; i++)
{
uint64_t* page_directory = allocate_page_aligned_page();
m_highest_paging_struct[i] = (uint64_t)page_directory | Flags::Present;
}
// FIXME: We should just identity map until g_kernel_end
// create and identity map first 6 MiB
uint64_t* page_directory1 = (uint64_t*)(m_highest_paging_struct[0] & PAGE_MASK);
for (uint64_t i = 0; i < 3; i++)
{
uint64_t* page_table = allocate_page_aligned_page();
for (uint64_t j = 0; j < 512; j++)
page_table[j] = (i << 21) | (j << 12) | Flags::ReadWrite | Flags::Present;
page_directory1[i] = (uint64_t)page_table | Flags::ReadWrite | Flags::Present;
}
// don't map the first page (0 -> 4 KiB) so that a nullptr dereference
// causes a page fault :)
uint64_t* page_table1 = (uint64_t*)(page_directory1[0] & PAGE_MASK);
page_table1[0] = 0;
}
MMU::MMU()
{
if (s_instance == nullptr)
return;
// Here we copy s_instance's paging structs since they are
// global for every process
uint64_t* global_pdpt = s_instance->m_highest_paging_struct;
uint64_t* pdpt = (uint64_t*)kmalloc(sizeof(uint64_t) * 4, 32);
ASSERT(pdpt);
for (uint32_t pdpte = 0; pdpte < 4; pdpte++)
{
if (!(global_pdpt[pdpte] & Flags::Present))
continue;
uint64_t* global_pd = (uint64_t*)(global_pdpt[pdpte] & PAGE_MASK);
uint64_t* pd = allocate_page_aligned_page();
pdpt[pdpte] = (uint64_t)pd | (global_pdpt[pdpte] & ~PAGE_MASK);
for (uint32_t pde = 0; pde < 512; pde++)
{
if (!(global_pd[pde] & Flags::Present))
continue;
uint64_t* global_pt = (uint64_t*)(global_pd[pde] & PAGE_MASK);
uint64_t* pt = allocate_page_aligned_page();
pd[pde] = (uint64_t)pt | (global_pd[pde] & ~PAGE_MASK);
memcpy(pt, global_pt, PAGE_SIZE);
}
}
m_highest_paging_struct = pdpt;
}
MMU::~MMU()
{
uint64_t* pdpt = m_highest_paging_struct;
for (uint32_t pdpte = 0; pdpte < 4; pdpte++)
{
if (!(pdpt[pdpte] & Flags::Present))
continue;
uint64_t* pd = (uint64_t*)(pdpt[pdpte] & PAGE_MASK);
for (uint32_t pde = 0; pde < 512; pde++)
{
if (!(pd[pde] & Flags::Present))
continue;
kfree((void*)(pd[pde] & PAGE_MASK));
}
kfree(pd);
}
kfree(pdpt);
}
void MMU::load()
{
asm volatile("movl %0, %%cr3" :: "r"(m_highest_paging_struct));
}
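// A virtual address is decomposed as: bits 31-30 select the PDPT entry,
// bits 29-21 the page directory entry, and bits 20-12 the page table entry.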
void MMU::map_page_at(paddr_t paddr, vaddr_t vaddr, uint8_t flags)
{
#if MMU_DEBUG_PRINT
dprintln("AllocatePage(0x{8H})", address);
#endif
ASSERT(flags & Flags::Present);
ASSERT(!(paddr & ~PAGE_MASK));
ASSERT(!(vaddr & ~PAGE_MASK));
uint32_t pdpte = (vaddr & 0xC0000000) >> 30;
uint32_t pde = (vaddr & 0x3FE00000) >> 21;
uint32_t pte = (vaddr & 0x001FF000) >> 12;
uint64_t* page_directory = (uint64_t*)(m_highest_paging_struct[pdpte] & PAGE_MASK);
if (!(page_directory[pde] & Flags::Present))
{
uint64_t* page_table = allocate_page_aligned_page();
page_directory[pde] = (uint64_t)page_table;
}
page_directory[pde] |= flags;
uint64_t* page_table = (uint64_t*)(page_directory[pde] & PAGE_MASK);
page_table[pte] = paddr | flags;
}
void MMU::identity_map_page(paddr_t address, uint8_t flags)
{
address &= PAGE_MASK;
map_page_at(address, address, flags);
}
void MMU::identity_map_range(paddr_t address, ptrdiff_t size, uint8_t flags)
{
paddr_t s_page = address & PAGE_MASK;
paddr_t e_page = (address + size - 1) & PAGE_MASK;
for (paddr_t page = s_page; page <= e_page; page += PAGE_SIZE)
identity_map_page(page, flags);
}
void MMU::unmap_page(vaddr_t address)
{
#if MMU_DEBUG_PRINT
dprintln("UnAllocatePage(0x{8H})", address & PAGE_MASK);
#endif
uint32_t pdpte = (address & 0xC0000000) >> 30;
uint32_t pde = (address & 0x3FE00000) >> 21;
uint32_t pte = (address & 0x001FF000) >> 12;
uint64_t* page_directory = (uint64_t*)(m_highest_paging_struct[pdpte] & PAGE_MASK);
if (!(page_directory[pde] & Flags::Present))
return;
uint64_t* page_table = (uint64_t*)(page_directory[pde] & PAGE_MASK);
if (!(page_table[pte] & Flags::Present))
return;
page_table[pte] = 0;
// TODO: Unallocate the page table if this was the only allocated page
}
void MMU::unmap_range(vaddr_t address, ptrdiff_t size)
{
uintptr_t s_page = address & PAGE_MASK;
uintptr_t e_page = (address + size - 1) & PAGE_MASK;
for (uintptr_t page = s_page; page <= e_page; page += PAGE_SIZE)
unmap_page(page);
}
uint8_t MMU::get_page_flags(vaddr_t address) const
{
uint32_t pdpte = (address & 0xC0000000) >> 30;
uint32_t pde = (address & 0x3FE00000) >> 21;
uint32_t pte = (address & 0x001FF000) >> 12;
uint64_t* page_directory = (uint64_t*)(m_highest_paging_struct[pdpte] & PAGE_MASK);
if (!(page_directory[pde] & Flags::Present))
return 0;
uint64_t* page_table = (uint64_t*)(page_directory[pde] & PAGE_MASK);
if (!(page_table[pte] & Flags::Present))
return 0;
return page_table[pte] & FLAGS_MASK;
}
}