Kernel: Rewrite the whole scheduler and re-architect SMP handling

Change Semaphore -> ThreadBlocker
  This was not a semaphore; I just named it one because I didn't know
  what a semaphore was. I have been meaning to change this for a while,
  but it was in no way urgent :D

Implement SMP events. Processors can now be sent SMP events through
IPIs. An SMP event can be sent either to a single processor or
broadcast to every processor.
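
A minimal sketch of what the event payload might look like, inferred
from the FlushTLB usage in the PageTable diff below; the exact member
list and the single-target send function are assumptions:

  #include <stddef.h>
  #include <stdint.h>

  // Sketch only: layout inferred from the Processor::broadcast_smp_message()
  // calls visible in the diff below; members beyond flush_tlb are guesses.
  struct SMPMessage
  {
      enum class Type { FlushTLB /* , other event types */ };
      Type type;
      union
      {
          struct
          {
              uintptr_t vaddr;
              size_t    page_count;
          } flush_tlb;
      };
  };

  // Delivery is over IPIs, either targeted or broadcast (assumed names):
  //   Processor::send_smp_message(target_id, message);
  //   Processor::broadcast_smp_message(message);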

PageTable::{map_page,map_range,unmap_page,unmap_range}() now send an
SMP event to invalidate TLB caches for the changed pages.
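
The pattern in the hunks below: update the page tables and the local
TLB first, then broadcast a single FlushTLB event for the whole range.
On the receiving side the IPI handler would do roughly this (sketch
continuing the SMPMessage sketch above; the handler name and PAGE_SIZE
of 4096 are assumptions, the invlpg usage matches the diff):

  static constexpr size_t PAGE_SIZE = 4096;

  void handle_smp_message(const SMPMessage& message)
  {
      switch (message.type)
      {
          case SMPMessage::Type::FlushTLB:
              // Invalidate exactly the pages named in the message,
              // instead of reloading CR3 and losing the whole TLB.
              for (size_t i = 0; i < message.flush_tlb.page_count; i++)
                  asm volatile("invlpg (%0)"
                      :: "r"(message.flush_tlb.vaddr + i * PAGE_SIZE)
                      : "memory");
              break;
      }
  }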

The scheduler no longer uses a global run queue. Each processor has its
own scheduler that keeps track of the load on that processor. Once
every second the schedulers do load balancing. A scheduler has no
access to other processors' schedulers; it only sees their approximate
loads. If a scheduler decides that it has too much load, it sends a
thread to another processor through an SMP event.
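
Roughly, the once-a-second balancing decision on each processor could
look like this; every name and the migration threshold here are
assumptions, not the actual implementation:

  #include <stddef.h>
  #include <stdint.h>

  static constexpr size_t MAX_PROCESSORS = 32; // assumption

  // Each scheduler publishes an approximate load figure; nobody reads
  // another processor's run queue directly.
  static uint32_t s_processor_loads[MAX_PROCESSORS];

  size_t pick_target_processor(size_t my_id, size_t processor_count)
  {
      size_t target = my_id;
      for (size_t id = 0; id < processor_count; id++)
          if (s_processor_loads[id] < s_processor_loads[target])
              target = id;
      // Migrating costs an IPI and cache warmth, so only push a thread
      // when this processor is clearly busier than the idlest one.
      if (s_processor_loads[my_id] <= s_processor_loads[target] + 1)
          return my_id;
      return target;
  }

The chosen thread would then be handed to `target` with a single SMP
event, and the receiving scheduler enqueues it on its own run queue.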

Schedulers are currently driven by the timer interrupt on the BSP. This
should not be the case; each processor should use its own LAPIC timer
for scheduler interrupts. There is no reason to broadcast an SMP event
to all processors every time the BSP gets a timer interrupt.
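
For reference, arming a per-processor periodic LAPIC timer would look
roughly like this, using the standard xAPIC MMIO register offsets; the
mapped lapic_base pointer and the calibrated tick count are assumptions:

  #include <stdint.h>

  // Architectural xAPIC register offsets.
  static constexpr uint32_t LAPIC_REG_TIMER_LVT     = 0x320;
  static constexpr uint32_t LAPIC_REG_TIMER_INITIAL = 0x380;
  static constexpr uint32_t LAPIC_REG_TIMER_DIVIDE  = 0x3E0;
  static constexpr uint32_t LAPIC_TIMER_PERIODIC    = 1u << 17;

  void arm_lapic_timer(volatile uint32_t* lapic_base, uint8_t vector, uint32_t ticks)
  {
      auto write_reg = [&](uint32_t offset, uint32_t value) {
          lapic_base[offset / sizeof(uint32_t)] = value;
      };
      write_reg(LAPIC_REG_TIMER_DIVIDE, 0b0011);               // divide by 16
      write_reg(LAPIC_REG_TIMER_LVT, LAPIC_TIMER_PERIODIC | vector);
      write_reg(LAPIC_REG_TIMER_INITIAL, ticks);               // starts counting
  }

With this, each processor takes its own scheduler tick locally and the
BSP never needs to fan the tick out as an SMP event.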

The old scheduler only achieved around 20% idle load on QEMU, which was
probably due to a very inefficient implementation. The new scheduler
seems to average around 1% idle load, which is much closer to what I
would expect. On my own laptop the idle load seems to be only around
0.5% on each processor.
commit f8261c60c0
parent 9f90eeab05
Date:  2024-07-22 00:33:50 +03:00

60 changed files with 1559 additions and 715 deletions


@@ -78,7 +78,6 @@ namespace Kernel
 		ASSERT(s_kernel);
 		s_kernel->initialize_kernel();
-		s_kernel->initial_load();
 	}

 	void PageTable::initial_load()

@@ -237,7 +236,7 @@ namespace Kernel
 		ASSERT(!(pt[pte] & Flags::Present));
 		pt[pte] = paddr | Flags::ReadWrite | Flags::Present;
-		invalidate(fast_page());
+		invalidate(fast_page(), false);
 	}

 	void PageTable::unmap_fast_page()

@@ -260,7 +259,7 @@ namespace Kernel
 		ASSERT(pt[pte] & Flags::Present);
 		pt[pte] = 0;
-		invalidate(fast_page());
+		invalidate(fast_page(), false);
 	}

 	BAN::ErrorOr<PageTable*> PageTable::create_userspace()

@@ -322,13 +321,24 @@ namespace Kernel
 		Processor::set_current_page_table(this);
 	}

-	void PageTable::invalidate(vaddr_t vaddr)
+	void PageTable::invalidate(vaddr_t vaddr, bool send_smp_message)
 	{
 		ASSERT(vaddr % PAGE_SIZE == 0);
 		asm volatile("invlpg (%0)" :: "r"(vaddr) : "memory");
+
+		if (send_smp_message)
+		{
+			Processor::broadcast_smp_message({
+				.type = Processor::SMPMessage::Type::FlushTLB,
+				.flush_tlb = {
+					.vaddr = vaddr,
+					.page_count = 1
+				}
+			});
+		}
 	}

-	void PageTable::unmap_page(vaddr_t vaddr)
+	void PageTable::unmap_page(vaddr_t vaddr, bool send_smp_message)
 	{
 		ASSERT(vaddr);
 		ASSERT(vaddr != fast_page());

@@ -350,10 +360,7 @@ namespace Kernel
 		SpinLockGuard _(m_lock);

 		if (is_page_free(vaddr))
-		{
-			dwarnln("unmapping unmapped page {8H}", vaddr);
-			return;
-		}
+			Kernel::panic("trying to unmap unmapped page 0x{H}", vaddr);

 		uint64_t* pml4 = (uint64_t*)P2V(m_highest_paging_struct);
 		uint64_t* pdpt = (uint64_t*)P2V(pml4[pml4e] & PAGE_ADDR_MASK);

@@ -361,20 +368,29 @@ namespace Kernel
 		uint64_t* pt = (uint64_t*)P2V(pd[pde] & PAGE_ADDR_MASK);
 		pt[pte] = 0;
-		invalidate(vaddr);
+		invalidate(vaddr, send_smp_message);
 	}

 	void PageTable::unmap_range(vaddr_t vaddr, size_t size)
 	{
-		vaddr_t s_page = vaddr / PAGE_SIZE;
-		vaddr_t e_page = BAN::Math::div_round_up<vaddr_t>(vaddr + size, PAGE_SIZE);
+		ASSERT(vaddr % PAGE_SIZE == 0);
+
+		size_t page_count = range_page_count(vaddr, size);

 		SpinLockGuard _(m_lock);
-		for (vaddr_t page = s_page; page < e_page; page++)
-			unmap_page(page * PAGE_SIZE);
+		for (vaddr_t page = 0; page < page_count; page++)
+			unmap_page(vaddr + page * PAGE_SIZE, false);
+
+		Processor::broadcast_smp_message({
+			.type = Processor::SMPMessage::Type::FlushTLB,
+			.flush_tlb = {
+				.vaddr = vaddr,
+				.page_count = page_count
+			}
+		});
 	}

-	void PageTable::map_page_at(paddr_t paddr, vaddr_t vaddr, flags_t flags, MemoryType memory_type)
+	void PageTable::map_page_at(paddr_t paddr, vaddr_t vaddr, flags_t flags, MemoryType memory_type, bool send_smp_message)
 	{
 		ASSERT(vaddr);
 		ASSERT(vaddr != fast_page());

@@ -441,7 +457,7 @@ namespace Kernel
 		uint64_t* pt = (uint64_t*)P2V(pd[pde] & PAGE_ADDR_MASK);
 		pt[pte] = paddr | uwr_flags | extra_flags;
-		invalidate(vaddr);
+		invalidate(vaddr, send_smp_message);
 	}

 	void PageTable::map_range_at(paddr_t paddr, vaddr_t vaddr, size_t size, flags_t flags, MemoryType memory_type)

@@ -456,7 +472,15 @@ namespace Kernel
 		SpinLockGuard _(m_lock);
 		for (size_t page = 0; page < page_count; page++)
-			map_page_at(paddr + page * PAGE_SIZE, vaddr + page * PAGE_SIZE, flags, memory_type);
+			map_page_at(paddr + page * PAGE_SIZE, vaddr + page * PAGE_SIZE, flags, memory_type, false);
+
+		Processor::broadcast_smp_message({
+			.type = Processor::SMPMessage::Type::FlushTLB,
+			.flush_tlb = {
+				.vaddr = vaddr,
+				.page_count = page_count
+			}
+		});
 	}

 	uint64_t PageTable::get_page_data(vaddr_t vaddr) const


@@ -70,6 +70,13 @@ asm_yield_handler:
 	popaq
 	iretq

+.global asm_ipi_handler
+asm_ipi_handler:
+	pushaq
+	call cpp_ipi_handler
+	popaq
+	iretq
+
 .macro isr n
 .global isr\n
 isr\n:

@@ -158,4 +165,3 @@ irq 28
 irq 29
 irq 30
 irq 31
-irq 32