From f5bbcc017cbc2f543a3e05aaf0a90b9db4987acb Mon Sep 17 00:00:00 2001 From: Bananymous Date: Thu, 7 Aug 2025 16:39:02 +0300 Subject: [PATCH] Kernel: Only send one smp message when reserving a range This was causing some kernel panic because processors ran out of smp message storage when reserving large areas. Also most of the time there is no need to actually send the SMP message. If a process is mapping something to just its own address space, there is no need for a TLB shootdown. Maybe this should be limited only to kernel memory and threads across the same process. I'm not sure what the best approach here is, and it is better to send too many invalidations than too few! --- kernel/arch/i686/PageTable.cpp | 13 ++++++++++--- kernel/arch/x86_64/PageTable.cpp | 13 ++++++++++--- kernel/include/kernel/Memory/PageTable.h | 2 +- 3 files changed, 21 insertions(+), 7 deletions(-) diff --git a/kernel/arch/i686/PageTable.cpp b/kernel/arch/i686/PageTable.cpp index 9073df2a..90a35bc8 100644 --- a/kernel/arch/i686/PageTable.cpp +++ b/kernel/arch/i686/PageTable.cpp @@ -497,13 +497,13 @@ namespace Kernel return true; } - bool PageTable::reserve_page(vaddr_t vaddr, bool only_free) + bool PageTable::reserve_page(vaddr_t vaddr, bool only_free, bool send_smp_message) { SpinLockGuard _(m_lock); ASSERT(vaddr % PAGE_SIZE == 0); if (only_free && !is_page_free(vaddr)) return false; - map_page_at(0, vaddr, Flags::Reserved); + map_page_at(0, vaddr, Flags::Reserved, MemoryType::Normal, send_smp_message); return true; } @@ -517,7 +517,14 @@ namespace Kernel if (only_free && !is_range_free(vaddr, bytes)) return false; for (size_t offset = 0; offset < bytes; offset += PAGE_SIZE) - reserve_page(vaddr + offset); + reserve_page(vaddr + offset, true, false); + Processor::broadcast_smp_message({ + .type = Processor::SMPMessage::Type::FlushTLB, + .flush_tlb = { + .vaddr = vaddr, + .page_count = bytes / PAGE_SIZE, + } + }); return true; } diff --git a/kernel/arch/x86_64/PageTable.cpp
b/kernel/arch/x86_64/PageTable.cpp index d844a3ab..6cd16d62 100644 --- a/kernel/arch/x86_64/PageTable.cpp +++ b/kernel/arch/x86_64/PageTable.cpp @@ -814,13 +814,13 @@ namespace Kernel return page_data & s_page_addr_mask; } - bool PageTable::reserve_page(vaddr_t vaddr, bool only_free) + bool PageTable::reserve_page(vaddr_t vaddr, bool only_free, bool send_smp_message) { SpinLockGuard _(m_lock); ASSERT(vaddr % PAGE_SIZE == 0); if (only_free && !is_page_free(vaddr)) return false; - map_page_at(0, vaddr, Flags::Reserved); + map_page_at(0, vaddr, Flags::Reserved, MemoryType::Normal, send_smp_message); return true; } @@ -834,7 +834,14 @@ namespace Kernel if (only_free && !is_range_free(vaddr, bytes)) return false; for (size_t offset = 0; offset < bytes; offset += PAGE_SIZE) - reserve_page(vaddr + offset); + reserve_page(vaddr + offset, true, false); + Processor::broadcast_smp_message({ + .type = Processor::SMPMessage::Type::FlushTLB, + .flush_tlb = { + .vaddr = vaddr, + .page_count = bytes / PAGE_SIZE, + } + }); return true; } diff --git a/kernel/include/kernel/Memory/PageTable.h b/kernel/include/kernel/Memory/PageTable.h index d6a4ca31..21f8f852 100644 --- a/kernel/include/kernel/Memory/PageTable.h +++ b/kernel/include/kernel/Memory/PageTable.h @@ -112,7 +112,7 @@ namespace Kernel bool is_page_free(vaddr_t) const; bool is_range_free(vaddr_t, size_t bytes) const; - bool reserve_page(vaddr_t, bool only_free = true); + bool reserve_page(vaddr_t, bool only_free = true, bool send_smp_message = true); bool reserve_range(vaddr_t, size_t bytes, bool only_free = true); vaddr_t reserve_free_page(vaddr_t first_address, vaddr_t last_address = UINTPTR_MAX);