From 34e84f8b078e1736627508a0dcea2d13e65fcc62 Mon Sep 17 00:00:00 2001
From: Bananymous
Date: Fri, 16 Jan 2026 16:22:29 +0200
Subject: [PATCH] Kernel: Reduce the number of TLB invalidations

Invalidations are not done when mapping or unmapping a previously
unmapped page. TLB invalidate IPIs are now ignored if they don't affect
the currently mapped address space.
---
 kernel/arch/i686/PageTable.cpp           | 26 +++++++++++++++++-------
 kernel/arch/x86_64/PageTable.cpp         | 25 ++++++++++++++++-------
 kernel/include/kernel/Memory/PageTable.h |  2 +-
 kernel/include/kernel/Processor.h        |  1 +
 kernel/kernel/Processor.cpp              |  2 ++
 5 files changed, 41 insertions(+), 15 deletions(-)

diff --git a/kernel/arch/i686/PageTable.cpp b/kernel/arch/i686/PageTable.cpp
index 90a35bc8..55f1485d 100644
--- a/kernel/arch/i686/PageTable.cpp
+++ b/kernel/arch/i686/PageTable.cpp
@@ -221,7 +221,7 @@ namespace Kernel
 		ASSERT(!(pt[pte] & Flags::Present));
 		pt[pte] = paddr | Flags::ReadWrite | Flags::Present;
 
-		invalidate(fast_page(), false);
+		asm volatile("invlpg (%0)" :: "r"(fast_page()) : "memory");
 	}
 
 	void PageTable::unmap_fast_page()
@@ -241,7 +241,7 @@ namespace Kernel
 		ASSERT(pt[pte] & Flags::Present);
 		pt[pte] = 0;
 
-		invalidate(fast_page(), false);
+		asm volatile("invlpg (%0)" :: "r"(fast_page()) : "memory");
 	}
 
 	BAN::ErrorOr<PageTable*> PageTable::create_userspace()
@@ -314,7 +314,8 @@ namespace Kernel
 				.type = Processor::SMPMessage::Type::FlushTLB,
 				.flush_tlb = {
 					.vaddr = vaddr,
-					.page_count = 1
+					.page_count = 1,
+					.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
 				}
 			});
 		}
@@ -343,8 +344,12 @@ namespace Kernel
 		uint64_t* pd = reinterpret_cast<uint64_t*>(P2V(pdpt[pdpte] & PAGE_ADDR_MASK));
 		uint64_t* pt = reinterpret_cast<uint64_t*>(P2V(pd[pde] & PAGE_ADDR_MASK));
 
+		const paddr_t old_paddr = pt[pte] & PAGE_ADDR_MASK;
+
 		pt[pte] = 0;
-		invalidate(vaddr, send_smp_message);
+
+		if (old_paddr != 0)
+			invalidate(vaddr, send_smp_message);
 	}
 
 	void PageTable::unmap_range(vaddr_t vaddr, size_t size)
@@ -361,7 +366,8 @@ namespace Kernel
 				.type = Processor::SMPMessage::Type::FlushTLB,
 				.flush_tlb = {
 					.vaddr = vaddr,
-					.page_count = page_count
+					.page_count = page_count,
+					.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
 				}
 			});
 		}
@@ -417,9 +423,13 @@ namespace Kernel
 			uwr_flags &= ~Flags::Present;
 
 		uint64_t* pt = reinterpret_cast<uint64_t*>(P2V(pd[pde] & PAGE_ADDR_MASK));
+
+		const paddr_t old_paddr = pt[pte] & PAGE_ADDR_MASK;
+
 		pt[pte] = paddr | uwr_flags | extra_flags;
 
-		invalidate(vaddr, send_smp_message);
+		if (old_paddr != 0)
+			invalidate(vaddr, send_smp_message);
 	}
 
 	void PageTable::map_range_at(paddr_t paddr, vaddr_t vaddr, size_t size, flags_t flags, MemoryType memory_type)
@@ -438,7 +448,8 @@ namespace Kernel
 				.type = Processor::SMPMessage::Type::FlushTLB,
 				.flush_tlb = {
 					.vaddr = vaddr,
-					.page_count = page_count
+					.page_count = page_count,
+					.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
 				}
 			});
 		}
@@ -523,6 +534,7 @@ namespace Kernel
 			.flush_tlb = {
 				.vaddr = vaddr,
 				.page_count = bytes / PAGE_SIZE,
+				.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
 			}
 		});
 		return true;
diff --git a/kernel/arch/x86_64/PageTable.cpp b/kernel/arch/x86_64/PageTable.cpp
index 6cd16d62..11ee3923 100644
--- a/kernel/arch/x86_64/PageTable.cpp
+++ b/kernel/arch/x86_64/PageTable.cpp
@@ -521,7 +521,7 @@ namespace Kernel
 		ASSERT(!(pt[pte] & Flags::Present));
 		pt[pte] = paddr | Flags::ReadWrite | Flags::Present;
 
-		invalidate(fast_page(), false);
+		asm volatile("invlpg (%0)" :: "r"(fast_page()) : "memory");
 	}
 
 	void PageTable::unmap_fast_page()
@@ -544,7 +544,7 @@ namespace Kernel
 		ASSERT(pt[pte] & Flags::Present);
 		pt[pte] = 0;
 
-		invalidate(fast_page(), false);
+		asm volatile("invlpg (%0)" :: "r"(fast_page()) : "memory");
 	}
 
 	BAN::ErrorOr<PageTable*> PageTable::create_userspace()
@@ -623,7 +623,8 @@ namespace Kernel
 				.type = Processor::SMPMessage::Type::FlushTLB,
 				.flush_tlb = {
 					.vaddr = vaddr,
-					.page_count = 1
+					.page_count = 1,
+					.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
 				}
 			});
 		}
@@ -658,8 +659,12 @@ namespace Kernel
 		uint64_t* pd = P2V(pdpt[pdpte] & s_page_addr_mask);
 		uint64_t* pt = P2V(pd[pde] & s_page_addr_mask);
 
+		const paddr_t old_paddr = pt[pte] & PAGE_ADDR_MASK;
+
 		pt[pte] = 0;
-		invalidate(vaddr, send_smp_message);
+
+		if (old_paddr != 0)
+			invalidate(vaddr, send_smp_message);
 	}
 
 	void PageTable::unmap_range(vaddr_t vaddr, size_t size)
@@ -676,7 +681,8 @@ namespace Kernel
 				.type = Processor::SMPMessage::Type::FlushTLB,
 				.flush_tlb = {
 					.vaddr = vaddr,
-					.page_count = page_count
+					.page_count = page_count,
+					.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
 				}
 			});
 		}
@@ -742,9 +748,12 @@ namespace Kernel
 		if (!(flags & Flags::Present))
 			uwr_flags &= ~Flags::Present;
 
+		const paddr_t old_paddr = pt[pte] & PAGE_ADDR_MASK;
+
 		pt[pte] = paddr | uwr_flags | extra_flags;
 
-		invalidate(vaddr, send_smp_message);
+		if (old_paddr != 0)
+			invalidate(vaddr, send_smp_message);
 	}
 
 	void PageTable::map_range_at(paddr_t paddr, vaddr_t vaddr, size_t size, flags_t flags, MemoryType memory_type)
@@ -765,7 +774,8 @@ namespace Kernel
 				.type = Processor::SMPMessage::Type::FlushTLB,
 				.flush_tlb = {
 					.vaddr = vaddr,
-					.page_count = page_count
+					.page_count = page_count,
+					.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
 				}
 			});
 		}
@@ -840,6 +850,7 @@ namespace Kernel
 			.flush_tlb = {
 				.vaddr = vaddr,
 				.page_count = bytes / PAGE_SIZE,
+				.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
 			}
 		});
 		return true;
diff --git a/kernel/include/kernel/Memory/PageTable.h b/kernel/include/kernel/Memory/PageTable.h
index 21f8f852..c485b8a7 100644
--- a/kernel/include/kernel/Memory/PageTable.h
+++ b/kernel/include/kernel/Memory/PageTable.h
@@ -133,7 +133,7 @@ namespace Kernel
 		void map_kernel_memory();
 		void prepare_fast_page();
 
-		static void invalidate(vaddr_t, bool send_smp_message);
+		void invalidate(vaddr_t, bool send_smp_message);
 
 		static void map_fast_page(paddr_t);
 		static void unmap_fast_page();
diff --git a/kernel/include/kernel/Processor.h b/kernel/include/kernel/Processor.h
index d7ea6d16..62a6d835 100644
--- a/kernel/include/kernel/Processor.h
+++ b/kernel/include/kernel/Processor.h
@@ -46,6 +46,7 @@ namespace Kernel
 			{
 				uintptr_t vaddr;
 				size_t page_count;
+				void* page_table;
 			} flush_tlb;
 			SchedulerQueue::Node* new_thread;
 			SchedulerQueue::Node* unblock_thread;
diff --git a/kernel/kernel/Processor.cpp b/kernel/kernel/Processor.cpp
index d5e27a37..243917ac 100644
--- a/kernel/kernel/Processor.cpp
+++ b/kernel/kernel/Processor.cpp
@@ -370,6 +370,8 @@ namespace Kernel
 			switch (message->type)
 			{
 				case SMPMessage::Type::FlushTLB:
+					if (message->flush_tlb.page_table && message->flush_tlb.page_table != processor.m_current_page_table)
+						break;
 					for (size_t i = 0; i < message->flush_tlb.page_count; i++)
 						asm volatile("invlpg (%0)" :: "r"(message->flush_tlb.vaddr + i * PAGE_SIZE) : "memory");
 					break;
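
Note on the send side: the reworked non-static PageTable::invalidate() is not itself visible in the hunks above, only its call sites and the FlushTLB broadcast it drives. The following is a minimal sketch of how it plausibly fits together, reusing only names that already appear in this diff (Processor::broadcast_smp_message, SMPMessage::Type::FlushTLB, KERNEL_OFFSET); it is an assumption for illustration, not the actual banan-os implementation.

	// Illustrative sketch, not part of the patch: one plausible shape for the
	// now non-static PageTable::invalidate(). The local TLB entry is always
	// flushed; the cross-processor IPI carries the owning page table so other
	// CPUs can skip flushes for address spaces they do not have loaded.
	void PageTable::invalidate(vaddr_t vaddr, bool send_smp_message)
	{
		// flush the entry on the processor doing the map/unmap
		asm volatile("invlpg (%0)" :: "r"(vaddr) : "memory");

		if (!send_smp_message)
			return;

		Processor::broadcast_smp_message({
			.type = Processor::SMPMessage::Type::FlushTLB,
			.flush_tlb = {
				.vaddr = vaddr,
				.page_count = 1,
				// kernel-half mappings exist in every address space, so a
				// nullptr page_table means "flush on every processor"
				.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
			}
		});
	}

With something of this shape in place, the old_paddr checks added in unmap_page() and map_page_at() skip the call entirely when the entry was not present before, and the page_table check added in Processor.cpp drops the IPI on processors that currently have a different address space loaded; kernel mappings (vaddr >= KERNEL_OFFSET) are still flushed everywhere.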