Kernel: Reduce the number of TLB invalidations

Invalidations are no longer done when mapping or unmapping a previously
unmapped page. TLB invalidate IPIs are now ignored if they don't affect
the address space currently loaded on the receiving processor.
Bananymous 2026-01-16 16:22:29 +02:00
parent 1143dc3cae
commit 34e84f8b07
5 changed files with 41 additions and 15 deletions
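
Two independent optimizations are combined here. A PTE that previously held no mapping cannot have a cached TLB entry, so mapping or unmapping such a page needs neither an invlpg nor a cross-CPU shootdown; and a shootdown IPI for a userspace mapping only matters to CPUs that currently have that address space loaded. A minimal self-contained sketch of the receiving side (the struct and handler mirror the hunks below, but FlushTLBRequest, handle_flush_tlb and the local PAGE_SIZE are illustrative stand-ins, not the kernel's actual API):

#include <stddef.h>
#include <stdint.h>

constexpr uintptr_t PAGE_SIZE = 4096; // x86 4 KiB pages

struct FlushTLBRequest
{
    uintptr_t vaddr;
    size_t page_count;
    void* page_table; // nullptr = kernel mapping, relevant to every CPU
};

static void handle_flush_tlb(const FlushTLBRequest& req, void* current_page_table)
{
    // A request tagged with a userspace page table only concerns CPUs that
    // currently have that address space loaded; everyone else can drop it,
    // because switching address spaces already flushed their non-global TLB
    // entries (this assumes PCID is not in use).
    if (req.page_table && req.page_table != current_page_table)
        return;
    for (size_t i = 0; i < req.page_count; i++)
        asm volatile("invlpg (%0)" :: "r"(req.vaddr + i * PAGE_SIZE) : "memory");
}

Kernel mappings live above KERNEL_OFFSET and are shared by every address space, which is why the senders below tag them with a nullptr page table so that no CPU skips the flush.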

View File

@@ -221,7 +221,7 @@ namespace Kernel
ASSERT(!(pt[pte] & Flags::Present));
pt[pte] = paddr | Flags::ReadWrite | Flags::Present;
invalidate(fast_page(), false);
asm volatile("invlpg (%0)" :: "r"(fast_page()) : "memory");
}
void PageTable::unmap_fast_page()
@@ -241,7 +241,7 @@ namespace Kernel
ASSERT(pt[pte] & Flags::Present);
pt[pte] = 0;
invalidate(fast_page(), false);
asm volatile("invlpg (%0)" :: "r"(fast_page()) : "memory");
}
BAN::ErrorOr<PageTable*> PageTable::create_userspace()
@@ -314,7 +314,8 @@ namespace Kernel
.type = Processor::SMPMessage::Type::FlushTLB,
.flush_tlb = {
.vaddr = vaddr,
.page_count = 1
.page_count = 1,
.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
}
});
}
@@ -343,7 +344,11 @@ namespace Kernel
uint64_t* pd = reinterpret_cast<uint64_t*>(P2V(pdpt[pdpte] & PAGE_ADDR_MASK));
uint64_t* pt = reinterpret_cast<uint64_t*>(P2V(pd[pde] & PAGE_ADDR_MASK));
const paddr_t old_paddr = pt[pte] & PAGE_ADDR_MASK;
pt[pte] = 0;
if (old_paddr != 0)
invalidate(vaddr, send_smp_message);
}
@@ -361,7 +366,8 @@ namespace Kernel
.type = Processor::SMPMessage::Type::FlushTLB,
.flush_tlb = {
.vaddr = vaddr,
.page_count = page_count
.page_count = page_count,
.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
}
});
}
@@ -417,8 +423,12 @@ namespace Kernel
uwr_flags &= ~Flags::Present;
uint64_t* pt = reinterpret_cast<uint64_t*>(P2V(pd[pde] & PAGE_ADDR_MASK));
const paddr_t old_paddr = pt[pte] & PAGE_ADDR_MASK;
pt[pte] = paddr | uwr_flags | extra_flags;
if (old_paddr != 0)
invalidate(vaddr, send_smp_message);
}
@@ -438,7 +448,8 @@ namespace Kernel
.type = Processor::SMPMessage::Type::FlushTLB,
.flush_tlb = {
.vaddr = vaddr,
.page_count = page_count
.page_count = page_count,
.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
}
});
}
@@ -523,6 +534,7 @@ namespace Kernel
.flush_tlb = {
.vaddr = vaddr,
.page_count = bytes / PAGE_SIZE,
.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
}
});
return true;

View File

@@ -521,7 +521,7 @@ namespace Kernel
ASSERT(!(pt[pte] & Flags::Present));
pt[pte] = paddr | Flags::ReadWrite | Flags::Present;
invalidate(fast_page(), false);
asm volatile("invlpg (%0)" :: "r"(fast_page()) : "memory");
}
void PageTable::unmap_fast_page()
@@ -544,7 +544,7 @@ namespace Kernel
ASSERT(pt[pte] & Flags::Present);
pt[pte] = 0;
invalidate(fast_page(), false);
asm volatile("invlpg (%0)" :: "r"(fast_page()) : "memory");
}
BAN::ErrorOr<PageTable*> PageTable::create_userspace()
@@ -623,7 +623,8 @@ namespace Kernel
.type = Processor::SMPMessage::Type::FlushTLB,
.flush_tlb = {
.vaddr = vaddr,
.page_count = 1
.page_count = 1,
.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
}
});
}
@@ -658,7 +659,11 @@ namespace Kernel
uint64_t* pd = P2V(pdpt[pdpte] & s_page_addr_mask);
uint64_t* pt = P2V(pd[pde] & s_page_addr_mask);
const paddr_t old_paddr = pt[pte] & PAGE_ADDR_MASK;
pt[pte] = 0;
if (old_paddr != 0)
invalidate(vaddr, send_smp_message);
}
@@ -676,7 +681,8 @@ namespace Kernel
.type = Processor::SMPMessage::Type::FlushTLB,
.flush_tlb = {
.vaddr = vaddr,
.page_count = page_count
.page_count = page_count,
.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
}
});
}
@@ -742,8 +748,11 @@ namespace Kernel
if (!(flags & Flags::Present))
uwr_flags &= ~Flags::Present;
const paddr_t old_paddr = pt[pte] & PAGE_ADDR_MASK;
pt[pte] = paddr | uwr_flags | extra_flags;
if (old_paddr != 0)
invalidate(vaddr, send_smp_message);
}
@@ -765,7 +774,8 @@ namespace Kernel
.type = Processor::SMPMessage::Type::FlushTLB,
.flush_tlb = {
.vaddr = vaddr,
.page_count = page_count
.page_count = page_count,
.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
}
});
}
@@ -840,6 +850,7 @@ namespace Kernel
.flush_tlb = {
.vaddr = vaddr,
.page_count = bytes / PAGE_SIZE,
.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
}
});
return true;

View File

@@ -133,7 +133,7 @@ namespace Kernel
void map_kernel_memory();
void prepare_fast_page();
static void invalidate(vaddr_t, bool send_smp_message);
void invalidate(vaddr_t, bool send_smp_message);
static void map_fast_page(paddr_t);
static void unmap_fast_page();
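
invalidate() loses its static qualifier because tagging the shootdown message with the owning address space needs this (see the .page_table = vaddr < KERNEL_OFFSET ? this : nullptr initializers above). map_fast_page() and unmap_fast_page() remain static, so they can no longer call it and now issue the invlpg directly; those call sites never requested an SMP broadcast in the first place (they passed send_smp_message = false).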

View File

@@ -46,6 +46,7 @@ namespace Kernel
{
uintptr_t vaddr;
size_t page_count;
void* page_table;
} flush_tlb;
SchedulerQueue::Node* new_thread;
SchedulerQueue::Node* unblock_thread;

View File

@@ -370,6 +370,8 @@ namespace Kernel
switch (message->type)
{
case SMPMessage::Type::FlushTLB:
if (message->flush_tlb.page_table && message->flush_tlb.page_table != processor.m_current_page_table)
break;
for (size_t i = 0; i < message->flush_tlb.page_count; i++)
asm volatile("invlpg (%0)" :: "r"(message->flush_tlb.vaddr + i * PAGE_SIZE) : "memory");
break;
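
Skipping the IPI is only correct if each CPU's m_current_page_table reflects the page table it actually has loaded, and if switching address spaces flushes the stale non-global TLB entries (which a CR3 write does when PCID is not enabled). A minimal sketch of that bookkeeping, with illustrative names; only m_current_page_table appears in the diff above:

#include <stdint.h>

struct ProcessorSketch
{
    void* m_current_page_table; // what the FlushTLB handler compares against
};

static void load_address_space(ProcessorSketch& processor, void* page_table, uintptr_t top_level_paddr)
{
    // Writing CR3 switches the address space and drops non-global TLB entries,
    // so any flush this CPU skipped for page_table has already taken effect.
    asm volatile("mov %0, %%cr3" :: "r"(top_level_paddr) : "memory");
    processor.m_current_page_table = page_table;
}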