Kernel: Reduce the number of TLB invalidations

Invalidations are skipped when mapping or unmapping a page that was previously unmapped. TLB-invalidate IPIs are now ignored if they do not affect the currently mapped address space.
parent 1143dc3cae
commit 34e84f8b07
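In outline, the commit adds two guards; the sketch below is illustrative only, using simplified stand-in types and names (FlushTLBMessage, unmap_entry, should_handle_flush) rather than the kernel's real PageTable/Processor classes, which appear in the diff that follows. A flush request whose page_table is nullptr marks a kernel-range flush that every CPU must honor.

// Sketch only: models the commit's two checks with hypothetical stand-ins.
#include <cstddef>
#include <cstdint>

using paddr_t = uintptr_t;
using vaddr_t = uintptr_t;

struct FlushTLBMessage
{
	vaddr_t vaddr;
	size_t page_count;
	void* page_table; // nullptr => kernel-space flush, handled on every CPU
};

// Guard 1: only invalidate the TLB entry if the slot previously held a mapping.
// 'entry' stands in for pt[pte]; 'invalidate' for the arch-specific invlpg path.
inline void unmap_entry(paddr_t& entry, vaddr_t vaddr, void (*invalidate)(vaddr_t))
{
	const paddr_t old_paddr = entry;
	entry = 0;
	if (old_paddr != 0)
		invalidate(vaddr);
}

// Guard 2: a CPU receiving the FlushTLB IPI skips it when the flush targets a
// userspace page table other than the one it currently has loaded.
inline bool should_handle_flush(const FlushTLBMessage& msg, const void* current_page_table)
{
	return msg.page_table == nullptr || msg.page_table == current_page_table;
}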
@@ -221,7 +221,7 @@ namespace Kernel
 		ASSERT(!(pt[pte] & Flags::Present));
 		pt[pte] = paddr | Flags::ReadWrite | Flags::Present;
 
-		invalidate(fast_page(), false);
+		asm volatile("invlpg (%0)" :: "r"(fast_page()) : "memory");
 	}
 
 	void PageTable::unmap_fast_page()
@@ -241,7 +241,7 @@ namespace Kernel
 		ASSERT(pt[pte] & Flags::Present);
 		pt[pte] = 0;
 
-		invalidate(fast_page(), false);
+		asm volatile("invlpg (%0)" :: "r"(fast_page()) : "memory");
 	}
 
 	BAN::ErrorOr<PageTable*> PageTable::create_userspace()
@@ -314,7 +314,8 @@ namespace Kernel
 				.type = Processor::SMPMessage::Type::FlushTLB,
 				.flush_tlb = {
 					.vaddr = vaddr,
-					.page_count = 1
+					.page_count = 1,
+					.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
 				}
 			});
 		}
@@ -343,8 +344,12 @@ namespace Kernel
 		uint64_t* pd = reinterpret_cast<uint64_t*>(P2V(pdpt[pdpte] & PAGE_ADDR_MASK));
 		uint64_t* pt = reinterpret_cast<uint64_t*>(P2V(pd[pde] & PAGE_ADDR_MASK));
 
+		const paddr_t old_paddr = pt[pte] & PAGE_ADDR_MASK;
+
 		pt[pte] = 0;
-		invalidate(vaddr, send_smp_message);
+
+		if (old_paddr != 0)
+			invalidate(vaddr, send_smp_message);
 	}
 
 	void PageTable::unmap_range(vaddr_t vaddr, size_t size)
@@ -361,7 +366,8 @@ namespace Kernel
 				.type = Processor::SMPMessage::Type::FlushTLB,
 				.flush_tlb = {
 					.vaddr = vaddr,
-					.page_count = page_count
+					.page_count = page_count,
+					.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
 				}
 			});
 		}
@@ -417,9 +423,13 @@ namespace Kernel
 			uwr_flags &= ~Flags::Present;
 
 		uint64_t* pt = reinterpret_cast<uint64_t*>(P2V(pd[pde] & PAGE_ADDR_MASK));
+
+		const paddr_t old_paddr = pt[pte] & PAGE_ADDR_MASK;
+
 		pt[pte] = paddr | uwr_flags | extra_flags;
 
-		invalidate(vaddr, send_smp_message);
+		if (old_paddr != 0)
+			invalidate(vaddr, send_smp_message);
 	}
 
 	void PageTable::map_range_at(paddr_t paddr, vaddr_t vaddr, size_t size, flags_t flags, MemoryType memory_type)
@@ -438,7 +448,8 @@ namespace Kernel
 				.type = Processor::SMPMessage::Type::FlushTLB,
 				.flush_tlb = {
 					.vaddr = vaddr,
-					.page_count = page_count
+					.page_count = page_count,
+					.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
 				}
 			});
 		}
@@ -523,6 +534,7 @@ namespace Kernel
 				.flush_tlb = {
 					.vaddr = vaddr,
 					.page_count = bytes / PAGE_SIZE,
+					.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
 				}
 			});
 		return true;
@@ -521,7 +521,7 @@ namespace Kernel
 		ASSERT(!(pt[pte] & Flags::Present));
 		pt[pte] = paddr | Flags::ReadWrite | Flags::Present;
 
-		invalidate(fast_page(), false);
+		asm volatile("invlpg (%0)" :: "r"(fast_page()) : "memory");
 	}
 
 	void PageTable::unmap_fast_page()
@@ -544,7 +544,7 @@ namespace Kernel
 		ASSERT(pt[pte] & Flags::Present);
 		pt[pte] = 0;
 
-		invalidate(fast_page(), false);
+		asm volatile("invlpg (%0)" :: "r"(fast_page()) : "memory");
 	}
 
 	BAN::ErrorOr<PageTable*> PageTable::create_userspace()
@@ -623,7 +623,8 @@ namespace Kernel
 				.type = Processor::SMPMessage::Type::FlushTLB,
 				.flush_tlb = {
 					.vaddr = vaddr,
-					.page_count = 1
+					.page_count = 1,
+					.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
 				}
 			});
 		}
@@ -658,8 +659,12 @@ namespace Kernel
 		uint64_t* pd = P2V(pdpt[pdpte] & s_page_addr_mask);
 		uint64_t* pt = P2V(pd[pde] & s_page_addr_mask);
 
+		const paddr_t old_paddr = pt[pte] & PAGE_ADDR_MASK;
+
 		pt[pte] = 0;
-		invalidate(vaddr, send_smp_message);
+
+		if (old_paddr != 0)
+			invalidate(vaddr, send_smp_message);
 	}
 
 	void PageTable::unmap_range(vaddr_t vaddr, size_t size)
@@ -676,7 +681,8 @@ namespace Kernel
 				.type = Processor::SMPMessage::Type::FlushTLB,
 				.flush_tlb = {
 					.vaddr = vaddr,
-					.page_count = page_count
+					.page_count = page_count,
+					.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
 				}
 			});
 		}
@@ -742,9 +748,12 @@ namespace Kernel
 		if (!(flags & Flags::Present))
 			uwr_flags &= ~Flags::Present;
 
+		const paddr_t old_paddr = pt[pte] & PAGE_ADDR_MASK;
+
 		pt[pte] = paddr | uwr_flags | extra_flags;
 
-		invalidate(vaddr, send_smp_message);
+		if (old_paddr != 0)
+			invalidate(vaddr, send_smp_message);
 	}
 
 	void PageTable::map_range_at(paddr_t paddr, vaddr_t vaddr, size_t size, flags_t flags, MemoryType memory_type)
@@ -765,7 +774,8 @@ namespace Kernel
 				.type = Processor::SMPMessage::Type::FlushTLB,
 				.flush_tlb = {
 					.vaddr = vaddr,
-					.page_count = page_count
+					.page_count = page_count,
+					.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
 				}
 			});
 		}
@@ -840,6 +850,7 @@ namespace Kernel
 				.flush_tlb = {
 					.vaddr = vaddr,
 					.page_count = bytes / PAGE_SIZE,
+					.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
 				}
 			});
 		return true;
@@ -133,7 +133,7 @@ namespace Kernel
 		void map_kernel_memory();
 		void prepare_fast_page();
 
-		static void invalidate(vaddr_t, bool send_smp_message);
+		void invalidate(vaddr_t, bool send_smp_message);
 
 		static void map_fast_page(paddr_t);
 		static void unmap_fast_page();
@@ -46,6 +46,7 @@ namespace Kernel
 			{
 				uintptr_t vaddr;
 				size_t page_count;
+				void* page_table;
 			} flush_tlb;
 			SchedulerQueue::Node* new_thread;
 			SchedulerQueue::Node* unblock_thread;
@@ -370,6 +370,8 @@ namespace Kernel
 			switch (message->type)
 			{
 				case SMPMessage::Type::FlushTLB:
+					if (message->flush_tlb.page_table && message->flush_tlb.page_table != processor.m_current_page_table)
+						break;
 					for (size_t i = 0; i < message->flush_tlb.page_count; i++)
 						asm volatile("invlpg (%0)" :: "r"(message->flush_tlb.vaddr + i * PAGE_SIZE) : "memory");
 					break;