Kernel: Remove CriticalScopes from memory handling code

Bananymous 2024-02-29 11:19:10 +02:00
parent 682de62c57
commit ec0cb5fd54
6 changed files with 21 additions and 18 deletions

View File

@@ -17,7 +17,7 @@ extern uint8_t g_userspace_end[]
namespace Kernel
{
- SpinLock PageTable::s_fast_page_lock;
+ RecursiveSpinLock PageTable::s_fast_page_lock;
static PageTable* s_kernel = nullptr;
static PageTable* s_current = nullptr;
@@ -209,7 +209,8 @@ namespace Kernel
{
ASSERT(s_kernel);
ASSERT_NEQ(paddr, 0);
- ASSERT(!interrupts_enabled());
+ SpinLockGuard _(s_fast_page_lock);
constexpr vaddr_t uc_vaddr = uncanonicalize(fast_page());
constexpr uint64_t pml4e = (uc_vaddr >> 39) & 0x1FF;
@@ -231,7 +232,8 @@ namespace Kernel
void PageTable::unmap_fast_page()
{
ASSERT(s_kernel);
- ASSERT(!interrupts_enabled());
+ SpinLockGuard _(s_fast_page_lock);
constexpr vaddr_t uc_vaddr = uncanonicalize(fast_page());
constexpr uint64_t pml4e = (uc_vaddr >> 39) & 0x1FF;
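In both map_fast_page() and unmap_fast_page() the precondition ASSERT(!interrupts_enabled()) is replaced by an RAII guard on s_fast_page_lock, so callers no longer have to run with interrupts disabled; the lock itself becomes a RecursiveSpinLock (see the header change below). One plausible reason for the recursive type is that a wrapper such as PageTable::with_fast_page() may hold the same lock across the whole map/use/unmap sequence. The sketch below only illustrates that shape; it is a userspace stand-in (std::recursive_mutex / std::scoped_lock instead of the kernel's RecursiveSpinLock / SpinLockGuard), not banan-os code.

#include <cstdio>
#include <mutex>

// Stand-in for RecursiveSpinLock; std::scoped_lock plays the role of SpinLockGuard.
static std::recursive_mutex s_fast_page_lock;

static void map_fast_page(unsigned long paddr)
{
    // Guard taken here instead of requiring the caller to disable interrupts.
    std::scoped_lock guard(s_fast_page_lock);
    std::printf("map paddr %#lx into the fixed fast-page slot\n", paddr);
}

static void unmap_fast_page()
{
    std::scoped_lock guard(s_fast_page_lock);
    std::printf("unmap the fast-page slot\n");
}

// Hypothetical with_fast_page-style wrapper: it holds the lock for the whole
// sequence, and the nested acquisitions inside map/unmap still succeed
// because the lock is recursive.
template<typename F>
static void with_fast_page(unsigned long paddr, F callback)
{
    std::scoped_lock guard(s_fast_page_lock);
    map_fast_page(paddr);
    callback();
    unmap_fast_page();
}

int main()
{
    with_fast_page(0x1000, [] { std::printf("temporary mapping in use\n"); });
}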

View File

@@ -2,7 +2,6 @@
#include <BAN/Errors.h>
#include <BAN/Traits.h>
- #include <kernel/CriticalScope.h>
#include <kernel/Lock/SpinLock.h>
#include <kernel/Memory/Types.h>
@@ -130,7 +129,7 @@ namespace Kernel
private:
paddr_t m_highest_paging_struct { 0 };
mutable RecursiveSpinLock m_lock;
- static SpinLock s_fast_page_lock;
+ static RecursiveSpinLock s_fast_page_lock;
};
static constexpr size_t range_page_count(vaddr_t start, size_t bytes)

View File

@@ -1,4 +1,3 @@
- #include <kernel/CriticalScope.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/FileBackedRegion.h>
#include <kernel/Memory/Heap.h>
@@ -83,12 +82,9 @@ namespace Kernel
if (pages[page_index] == 0)
return;
- {
- CriticalScope _;
- PageTable::with_fast_page(pages[page_index], [&] {
- memcpy(page_buffer, PageTable::fast_page_as_ptr(), PAGE_SIZE);
- });
- }
+ PageTable::with_fast_page(pages[page_index], [&] {
+ memcpy(page_buffer, PageTable::fast_page_as_ptr(), PAGE_SIZE);
+ });
if (auto ret = inode->write(page_index * PAGE_SIZE, BAN::ConstByteSpan::from(page_buffer)); ret.is_error())
dwarnln("{}", ret.error());
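Here the explicit critical section around PageTable::with_fast_page(...) is dropped: with the guard now taken inside the fast-page helpers themselves (see the PageTable hunks above), wrapping the call in an interrupt-disabling scope adds nothing. The following is a rough, hypothetical illustration of the two idioms, assuming CriticalScope is an interrupt-disabling RAII scope; the types are simplified userspace stand-ins, not the kernel's implementations.

#include <atomic>

// Stand-in for a CriticalScope-style scope: mutual exclusion by disabling
// interrupts for the whole block (modeled as comments only, since userspace
// code cannot execute cli/sti).
struct CriticalScopeLike
{
    CriticalScopeLike()  { /* save interrupt state, then disable interrupts */ }
    ~CriticalScopeLike() { /* restore the saved interrupt state */ }
};

// Stand-in for SpinLock + SpinLockGuard: mutual exclusion on one named lock,
// so only code sharing that lock is serialized and interrupts can stay enabled.
struct SpinLockLike
{
    void lock()   { while (m_locked.exchange(true, std::memory_order_acquire)) { } }
    void unlock() { m_locked.store(false, std::memory_order_release); }
    std::atomic<bool> m_locked { false };
};

struct SpinLockGuardLike
{
    explicit SpinLockGuardLike(SpinLockLike& lock) : m_lock(lock) { m_lock.lock(); }
    ~SpinLockGuardLike() { m_lock.unlock(); }
    SpinLockLike& m_lock;
};

static SpinLockLike s_fast_page_lock_like;

// The helper guards itself, so the call site needs no wrapping scope.
static void with_fast_page_like()
{
    SpinLockGuardLike _(s_fast_page_lock_like);
    /* map the page, run the callback, unmap the page */
}

static void caller()
{
    // Before this commit: { CriticalScopeLike _; with_fast_page_like(); }
    // After: just the call.
    with_fast_page_like();
}

int main() { caller(); }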

View File

@@ -1,4 +1,3 @@
- #include <kernel/CriticalScope.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/MemoryBackedRegion.h>

View File

@@ -1,4 +1,3 @@
- #include <kernel/CriticalScope.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/VirtualRange.h>

View File

@@ -1,5 +1,4 @@
#include <BAN/Errors.h>
- #include <kernel/CriticalScope.h>
#include <kernel/kprint.h>
#include <kernel/Memory/kmalloc.h>
@@ -81,6 +80,8 @@ struct kmalloc_info
};
static kmalloc_info s_kmalloc_info;
+ static Kernel::SpinLock s_kmalloc_lock;
template<size_t SIZE>
struct kmalloc_fixed_node
{
@@ -144,6 +145,8 @@ void kmalloc_initialize()
void kmalloc_dump_info()
{
+ Kernel::SpinLockGuard _(s_kmalloc_lock);
kprintln("kmalloc: 0x{8H}->0x{8H}", s_kmalloc_info.base, s_kmalloc_info.end);
kprintln(" used: 0x{8H}", s_kmalloc_info.used);
kprintln(" free: 0x{8H}", s_kmalloc_info.free);
@@ -155,6 +158,7 @@ void kmalloc_dump_info()
static bool is_corrupted()
{
+ Kernel::SpinLockGuard _(s_kmalloc_lock);
auto& info = s_kmalloc_info;
auto* temp = info.first();
for (; temp->end() <= info.end; temp = temp->after());
@@ -163,6 +167,8 @@ static bool is_corrupted()
[[maybe_unused]] static void debug_dump()
{
+ Kernel::SpinLockGuard _(s_kmalloc_lock);
auto& info = s_kmalloc_info;
uint32_t used = 0;
@@ -181,6 +187,8 @@ static bool is_corrupted()
static void* kmalloc_fixed()
{
+ Kernel::SpinLockGuard _(s_kmalloc_lock);
auto& info = s_kmalloc_fixed_info;
if (!info.free_list_head)
@@ -223,6 +231,8 @@ static void* kmalloc_impl(size_t size, size_t align)
ASSERT(align % s_kmalloc_min_align == 0);
ASSERT(size % s_kmalloc_min_align == 0);
+ Kernel::SpinLockGuard _(s_kmalloc_lock);
auto& info = s_kmalloc_info;
for (auto* node = info.first(); node->end() <= info.end; node = node->after())
@@ -304,8 +314,6 @@ void* kmalloc(size_t size, size_t align, bool force_identity_map)
align = s_kmalloc_min_align;
ASSERT(align <= PAGE_SIZE);
- Kernel::CriticalScope critical;
if (size == 0 || size >= info.size)
goto no_memory;
@@ -338,7 +346,7 @@ void kfree(void* address)
uintptr_t address_uint = (uintptr_t)address;
ASSERT(address_uint % s_kmalloc_min_align == 0);
- Kernel::CriticalScope critical;
+ Kernel::SpinLockGuard _(s_kmalloc_lock);
if (s_kmalloc_fixed_info.base <= address_uint && address_uint < s_kmalloc_fixed_info.end)
{
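Across the kmalloc changes the pattern is uniform: a single s_kmalloc_lock is introduced, the debug helpers (kmalloc_dump_info, is_corrupted, debug_dump) and the allocation paths (kmalloc_fixed, kmalloc_impl) take a SpinLockGuard at the top, kfree swaps its CriticalScope for the same guard, and kmalloc() itself just drops its CriticalScope, presumably relying on the guards inside the paths it dispatches to. Below is a minimal toy of that shape, with hypothetical names and userspace stand-ins for the kernel types; it is an illustration of the locking pattern, not the kernel's allocator.

#include <cstdio>
#include <mutex>

// Userspace stand-ins for Kernel::SpinLock / Kernel::SpinLockGuard.
static std::mutex s_kmalloc_lock;

// Toy fixed-size free list, loosely in the spirit of the fixed allocator.
struct FreeNode { FreeNode* next; };
alignas(64) static FreeNode s_nodes[16];
static FreeNode* s_free_list_head = nullptr;

static void kmalloc_initialize_like()
{
    std::scoped_lock _(s_kmalloc_lock);
    for (auto& node : s_nodes)
    {
        node.next = s_free_list_head;
        s_free_list_head = &node;
    }
}

// Allocation path: guard at the top, the same shape the diff adds.
static void* kmalloc_fixed_like()
{
    std::scoped_lock _(s_kmalloc_lock);
    if (!s_free_list_head)
        return nullptr;              // out of fixed-size slots
    FreeNode* node = s_free_list_head;
    s_free_list_head = node->next;
    return node;
}

// Free path: same lock, replacing the old interrupt-disabling scope.
static void kfree_like(void* address)
{
    if (!address)
        return;
    std::scoped_lock _(s_kmalloc_lock);
    auto* node = static_cast<FreeNode*>(address);
    node->next = s_free_list_head;
    s_free_list_head = node;
}

int main()
{
    kmalloc_initialize_like();
    void* slot = kmalloc_fixed_like();
    std::printf("allocated toy slot at %p\n", slot);
    kfree_like(slot);
}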