Kernel: Fix all locks broken by the new mutexes

This commit is contained in:
2024-02-28 22:39:02 +02:00
parent 1971813336
commit d94f6388b7
66 changed files with 681 additions and 647 deletions

View File

@@ -1,5 +1,5 @@
#include <kernel/CriticalScope.h>
#include <kernel/LockGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/FileBackedRegion.h>
#include <kernel/Memory/Heap.h>
@@ -26,7 +26,7 @@ namespace Kernel
if (type == Type::SHARED)
{
LockGuard _(inode->m_lock);
LockGuard _(inode->m_mutex);
if (inode->m_shared_region.valid())
region->m_shared_data = inode->m_shared_region.lock();
else
@@ -157,7 +157,7 @@ namespace Kernel
}
else if (m_type == Type::SHARED)
{
LockGuard _(m_inode->m_lock);
LockGuard _(m_inode->m_mutex);
ASSERT(m_inode->m_shared_region.valid());
ASSERT(m_shared_data->pages.size() == BAN::Math::div_round_up<size_t>(m_inode->size(), PAGE_SIZE));
@@ -175,10 +175,9 @@ namespace Kernel
TRY(m_inode->read(offset, BAN::ByteSpan(m_shared_data->page_buffer, bytes)));
CriticalScope _;
PageTable::map_fast_page(pages[page_index]);
memcpy(PageTable::fast_page_as_ptr(), m_shared_data->page_buffer, PAGE_SIZE);
PageTable::unmap_fast_page();
PageTable::with_fast_page(pages[page_index], [&] {
memcpy(PageTable::fast_page_as_ptr(), m_shared_data->page_buffer, PAGE_SIZE);
});
}
paddr_t paddr = pages[page_index];

View File

@@ -1,5 +1,4 @@
#include <kernel/BootInfo.h>
#include <kernel/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/PageTable.h>
@@ -67,7 +66,7 @@ namespace Kernel
paddr_t Heap::take_free_page()
{
LockGuard _(m_lock);
SpinLockGuard _(m_lock);
for (auto& range : m_physical_ranges)
if (range.free_pages() >= 1)
return range.reserve_page();
@@ -76,7 +75,7 @@ namespace Kernel
void Heap::release_page(paddr_t paddr)
{
LockGuard _(m_lock);
SpinLockGuard _(m_lock);
for (auto& range : m_physical_ranges)
if (range.contains(paddr))
return range.release_page(paddr);
@@ -85,7 +84,7 @@ namespace Kernel
paddr_t Heap::take_free_contiguous_pages(size_t pages)
{
LockGuard _(m_lock);
SpinLockGuard _(m_lock);
for (auto& range : m_physical_ranges)
if (range.free_pages() >= pages)
if (paddr_t paddr = range.reserve_contiguous_pages(pages))
@@ -95,7 +94,7 @@ namespace Kernel
void Heap::release_contiguous_pages(paddr_t paddr, size_t pages)
{
LockGuard _(m_lock);
SpinLockGuard _(m_lock);
for (auto& range : m_physical_ranges)
if (range.contains(paddr))
return range.release_contiguous_pages(paddr, pages);
@@ -104,7 +103,7 @@ namespace Kernel
size_t Heap::used_pages() const
{
LockGuard _(m_lock);
SpinLockGuard _(m_lock);
size_t result = 0;
for (const auto& range : m_physical_ranges)
result += range.used_pages();
@@ -113,7 +112,7 @@ namespace Kernel
size_t Heap::free_pages() const
{
LockGuard _(m_lock);
SpinLockGuard _(m_lock);
size_t result = 0;
for (const auto& range : m_physical_ranges)
result += range.free_pages();

View File

@@ -1,5 +1,5 @@
#include <kernel/CriticalScope.h>
#include <kernel/LockGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/MemoryBackedRegion.h>
@@ -57,15 +57,9 @@ namespace Kernel
m_page_table.map_page_at(paddr, vaddr, m_flags);
// Zero out the new page
if (&PageTable::current() == &m_page_table)
memset((void*)vaddr, 0x00, PAGE_SIZE);
else
{
CriticalScope _;
PageTable::map_fast_page(paddr);
PageTable::with_fast_page(paddr, [&] {
memset(PageTable::fast_page_as_ptr(), 0x00, PAGE_SIZE);
PageTable::unmap_fast_page();
}
});
return true;
}
@@ -100,15 +94,9 @@ namespace Kernel
TRY(allocate_page_containing(write_vaddr));
if (&PageTable::current() == &m_page_table)
memcpy((void*)write_vaddr, (void*)(buffer + written), bytes);
else
{
CriticalScope _;
PageTable::map_fast_page(m_page_table.physical_address_of(write_vaddr & PAGE_ADDR_MASK));
PageTable::with_fast_page(m_page_table.physical_address_of(write_vaddr & PAGE_ADDR_MASK), [&] {
memcpy(PageTable::fast_page_as_ptr(page_offset), (void*)(buffer + written), bytes);
PageTable::unmap_fast_page();
}
});
written += bytes;
}

View File

@@ -1,5 +1,5 @@
#include <kernel/CriticalScope.h>
#include <kernel/LockGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/VirtualRange.h>
@@ -68,7 +68,7 @@ namespace Kernel
}
ASSERT(vaddr + size <= vaddr_end);
LockGuard _(page_table);
SpinLockGuard _(page_table);
page_table.unmap_range(vaddr, size); // We have to unmap here to allow reservation in create_to_vaddr()
return create_to_vaddr(page_table, vaddr, size, flags, preallocate_pages);
}
@@ -99,7 +99,7 @@ namespace Kernel
auto result = TRY(create_to_vaddr(page_table, vaddr(), size(), flags(), m_preallocated));
LockGuard _(m_page_table);
SpinLockGuard _(m_page_table);
for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
{
if (!m_preallocated && m_page_table.physical_address_of(vaddr() + offset))
@@ -110,10 +110,9 @@ namespace Kernel
result->m_page_table.map_page_at(paddr, vaddr() + offset, m_flags);
}
CriticalScope _;
PageTable::map_fast_page(result->m_page_table.physical_address_of(vaddr() + offset));
memcpy(PageTable::fast_page_as_ptr(), (void*)(vaddr() + offset), PAGE_SIZE);
PageTable::unmap_fast_page();
PageTable::with_fast_page(result->m_page_table.physical_address_of(vaddr() + offset), [&] {
memcpy(PageTable::fast_page_as_ptr(), (void*)(vaddr() + offset), PAGE_SIZE);
});
}
return result;
@@ -148,10 +147,9 @@ namespace Kernel
for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
{
CriticalScope _;
PageTable::map_fast_page(m_page_table.physical_address_of(vaddr() + offset));
memset(PageTable::fast_page_as_ptr(), 0x00, PAGE_SIZE);
PageTable::unmap_fast_page();
PageTable::with_fast_page(m_page_table.physical_address_of(vaddr() + offset), [&] {
memset(PageTable::fast_page_as_ptr(), 0x00, PAGE_SIZE);
});
}
}
@@ -176,12 +174,9 @@ namespace Kernel
while (bytes > 0)
{
{
CriticalScope _;
PageTable::map_fast_page(m_page_table.physical_address_of(vaddr() + page_index * PAGE_SIZE));
PageTable::with_fast_page(m_page_table.physical_address_of(vaddr() + page_index * PAGE_SIZE), [&] {
memcpy(PageTable::fast_page_as_ptr(page_offset), buffer, PAGE_SIZE - page_offset);
PageTable::unmap_fast_page();
}
});
buffer += PAGE_SIZE - page_offset;
bytes -= PAGE_SIZE - page_offset;