Kernel: Fix all broken locks from new mutexes

2024-02-28 22:39:02 +02:00
parent 1971813336
commit d94f6388b7
66 changed files with 681 additions and 647 deletions
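
The hunks below all follow the same two-step pattern: the include moves from <kernel/LockGuard.h> to <kernel/Lock/LockGuard.h>, and the guarded member changes from the old m_lock to a mutex named m_mutex, still taken through a scoped RAII guard. As a rough sketch of that guard pattern (the real kernel Mutex and LockGuard interfaces are not part of this diff, so std::mutex and the method names below are stand-ins for illustration only):

	#include <mutex>

	// Minimal sketch of the RAII guard shape the call sites below rely on.
	// std::mutex and lock()/unlock() stand in for the kernel's own Mutex.
	template<typename Lock>
	class LockGuard
	{
	public:
		explicit LockGuard(Lock& lock) : m_lock(lock) { m_lock.lock(); }
		~LockGuard() { m_lock.unlock(); }
		LockGuard(const LockGuard&) = delete;
		LockGuard& operator=(const LockGuard&) = delete;
	private:
		Lock& m_lock;
	};

	struct StorageDeviceLike
	{
		std::mutex m_mutex; // renamed from m_lock in the hunks below

		void do_locked_work()
		{
			LockGuard _(m_mutex); // held for the rest of the scope
			// ... access shared device state ...
		}
	};

Because the guard is scope-bound, the rename stays mechanical: only the line constructing the guard changes, and every early return still releases the lock.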

View File

@@ -2,7 +2,7 @@
#include <kernel/IDT.h>
#include <kernel/InterruptController.h>
#include <kernel/IO.h>
-#include <kernel/LockGuard.h>
+#include <kernel/Lock/LockGuard.h>
#include <kernel/Storage/ATA/ATABus.h>
#include <kernel/Storage/ATA/ATADefinitions.h>
#include <kernel/Storage/ATA/ATADevice.h>
@@ -261,7 +261,7 @@ namespace Kernel
if (lba + sector_count > device.sector_count())
return BAN::Error::from_error_code(ErrorCode::Storage_Boundaries);
-LockGuard _(m_lock);
+LockGuard _(m_mutex);
if (lba < (1 << 28))
{
@@ -298,7 +298,7 @@ namespace Kernel
if (lba + sector_count > device.sector_count())
return BAN::Error::from_error_code(ErrorCode::Storage_Boundaries);
-LockGuard _(m_lock);
+LockGuard _(m_mutex);
if (lba < (1 << 28))
{

View File

@@ -1,5 +1,5 @@
#include <kernel/CriticalScope.h>
-#include <kernel/LockGuard.h>
+#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/PageTable.h>
#include <kernel/Storage/DiskCache.h>
@@ -32,10 +32,6 @@ namespace Kernel
uint64_t page_cache_offset = sector % sectors_per_page;
uint64_t page_cache_start = sector - page_cache_offset;
-PageTable& page_table = PageTable::current();
-LockGuard page_table_locker(page_table);
-ASSERT(page_table.is_page_free(0));
for (auto& cache : m_cache)
{
if (cache.first_sector < page_cache_start)
@@ -46,10 +42,9 @@ namespace Kernel
if (!(cache.sector_mask & (1 << page_cache_offset)))
continue;
-CriticalScope _;
-PageTable::map_fast_page(cache.paddr);
-memcpy(buffer.data(), PageTable::fast_page_as_ptr(page_cache_offset * m_sector_size), m_sector_size);
-PageTable::unmap_fast_page();
+PageTable::with_fast_page(cache.paddr, [&] {
+memcpy(buffer.data(), PageTable::fast_page_as_ptr(page_cache_offset * m_sector_size), m_sector_size);
+});
return true;
}
@@ -64,10 +59,6 @@ namespace Kernel
uint64_t page_cache_offset = sector % sectors_per_page;
uint64_t page_cache_start = sector - page_cache_offset;
-PageTable& page_table = PageTable::current();
-LockGuard page_table_locker(page_table);
-ASSERT(page_table.is_page_free(0));
size_t index = 0;
// Search the cache to see if we have this sector in memory
@@ -80,12 +71,9 @@ namespace Kernel
if (cache.first_sector > page_cache_start)
break;
-{
-CriticalScope _;
-PageTable::map_fast_page(cache.paddr);
+PageTable::with_fast_page(cache.paddr, [&] {
memcpy(PageTable::fast_page_as_ptr(page_cache_offset * m_sector_size), buffer.data(), m_sector_size);
-PageTable::unmap_fast_page();
-}
+});
cache.sector_mask |= 1 << page_cache_offset;
if (dirty)
@@ -111,12 +99,9 @@ namespace Kernel
return ret.error();
}
-{
-CriticalScope _;
-PageTable::map_fast_page(cache.paddr);
+PageTable::with_fast_page(cache.paddr, [&] {
memcpy(PageTable::fast_page_as_ptr(page_cache_offset * m_sector_size), buffer.data(), m_sector_size);
-PageTable::unmap_fast_page();
-}
+});
return {};
}
@@ -128,12 +113,9 @@ namespace Kernel
if (cache.dirty_mask == 0)
continue;
-{
-CriticalScope _;
-PageTable::map_fast_page(cache.paddr);
+PageTable::with_fast_page(cache.paddr, [&] {
memcpy(m_sync_cache.data(), PageTable::fast_page_as_ptr(), PAGE_SIZE);
-PageTable::unmap_fast_page();
-}
+});
uint8_t sector_start = 0;
uint8_t sector_count = 0;
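
The DiskCache hunks above drop the page-table guard and the manual map_fast_page()/unmap_fast_page() bracketing under a CriticalScope, and instead pass a lambda to PageTable::with_fast_page(), which owns the map/unmap around the copy so the temporary mapping cannot leak on an early exit. A rough sketch of such a scoped helper (the real PageTable internals and its locking are not part of this diff; the static buffer below is only a stand-in so the sketch compiles on its own):

	#include <cstddef>
	#include <cstdint>
	#include <utility>

	class PageTable
	{
	public:
		// Run the callback with paddr temporarily mapped at the fast page,
		// then always tear the mapping down again.
		template<typename F>
		static void with_fast_page(uintptr_t paddr, F&& callback)
		{
			map_fast_page(paddr);
			std::forward<F>(callback)();
			unmap_fast_page();
		}

		static void* fast_page_as_ptr(size_t offset = 0) { return s_fast_page + offset; }

	private:
		static void map_fast_page(uintptr_t) {} // placeholder for the real mapping
		static void unmap_fast_page() {}        // placeholder for the real unmapping
		static inline unsigned char s_fast_page[4096] {};
	};

Call sites then look exactly like the new lines above: PageTable::with_fast_page(cache.paddr, [&] { memcpy(...); });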

View File

@@ -1,4 +1,4 @@
-#include <kernel/LockGuard.h>
+#include <kernel/Lock/LockGuard.h>
#include <kernel/Scheduler.h>
#include <kernel/Storage/NVMe/Queue.h>
#include <kernel/Timer/Timer.h>
@@ -44,7 +44,7 @@ namespace Kernel
uint16_t NVMeQueue::submit_command(NVMe::SubmissionQueueEntry& sqe)
{
-LockGuard _(m_lock);
+LockGuard _(m_mutex);
ASSERT(m_done == false);
m_status = 0;

View File

@@ -4,7 +4,7 @@
#include <BAN/UTF8.h>
#include <kernel/FS/DevFS/FileSystem.h>
#include <kernel/FS/VirtualFileSystem.h>
-#include <kernel/LockGuard.h>
+#include <kernel/Lock/LockGuard.h>
#include <kernel/PCI.h>
#include <kernel/Storage/StorageDevice.h>
#include <kernel/Thread.h>
@@ -150,7 +150,7 @@ namespace Kernel
BAN::Vector<uint8_t> lba1;
TRY(lba1.resize(sector_size()));
-TRY(read_sectors(1, 1, lba1.span()));
+TRY(read_sectors(1, 1, BAN::ByteSpan { lba1.span() }));
const GPTHeader& header = *(const GPTHeader*)lba1.data();
if (!is_valid_gpt_header(header, sector_size()))
@@ -165,7 +165,7 @@ namespace Kernel
BAN::Vector<uint8_t> entry_array;
TRY(entry_array.resize(size));
-TRY(read_sectors(header.partition_entry_lba, size / sector_size(), entry_array.span()));
+TRY(read_sectors(header.partition_entry_lba, size / sector_size(), BAN::ByteSpan { entry_array.span() }));
if (!is_valid_gpt_crc32(header, lba1, entry_array))
return BAN::Error::from_error_code(ErrorCode::Storage_GPTHeader);
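
The two GPT hunks above only adapt call sites: read_sectors() now takes a BAN::ByteSpan, so the typed vector spans get wrapped explicitly. A byte span is just a non-owning pointer-plus-length view of raw memory; a minimal stand-in (the real BAN::ByteSpan interface is not shown in this commit, so this is illustrative only) might look like:

	#include <cstddef>
	#include <cstdint>

	// Minimal stand-in for a byte-span view in the spirit of BAN::ByteSpan.
	class ByteSpan
	{
	public:
		ByteSpan(uint8_t* data, size_t size) : m_data(data), m_size(size) {}

		uint8_t* data() { return m_data; }
		size_t size() const { return m_size; }

		// sub-range view, like the per-sector slices used further down
		ByteSpan slice(size_t offset, size_t length) { return ByteSpan(m_data + offset, length); }

	private:
		uint8_t* m_data;
		size_t m_size;
	};
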
@@ -207,14 +207,14 @@ namespace Kernel
void StorageDevice::add_disk_cache()
{
-LockGuard _(m_lock);
+LockGuard _(m_mutex);
ASSERT(!m_disk_cache.has_value());
m_disk_cache.emplace(sector_size(), *this);
}
BAN::ErrorOr<void> StorageDevice::sync_disk_cache()
{
-LockGuard _(m_lock);
+LockGuard _(m_mutex);
if (m_disk_cache.has_value())
TRY(m_disk_cache->sync());
return {};
@@ -225,14 +225,14 @@ namespace Kernel
ASSERT(buffer.size() >= sector_count * sector_size());
{
-LockGuard _(m_lock);
+LockGuard _(m_mutex);
if (!m_disk_cache.has_value())
return read_sectors_impl(lba, sector_count, buffer);
}
for (uint64_t offset = 0; offset < sector_count; offset++)
{
-LockGuard _(m_lock);
+LockGuard _(m_mutex);
auto sector_buffer = buffer.slice(offset * sector_size(), sector_size());
if (m_disk_cache->read_from_cache(lba + offset, sector_buffer))
continue;
@@ -248,14 +248,14 @@ namespace Kernel
ASSERT(buffer.size() >= sector_count * sector_size());
{
-LockGuard _(m_lock);
+LockGuard _(m_mutex);
if (!m_disk_cache.has_value())
return write_sectors_impl(lba, sector_count, buffer);
}
for (uint8_t offset = 0; offset < sector_count; offset++)
{
-LockGuard _(m_lock);
+LockGuard _(m_mutex);
auto sector_buffer = buffer.slice(offset * sector_size(), sector_size());
if (m_disk_cache->write_to_cache(lba + offset, sector_buffer, true).is_error())
TRY(write_sectors_impl(lba + offset, 1, sector_buffer));
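
Note the locking shape in the read and write paths above: m_mutex is taken once to see whether the disk cache exists at all, then re-taken for each sector, so the mutex is never held across a whole multi-sector transfer. A simplified illustration of that per-sector pattern (std::mutex and the cache_read()/device_read() helpers are hypothetical stand-ins, not the kernel's API):

	#include <cstddef>
	#include <cstdint>
	#include <mutex>

	struct DiskLike
	{
		std::mutex m_mutex;

		bool cache_read(uint64_t, uint8_t*) { return false; } // stand-in for the cache hit path
		void device_read(uint64_t, uint8_t*) {}                // stand-in for the device fallback

		void read_sectors(uint64_t lba, uint64_t count, uint8_t* out, size_t sector_size)
		{
			for (uint64_t offset = 0; offset < count; offset++)
			{
				// the lock is scoped to one sector, so other threads can
				// interleave their own accesses between iterations
				std::lock_guard<std::mutex> _(m_mutex);
				uint8_t* sector_out = out + offset * sector_size;
				if (cache_read(lba + offset, sector_out))
					continue;
				device_read(lba + offset, sector_out);
			}
		}
	};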