Kernel: Rework storage device and disk cache locking
Syncing the disk cache no longer blocks the underlying storage device, and the disk cache itself remains usable while a sync is in progress
This commit is contained in:
parent 912c5ea0bf
commit 08bfa0971e
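In outline, the rework moves locking from StorageDevice into DiskCache itself: an RWLock (m_rw_lock) guards the cache entries, and a Mutex (m_sync_mutex) serializes syncs and the shared sync bounce buffer. The key change is that sync_cache_index snapshots a dirty page under the write lock and then performs the slow device write with no cache lock held. Below is a minimal standalone sketch of that shape, using std::shared_mutex and std::mutex as stand-ins for the kernel's RWLock and Mutex (an approximation of the scheme, not the kernel's code; device I/O and page copying are stubbed out):

    #include <algorithm>
    #include <cstdint>
    #include <mutex>
    #include <shared_mutex>
    #include <vector>

    struct PageCache {
        uint64_t first_sector = 0;
        uint8_t  dirty_mask   = 0;     // one bit per sector of the page
        bool     syncing      = false; // entry is mid-sync, must not be evicted
    };

    std::shared_mutex      rw_lock;    // stands in for m_rw_lock
    std::mutex             sync_mutex; // stands in for m_sync_mutex
    std::vector<PageCache> cache;      // stands in for m_cache

    // Stub for the sector writes done from the bounce buffer (m_sync_cache).
    static bool write_dirty_runs_to_device(const PageCache&) { return true; }

    void sync_index(size_t index)
    {
        std::lock_guard sync_guard(sync_mutex); // one sync at a time

        PageCache snapshot;
        {
            std::unique_lock wr(rw_lock);       // exclusive, but only briefly
            if (index >= cache.size() || cache[index].dirty_mask == 0)
                return;
            // ... copy the page contents into the bounce buffer here ...
            snapshot = cache[index];
            cache[index].dirty_mask = 0;        // entry counts as clean from here on
            cache[index].syncing    = true;     // but must not be released yet
        }

        // The slow device write runs with NO cache lock held, so cache readers
        // and other users of the device are not blocked during the sync.
        const bool ok = write_dirty_runs_to_device(snapshot);

        std::unique_lock wr(rw_lock);
        // Entries may have shifted meanwhile; re-find by sector, as the diff does.
        auto it = std::find_if(cache.begin(), cache.end(),
            [&](const PageCache& c) { return c.first_sector == snapshot.first_sector; });
        if (it != cache.end()) {
            if (!ok)
                it->dirty_mask |= snapshot.dirty_mask; // restore unwritten bits
            it->syncing = false;
        }
    }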
@@ -3,6 +3,7 @@
 #include <BAN/Array.h>
 #include <BAN/ByteSpan.h>
 #include <BAN/Vector.h>
+#include <kernel/Lock/RWLock.h>
 #include <kernel/Memory/Types.h>

 namespace Kernel
@@ -37,9 +38,13 @@ namespace Kernel
 			uint64_t first_sector { 0 };
 			uint8_t sector_mask { 0 };
 			uint8_t dirty_mask { 0 };
+			bool syncing { false };
 		};

 	private:
+		RWLock m_rw_lock;
+		Mutex m_sync_mutex;
+
 		const size_t m_sector_size;
 		StorageDevice& m_device;
 		BAN::Vector<PageCache> m_cache;
@@ -51,7 +51,6 @@ namespace Kernel
 		virtual bool has_hungup_impl() const override { return false; }

 	private:
-		Mutex m_mutex;
 		BAN::Optional<DiskCache> m_disk_cache;
 		BAN::Vector<BAN::RefPtr<Partition>> m_partitions;
@@ -1,3 +1,4 @@
+#include <BAN/ScopeGuard.h>
 #include <kernel/BootInfo.h>
 #include <kernel/Memory/Heap.h>
 #include <kernel/Memory/PageTable.h>
@@ -53,6 +54,8 @@ namespace Kernel
 		const uint64_t page_cache_offset = sector % sectors_per_page;
 		const uint64_t page_cache_start = sector - page_cache_offset;

+		RWLockRDGuard _(m_rw_lock);
+
 		const auto index = find_sector_cache_index(sector);
 		if (index >= m_cache.size())
 			return false;
@@ -78,6 +81,8 @@ namespace Kernel
 		const uint64_t page_cache_offset = sector % sectors_per_page;
 		const uint64_t page_cache_start = sector - page_cache_offset;

+		RWLockWRGuard _(m_rw_lock);
+
 		const auto index = find_sector_cache_index(sector);

 		if (index >= m_cache.size() || m_cache[index].first_sector != page_cache_start)
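Both cache entry points above now take a scoped lock: the read path a shared RWLockRDGuard, the write path an exclusive RWLockWRGuard. Assuming the usual RAII reader-writer semantics for these guards (their definitions are not part of this diff), the usage pattern is:

    {
        RWLockRDGuard _(m_rw_lock); // shared: many readers may hold this at once
        // safe to search m_cache here, not to mutate it
    } // read lock released at end of scope

    {
        RWLockWRGuard _(m_rw_lock); // exclusive: waits out all readers and writers
        // safe to insert into or remove from m_cache
    } // write lock released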
@@ -115,12 +120,35 @@ namespace Kernel
 	BAN::ErrorOr<void> DiskCache::sync_cache_index(size_t index)
 	{
-		auto& cache = m_cache[index];
-		if (cache.dirty_mask == 0)
-			return {};
+		LockGuard _(m_sync_mutex);

-		PageTable::with_fast_page(cache.paddr, [&] {
-			memcpy(m_sync_cache.data(), PageTable::fast_page_as_ptr(), PAGE_SIZE);
-		});
+		PageCache temp_cache;
+
+		{
+			RWLockWRGuard _(m_rw_lock);
+
+			if (index >= m_cache.size())
+				return {};
+			auto& cache = m_cache[index];
+			if (cache.dirty_mask == 0)
+				return {};
+
+			PageTable::with_fast_page(cache.paddr, [&] {
+				memcpy(m_sync_cache.data(), PageTable::fast_page_as_ptr(), PAGE_SIZE);
+			});
+
+			temp_cache = cache;
+			cache.dirty_mask = 0;
+			cache.syncing = true;
+		}
+
+		// restores dirty mask if write to disk fails
+		BAN::ScopeGuard dirty_guard([&] {
+			RWLockWRGuard _(m_rw_lock);
+			const auto new_index = find_sector_cache_index(temp_cache.first_sector);
+			ASSERT(new_index < m_cache.size() && m_cache[new_index].first_sector == temp_cache.first_sector);
+			m_cache[new_index].dirty_mask |= temp_cache.dirty_mask;
+			m_cache[new_index].syncing = false;
+		});

 		uint8_t sector_start = 0;
@@ -128,15 +156,16 @@ namespace Kernel

 		while (sector_start + sector_count <= PAGE_SIZE / m_sector_size)
 		{
-			if (cache.dirty_mask & (1 << (sector_start + sector_count)))
+			if (temp_cache.dirty_mask & (1 << (sector_start + sector_count)))
 				sector_count++;
 			else if (sector_count == 0)
 				sector_start++;
 			else
 			{
-				dprintln_if(DEBUG_DISK_SYNC, "syncing {}->{}", cache.first_sector + sector_start, cache.first_sector + sector_start + sector_count);
+				dprintln_if(DEBUG_DISK_SYNC, "syncing {}->{}", temp_cache.first_sector + sector_start, temp_cache.first_sector + sector_start + sector_count);
 				auto data_slice = m_sync_cache.span().slice(sector_start * m_sector_size, sector_count * m_sector_size);
-				TRY(m_device.write_sectors_impl(cache.first_sector + sector_start, sector_count, data_slice));
+				TRY(m_device.write_sectors_impl(temp_cache.first_sector + sector_start, sector_count, data_slice));
+				temp_cache.dirty_mask &= ~(((1 << sector_count) - 1) << sector_start);
 				sector_start += sector_count + 1;
 				sector_count = 0;
 			}
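The new mask update marks a just-written run of sectors clean in the snapshot: ((1 << sector_count) - 1) builds sector_count one-bits, and << sector_start moves that run into place. A worked example with hypothetical values:

    uint8_t dirty_mask   = 0b00111100; // sectors 2..5 of the page are dirty
    uint8_t sector_start = 2;          // run just written starts at sector 2
    uint8_t sector_count = 3;          // ...and covers sectors 2..4
    uint8_t run_mask = ((1 << sector_count) - 1) << sector_start; // 0b00011100
    dirty_mask &= ~run_mask;           // 0b00100000: only sector 5 still dirty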
@@ -144,13 +173,12 @@ namespace Kernel

 		if (sector_count > 0)
 		{
-			dprintln_if(DEBUG_DISK_SYNC, "syncing {}->{}", cache.first_sector + sector_start, cache.first_sector + sector_start + sector_count);
+			dprintln_if(DEBUG_DISK_SYNC, "syncing {}->{}", temp_cache.first_sector + sector_start, temp_cache.first_sector + sector_start + sector_count);
 			auto data_slice = m_sync_cache.span().slice(sector_start * m_sector_size, sector_count * m_sector_size);
-			TRY(m_device.write_sectors_impl(cache.first_sector + sector_start, sector_count, data_slice));
+			TRY(m_device.write_sectors_impl(temp_cache.first_sector + sector_start, sector_count, data_slice));
+			temp_cache.dirty_mask &= ~(((1 << sector_count) - 1) << sector_start);
 		}

-		cache.dirty_mask = 0;
-
 		return {};
 	}
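Clearing temp_cache.dirty_mask run by run is what makes the dirty_guard at the top of the function precise: if a write fails partway, TRY returns early, the guard fires, and only the bits that never reached the disk are merged back into the live entry (on success the merge is a no-op and the guard simply clears syncing). A reduced standalone illustration of that rollback shape, with hypothetical write helpers:

    #include <cstdint>

    template <typename F>
    struct Guard { F f; ~Guard() { f(); } }; // minimal stand-in for BAN::ScopeGuard

    bool write_run(uint8_t run_mask); // hypothetical device write; may fail

    bool flush(uint8_t& live_dirty_mask, uint8_t snapshot)
    {
        auto restore = [&] { live_dirty_mask |= snapshot; };
        Guard<decltype(restore)> guard { restore };
        if (!write_run(snapshot & 0x0F)) return false; // guard restores every bit
        snapshot &= 0xF0;                              // low run is clean now
        if (!write_run(snapshot)) return false;        // guard restores only high bits
        snapshot = 0;                                  // success: nothing to restore
        return true;
    }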
@@ -168,17 +196,17 @@ namespace Kernel
 		if (g_disable_disk_write)
 			return {};

-		const uint64_t sectors_per_page = PAGE_SIZE / m_sector_size;
-		const uint64_t page_cache_offset = sector % sectors_per_page;
-		const uint64_t page_cache_start = sector - page_cache_offset;
-
+		m_rw_lock.rd_lock();
 		for (size_t i = find_sector_cache_index(sector); i < m_cache.size(); i++)
 		{
 			auto& cache = m_cache[i];
-			if (cache.first_sector * sectors_per_page >= page_cache_start * sectors_per_page + block_count)
+			if (cache.first_sector >= sector + block_count)
 				break;
+			m_rw_lock.rd_unlock();
 			TRY(sync_cache_index(i));
+			m_rw_lock.rd_lock();
 		}
+		m_rw_lock.rd_unlock();

 		return {};
 	}
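The rd_unlock()/rd_lock() dance around sync_cache_index is load-bearing: sync_cache_index acquires m_rw_lock exclusively for its snapshot step, so calling it while still holding the read lock would self-deadlock, assuming the RWLock is not upgradable (the lock's internals are not part of this diff). Releasing the lock also means m_cache can shift between iterations, which is exactly why sync_cache_index revalidates the index and first_sector under its own write lock. The hazard being avoided, in miniature:

    // Illustration only, NOT part of the diff:
    m_rw_lock.rd_lock();
    TRY(sync_cache_index(i)); // internally wr_lock()s m_rw_lock; a non-upgradable
                              // lock then waits forever for our own read lock
                              // to be released -> self-deadlock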
@@ -188,10 +216,12 @@ namespace Kernel
 		// NOTE: There might not actually be page_count pages after this
 		//       function returns. The synchronization must be done elsewhere.

+		RWLockWRGuard _(m_rw_lock);
+
 		size_t released = 0;
 		for (size_t i = 0; i < m_cache.size() && released < page_count;)
 		{
-			if (m_cache[i].dirty_mask == 0)
+			if (!m_cache[i].syncing && m_cache[i].dirty_mask == 0)
 			{
 				Heap::get().release_page(m_cache[i].paddr);
 				m_cache.remove(i);
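The added syncing check closes a window the lock rework opens: while a page is being written out, the live entry's dirty_mask is already zero even though its data may not be on disk yet, and the sync's dirty_guard still expects to find the entry afterwards (see the ASSERT above). A sketch of the interleaving this prevents (hypothetical timing, shown as comments):

    // Thread A: sync_cache_index(i)        | Thread B: release_pages(n)
    //  wr-lock: dirty_mask = 0,            |
    //           syncing = true; unlock     | wr-lock
    //  writing sectors to the device...    | sees dirty_mask == 0; without the
    //                                      | syncing flag the entry is freed
    //  write fails -> dirty_guard re-finds |
    //  the entry to restore its bits: gone |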
@@ -211,14 +211,12 @@ namespace Kernel

 	void StorageDevice::add_disk_cache()
 	{
-		LockGuard _(m_mutex);
 		ASSERT(!m_disk_cache.has_value());
 		m_disk_cache.emplace(sector_size(), *this);
 	}

 	size_t StorageDevice::drop_disk_cache()
 	{
-		LockGuard _(m_mutex);
 		if (m_disk_cache.has_value())
 			return m_disk_cache->release_pages(-1);
 		return 0;
@@ -226,7 +224,6 @@ namespace Kernel

 	BAN::ErrorOr<void> StorageDevice::sync_disk_cache()
 	{
-		LockGuard _(m_mutex);
 		if (m_disk_cache.has_value())
 			TRY(m_disk_cache->sync());
 		return {};
@@ -236,8 +233,6 @@ namespace Kernel
 	{
 		ASSERT(buffer.size() >= sector_count * sector_size());

-		LockGuard _(m_mutex);
-
 		if (!m_disk_cache.has_value())
 			return read_sectors_impl(lba, sector_count, buffer);
@@ -275,8 +270,6 @@ namespace Kernel
 	{
 		ASSERT(buffer.size() >= sector_count * sector_size());

-		LockGuard _(m_mutex);
-
 		if (g_disable_disk_write)
 		{
 			if (!m_disk_cache.has_value())