Kernel/LibC: Implement fsync

This commit is contained in:
2024-12-02 03:42:49 +02:00
parent cccb4e6d5e
commit 747c3b2a4b
21 changed files with 140 additions and 35 deletions

View File

@@ -346,6 +346,17 @@ namespace Kernel
return {};
}
BAN::ErrorOr<void> Ext2FS::sync_block(uint32_t block)
{
	// Flush every device sector that backs a single filesystem block.
	LockGuard _(m_mutex);
	const uint32_t sectors_per_block = this->block_size() / m_block_device->blksize();
	const uint32_t first_device_block = block * sectors_per_block;
	return m_block_device->sync_blocks(first_device_block, sectors_per_block);
}
Ext2FS::BlockBufferWrapper Ext2FS::get_block_buffer()
{
LockGuard _(m_mutex);

View File

@@ -259,6 +259,14 @@ namespace Kernel
return {};
}
BAN::ErrorOr<void> Ext2Inode::fsync_impl()
{
	// Flush every on-disk data block of this inode through the filesystem's
	// block cache. Hoist the count out of the loop: it is invariant while we
	// iterate, and re-evaluating it per iteration is wasted work.
	const size_t data_block_count = max_used_data_block_count();
	for (size_t i = 0; i < data_block_count; i++)
	{
		// Sparse files can have holes; only blocks that are actually
		// allocated map to a filesystem block and need syncing.
		if (const auto fs_block = TRY(fs_block_of_data_block_index(i)); fs_block.has_value())
			TRY(m_fs.sync_block(fs_block.value()));
	}
	return {};
}
BAN::ErrorOr<void> Ext2Inode::cleanup_indirect_block(uint32_t block, uint32_t depth)
{
ASSERT(block);

View File

@@ -1,5 +1,6 @@
#include <kernel/FS/Inode.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/FileBackedRegion.h>
#include <fcntl.h>
@@ -203,6 +204,15 @@ namespace Kernel
return chown_impl(uid, gid);
}
BAN::ErrorOr<void> Inode::fsync()
{
	LockGuard _(m_mutex);

	// If this inode is memory-mapped, push back the shared pages first so
	// the filesystem-level flush below sees their current contents.
	if (auto shared_region = m_shared_region.lock())
	{
		const size_t page_count = shared_region->pages.size();
		for (size_t page = 0; page < page_count; page++)
			shared_region->sync(page);
	}

	return fsync_impl();
}
bool Inode::can_read() const
{
LockGuard _(m_mutex);

View File

@@ -1448,6 +1448,14 @@ namespace Kernel
return 0;
}
BAN::ErrorOr<long> Process::sys_fsync(int fd)
{
	// Resolve the descriptor to its inode and flush that inode to disk.
	LockGuard _(m_process_lock);
	auto inode = TRY(m_open_file_descriptors.inode_of(fd));
	TRY(inode->fsync());
	// POSIX fsync() returns 0 on success.
	return 0;
}
BAN::ErrorOr<void> Process::mount(BAN::StringView source, BAN::StringView target)
{
BAN::String absolute_source, absolute_target;

View File

@@ -32,7 +32,7 @@ namespace Kernel
for (auto& cache : m_cache)
{
if (cache.first_sector < page_cache_start)
if (cache.first_sector + sectors_per_page <= page_cache_start)
continue;
if (cache.first_sector > page_cache_start)
break;
@@ -64,7 +64,7 @@ namespace Kernel
{
auto& cache = m_cache[index];
if (cache.first_sector < page_cache_start)
if (cache.first_sector + sectors_per_page <= page_cache_start)
continue;
if (cache.first_sector > page_cache_start)
break;
@@ -104,47 +104,73 @@ namespace Kernel
return {};
}
BAN::ErrorOr<void> DiskCache::sync_cache_index(size_t index)
{
	// NOTE(review): this span was diff residue with old and new lines
	// interleaved; reconstructed here as the post-change function.
	auto& cache = m_cache[index];
	if (cache.dirty_mask == 0)
		return {};

	// Copy the cached page into the bounce buffer through a temporary
	// fast-page mapping, so the device write can run without keeping the
	// page mapped.
	PageTable::with_fast_page(cache.paddr, [&] {
		memcpy(m_sync_cache.data(), PageTable::fast_page_as_ptr(), PAGE_SIZE);
	});

	// Walk the per-sector dirty bitmask and write out maximal runs of
	// consecutive dirty sectors in single device requests.
	uint8_t sector_start = 0;
	uint8_t sector_count = 0;
	while (sector_start + sector_count <= PAGE_SIZE / m_sector_size)
	{
		if (cache.dirty_mask & (1 << (sector_start + sector_count)))
			sector_count++;
		else if (sector_count == 0)
			sector_start++;
		else
		{
			dprintln_if(DEBUG_DISK_SYNC, "syncing {}->{}", cache.first_sector + sector_start, cache.first_sector + sector_start + sector_count);
			auto data_slice = m_sync_cache.span().slice(sector_start * m_sector_size, sector_count * m_sector_size);
			TRY(m_device.write_sectors_impl(cache.first_sector + sector_start, sector_count, data_slice));
			// Skip past the run and the clean sector that ended it.
			sector_start += sector_count + 1;
			sector_count = 0;
		}
	}
	// A run that extends to the end of the page is not flushed inside the
	// loop; write it out here.
	if (sector_count > 0)
	{
		dprintln_if(DEBUG_DISK_SYNC, "syncing {}->{}", cache.first_sector + sector_start, cache.first_sector + sector_start + sector_count);
		auto data_slice = m_sync_cache.span().slice(sector_start * m_sector_size, sector_count * m_sector_size);
		TRY(m_device.write_sectors_impl(cache.first_sector + sector_start, sector_count, data_slice));
	}

	cache.dirty_mask = 0;
	return {};
}
BAN::ErrorOr<void> DiskCache::sync()
{
	// Flush all dirty cached pages; a global switch can disable disk
	// writes entirely (e.g. while panicking), in which case do nothing.
	if (g_disable_disk_write)
		return {};
	const size_t cache_count = m_cache.size();
	for (size_t index = 0; index < cache_count; index++)
		TRY(sync_cache_index(index));
	return {};
}
// Flush only the cached pages that overlap the sector range starting at
// `sector`. The closing brace of this function lies outside this view.
BAN::ErrorOr<void> DiskCache::sync(uint64_t sector, size_t block_count)
{
	if (g_disable_disk_write)
		return {};
	// Round the requested sector down to the first sector of its cache page.
	uint64_t sectors_per_page = PAGE_SIZE / m_sector_size;
	uint64_t page_cache_offset = sector % sectors_per_page;
	uint64_t page_cache_start = sector - page_cache_offset;
	for (size_t i = 0; i < m_cache.size(); i++)
	{
		auto& cache = m_cache[i];
		// Skip pages that end before the range starts. The `break` below
		// presumably relies on m_cache being sorted by first_sector (the
		// lookup paths earlier in this file break on the same ordering).
		if (cache.first_sector + sectors_per_page <= page_cache_start)
			continue;
		// NOTE(review): scaling both first_sector and page_cache_start by
		// sectors_per_page while adding a raw `block_count` mixes units —
		// looks like it should compare against `page_cache_start +
		// block_count` in sectors (or convert block_count). Verify the
		// intended semantics of `block_count` against callers.
		if (cache.first_sector * sectors_per_page >= page_cache_start * sectors_per_page + block_count)
			break;
		TRY(sync_cache_index(i));
	}
	return {};

View File

@@ -48,6 +48,15 @@ namespace Kernel
return {};
}
BAN::ErrorOr<void> Partition::sync_blocks(uint64_t block, size_t block_count)
{
	// Flush blocks of the underlying device, translated by this
	// partition's starting offset.
	//
	// Use a 64-bit count: narrowing to uint32_t truncates on partitions
	// larger than 2^32 blocks and makes the bounds check pass wrongly.
	const uint64_t blocks_in_partition = m_last_block - m_first_block + 1;
	// Overflow-safe range check: `block + block_count` could wrap around,
	// so rearrange the comparison instead of adding.
	if (block_count > blocks_in_partition || block > blocks_in_partition - block_count)
		return BAN::Error::from_error_code(ErrorCode::Storage_Boundaries);
	TRY(m_device->sync_blocks(m_first_block + block, block_count));
	return {};
}
BAN::ErrorOr<size_t> Partition::read_impl(off_t offset, BAN::ByteSpan buffer)
{
ASSERT(offset >= 0);

View File

@@ -286,6 +286,13 @@ namespace Kernel
return {};
}
BAN::ErrorOr<void> StorageDevice::sync_blocks(uint64_t block, size_t block_count)
{
	// A device without a disk cache has no buffered writes, so a sync is
	// trivially complete.
	if (m_disk_cache.has_value())
		return m_disk_cache->sync(block, block_count);
	return {};
}
BAN::ErrorOr<size_t> StorageDevice::read_impl(off_t offset, BAN::ByteSpan buffer)
{
if (offset % sector_size())