From 107b092982773470631455dfa7f1c9d42679dbe5 Mon Sep 17 00:00:00 2001
From: Bananymous
Date: Mon, 30 Jun 2025 16:03:31 +0300
Subject: [PATCH] Kernel: Allow arbitrary sized tmpfs files

The 2 block limit started to get annoying :D
---
 kernel/include/kernel/FS/TmpFS/Inode.h |   7 +-
 kernel/kernel/FS/TmpFS/Inode.cpp       | 153 ++++++++++++++++++++++---
 2 files changed, 146 insertions(+), 14 deletions(-)

diff --git a/kernel/include/kernel/FS/TmpFS/Inode.h b/kernel/include/kernel/FS/TmpFS/Inode.h
index 402ee28e..5c4bb69f 100644
--- a/kernel/include/kernel/FS/TmpFS/Inode.h
+++ b/kernel/include/kernel/FS/TmpFS/Inode.h
@@ -53,11 +53,16 @@ namespace Kernel
 		virtual BAN::ErrorOr<void> fsync_impl() override { return {}; }
 
 		void sync();
-		void free_all_blocks();
 		virtual BAN::ErrorOr<void> prepare_unlink() { return {}; };
 
+		void free_all_blocks();
+		void free_indirect_blocks(size_t block, uint32_t depth);
+
 		BAN::Optional<size_t> block_index(size_t data_block_index);
+		BAN::Optional<size_t> block_index_from_indirect(size_t block, size_t index, uint32_t depth);
+
 		BAN::ErrorOr<size_t> block_index_with_allocation(size_t data_block_index);
+		BAN::ErrorOr<size_t> block_index_from_indirect_with_allocation(size_t& block, size_t index, uint32_t depth);
 
 	protected:
 		TmpFileSystem& m_fs;
diff --git a/kernel/kernel/FS/TmpFS/Inode.cpp b/kernel/kernel/FS/TmpFS/Inode.cpp
index 2ba940c6..b8b3f44f 100644
--- a/kernel/kernel/FS/TmpFS/Inode.cpp
+++ b/kernel/kernel/FS/TmpFS/Inode.cpp
@@ -124,36 +124,160 @@ namespace Kernel
 	void TmpInode::free_all_blocks()
 	{
 		for (size_t i = 0; i < TmpInodeInfo::direct_block_count; i++)
-		{
 			if (m_inode_info.block[i])
 				m_fs.free_block(m_inode_info.block[i]);
-			m_inode_info.block[i] = 0;
+		if (size_t block = m_inode_info.block[TmpInodeInfo::direct_block_count + 0])
+			free_indirect_blocks(block, 1);
+		if (size_t block = m_inode_info.block[TmpInodeInfo::direct_block_count + 1])
+			free_indirect_blocks(block, 2);
+		if (size_t block = m_inode_info.block[TmpInodeInfo::direct_block_count + 2])
+			free_indirect_blocks(block, 3);
+		for (auto& block : m_inode_info.block)
+			block = 0;
+	}
+
+	void TmpInode::free_indirect_blocks(size_t block, uint32_t depth)
+	{
+		ASSERT(block != 0);
+
+		if (depth == 0)
+		{
+			m_fs.free_block(block);
+			return;
 		}
-		for (auto block : m_inode_info.block)
-			ASSERT(block == 0);
+
+		const size_t indices_per_block = blksize() / sizeof(size_t);
+		for (size_t index = 0; index < indices_per_block; index++)
+		{
+			size_t next_block;
+			m_fs.with_block_buffer(block, [&](BAN::ByteSpan block_buffer) {
+				next_block = block_buffer.as_span<size_t>()[index];
+			});
+
+			if (next_block == 0)
+				continue;
+
+			free_indirect_blocks(next_block, depth - 1);
+		}
+
+		m_fs.free_block(block);
 	}
 
 	BAN::Optional<size_t> TmpInode::block_index(size_t data_block_index)
 	{
-		ASSERT(data_block_index < TmpInodeInfo::direct_block_count);
-		if (m_inode_info.block[data_block_index])
+		if (data_block_index < TmpInodeInfo::direct_block_count)
+		{
+			if (m_inode_info.block[data_block_index] == 0)
+				return {};
 			return m_inode_info.block[data_block_index];
-		return {};
+		}
+		data_block_index -= TmpInodeInfo::direct_block_count;
+
+		const size_t indices_per_block = blksize() / sizeof(size_t);
+
+		if (data_block_index < indices_per_block)
+			return block_index_from_indirect(m_inode_info.block[TmpInodeInfo::direct_block_count + 0], data_block_index, 1);
+		data_block_index -= indices_per_block;
+
+		if (data_block_index < indices_per_block * indices_per_block)
+			return block_index_from_indirect(m_inode_info.block[TmpInodeInfo::direct_block_count + 1], data_block_index, 2);
+		data_block_index -= indices_per_block * indices_per_block;
+
+		if (data_block_index < indices_per_block * indices_per_block * indices_per_block)
+			return block_index_from_indirect(m_inode_info.block[TmpInodeInfo::direct_block_count + 2], data_block_index, 3);
+
+		ASSERT_NOT_REACHED();
+	}
+
+	BAN::Optional<size_t> TmpInode::block_index_from_indirect(size_t block, size_t index, uint32_t depth)
+	{
+		if (block == 0)
+			return {};
+		ASSERT(depth >= 1);
+
+		const size_t indices_per_block = blksize() / sizeof(size_t);
+
+		size_t divisor = 1;
+		for (size_t i = 1; i < depth; i++)
+			divisor *= indices_per_block;
+
+		size_t next_block;
+		m_fs.with_block_buffer(block, [&](BAN::ByteSpan block_buffer) {
+			next_block = block_buffer.as_span<size_t>()[(index / divisor) % indices_per_block];
+		});
+
+		if (next_block == 0)
+			return {};
+
+		if (depth == 1)
+			return next_block;
+
+		return block_index_from_indirect(next_block, index, depth - 1);
 	}
 
 	BAN::ErrorOr<size_t> TmpInode::block_index_with_allocation(size_t data_block_index)
 	{
-		if (data_block_index >= TmpInodeInfo::direct_block_count)
+		if (data_block_index < TmpInodeInfo::direct_block_count)
 		{
-			dprintln("only {} blocks supported :D", TmpInodeInfo::direct_block_count);
-			return BAN::Error::from_errno(ENOSPC);
+			if (m_inode_info.block[data_block_index] == 0)
+			{
+				m_inode_info.block[data_block_index] = TRY(m_fs.allocate_block());
+				m_inode_info.blocks++;
+			}
+			return m_inode_info.block[data_block_index];
 		}
-		if (m_inode_info.block[data_block_index] == 0)
+		data_block_index -= TmpInodeInfo::direct_block_count;
+
+		const size_t indices_per_block = blksize() / sizeof(size_t);
+
+		if (data_block_index < indices_per_block)
+			return block_index_from_indirect_with_allocation(m_inode_info.block[TmpInodeInfo::direct_block_count + 0], data_block_index, 1);
+		data_block_index -= indices_per_block;
+
+		if (data_block_index < indices_per_block * indices_per_block)
+			return block_index_from_indirect_with_allocation(m_inode_info.block[TmpInodeInfo::direct_block_count + 1], data_block_index, 2);
+		data_block_index -= indices_per_block * indices_per_block;
+
+		if (data_block_index < indices_per_block * indices_per_block * indices_per_block)
+			return block_index_from_indirect_with_allocation(m_inode_info.block[TmpInodeInfo::direct_block_count + 2], data_block_index, 3);
+
+		ASSERT_NOT_REACHED();
+	}
+
+	BAN::ErrorOr<size_t> TmpInode::block_index_from_indirect_with_allocation(size_t& block, size_t index, uint32_t depth)
+	{
+		if (block == 0)
 		{
-			m_inode_info.block[data_block_index] = TRY(m_fs.allocate_block());
+			block = TRY(m_fs.allocate_block());
 			m_inode_info.blocks++;
 		}
-		return m_inode_info.block[data_block_index];
+		ASSERT(depth >= 1);
+
+		const size_t indices_per_block = blksize() / sizeof(size_t);
+
+		size_t divisor = 1;
+		for (size_t i = 1; i < depth; i++)
+			divisor *= indices_per_block;
+
+		size_t next_block;
+		m_fs.with_block_buffer(block, [&](BAN::ByteSpan block_buffer) {
+			next_block = block_buffer.as_span<size_t>()[(index / divisor) % indices_per_block];
+		});
+
+		if (next_block == 0)
+		{
+			next_block = TRY(m_fs.allocate_block());
+			m_inode_info.blocks++;
+
+			m_fs.with_block_buffer(block, [&](BAN::ByteSpan block_buffer) {
+				block_buffer.as_span<size_t>()[(index / divisor) % indices_per_block] = next_block;
+			});
+		}
+
+		if (depth == 1)
+			return next_block;
+
+		return block_index_from_indirect_with_allocation(next_block, index, depth - 1);
 	}
 
 	/* FILE INODE */
@@ -241,6 +365,9 @@ namespace Kernel
 
 	BAN::ErrorOr<void> TmpFileInode::truncate_impl(size_t new_size)
 	{
+		// FIXME: if size is decreased, we should probably free
+		// unused blocks
+
 		m_inode_info.size = new_size;
 		return {};
 	}
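
Note on the new layout (not part of the patch): the inode now uses ext2-style addressing, with direct blocks followed by one single, one double and one triple indirect block, where each indirect block stores blksize() / sizeof(size_t) child indices. The standalone sketch below computes the resulting maximum file size; the 4096-byte block size and the 2 direct blocks are assumptions for illustration only, not values taken from TmpFS.

// Standalone sketch (illustration only, not part of the patch): maximum file
// size addressable by direct + single/double/triple indirect blocks.
// block_size and direct_block_count are assumed values, not taken from TmpFS.
#include <cstddef>
#include <cstdio>

int main()
{
	const size_t block_size = 4096;      // assumption: 4 KiB tmpfs blocks
	const size_t direct_block_count = 2; // assumption: matches the old "2 block limit"

	// Each indirect block stores raw size_t block indices,
	// mirroring blksize() / sizeof(size_t) in the patch.
	const size_t indices_per_block = block_size / sizeof(size_t);

	const size_t max_data_blocks =
		direct_block_count
		+ indices_per_block                                          // single indirect
		+ indices_per_block * indices_per_block                      // double indirect
		+ indices_per_block * indices_per_block * indices_per_block; // triple indirect

	printf("max file size: %zu bytes\n", max_data_blocks * block_size);
	return 0;
}

With those assumed numbers the limit goes from 2 blocks (8 KiB) to roughly 513 GiB, most of it contributed by the triple indirect tree.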
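A second sketch of the lookup arithmetic: block_index() first peels off the direct range, then the single and double indirect ranges, and block_index_from_indirect() picks a slot at each level with (index / divisor) % indices_per_block. The values below (512 indices per block, 2 direct blocks) are again assumptions for illustration.

// Standalone sketch (illustration only): resolving a data block index to a
// slot at each indirect depth, mirroring block_index_from_indirect().
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main()
{
	const size_t indices_per_block = 512; // assumption: 4096-byte blocks, 8-byte size_t
	const size_t direct_block_count = 2;  // assumption

	// Pick a data block index that lands in the double indirect tree.
	size_t index = direct_block_count + indices_per_block + 5;

	// Same peeling as block_index(): skip the direct blocks, then the single
	// indirect range, leaving an offset inside the double indirect tree.
	index -= direct_block_count;
	index -= indices_per_block;
	const uint32_t depth = 2;

	// divisor selects which slot of the current indirect block to follow.
	size_t divisor = 1;
	for (size_t i = 1; i < depth; i++)
		divisor *= indices_per_block;

	for (uint32_t d = depth; d >= 1; d--)
	{
		printf("depth %u -> slot %zu\n", d, (index / divisor) % indices_per_block);
		divisor /= indices_per_block;
	}
	return 0;
}

The *_with_allocation variants in the patch do the same walk, but allocate any missing indirect or data block on the way down and bump m_inode_info.blocks for each allocation.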