update main #1

Merged
Sinipelto merged 240 commits from Bananymous/banan-os:main into main on 2023-11-20 13:20:51 +02:00
4 changed files with 132 additions and 117 deletions
Showing only changes of commit ab4f033385

View File

@@ -4,17 +4,30 @@
 #include <BAN/Iteration.h>
 #include <kernel/FS/FileSystem.h>
 #include <kernel/FS/TmpFS/Inode.h>
+#include <kernel/Memory/PageTable.h>
 #include <kernel/SpinLock.h>
 namespace Kernel
 {
+	namespace TmpFuncs
+	{
 		template<typename F>
 		concept for_each_indirect_paddr_allocating_callback = requires(F func, paddr_t paddr, bool was_allocated)
 		{
 			requires BAN::is_same_v<decltype(func(paddr, was_allocated)), BAN::Iteration>;
 		};
+		template<typename F>
+		concept with_block_buffer_callback = requires(F func, BAN::ByteSpan buffer)
+		{
+			requires BAN::is_same_v<decltype(func(buffer)), void>;
+		};
+	}
 	class TmpFileSystem : public FileSystem
 	{
 	public:

@@ -27,6 +40,7 @@ namespace Kernel
 		virtual BAN::RefPtr<Inode> root_inode() override { return m_root_inode; }
 		BAN::ErrorOr<BAN::RefPtr<TmpInode>> open_inode(ino_t ino);
+		BAN::ErrorOr<void> add_to_cache(BAN::RefPtr<TmpInode>);
 		// FIXME: read_block and write_block should not require external buffer
 		// probably some wrapper like PageTable::with_fast_page could work?

@@ -36,8 +50,8 @@ namespace Kernel
 		void delete_inode(ino_t ino);
 		BAN::ErrorOr<ino_t> allocate_inode(const TmpInodeInfo&);
-		void read_block(size_t index, BAN::ByteSpan buffer);
-		void write_block(size_t index, BAN::ConstByteSpan buffer);
+		template<TmpFuncs::with_block_buffer_callback F>
+		void with_block_buffer(size_t index, F callback);
 		void free_block(size_t index);
 		BAN::ErrorOr<size_t> allocate_block();

@@ -47,6 +61,7 @@ namespace Kernel
 			enum Flags : paddr_t
 			{
 				Present = 1 << 0,
+				Internal = 1 << 1,
 			};
 			// 12 bottom bits of paddr can be used as flags, since

@@ -79,9 +94,9 @@ namespace Kernel
 		paddr_t find_block(size_t index);
-		template<for_each_indirect_paddr_allocating_callback F>
+		template<TmpFuncs::for_each_indirect_paddr_allocating_callback F>
 		BAN::ErrorOr<void> for_each_indirect_paddr_allocating(PageInfo page_info, F callback, size_t depth);
-		template<for_each_indirect_paddr_allocating_callback F>
+		template<TmpFuncs::for_each_indirect_paddr_allocating_callback F>
 		BAN::ErrorOr<BAN::Iteration> for_each_indirect_paddr_allocating_internal(PageInfo page_info, F callback, size_t depth);
 		paddr_t find_indirect(PageInfo root, size_t index, size_t depth);

@@ -119,4 +134,14 @@ namespace Kernel
 		const size_t m_max_pages;
 	};
+	template<TmpFuncs::with_block_buffer_callback F>
+	void TmpFileSystem::with_block_buffer(size_t index, F callback)
+	{
+		paddr_t block_paddr = find_block(index);
+		PageTable::with_fast_page(block_paddr, [&] {
+			BAN::ByteSpan buffer(reinterpret_cast<uint8_t*>(PageTable::fast_page()), PAGE_SIZE);
+			callback(buffer);
+		});
+	}
 }
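
Note: the template above replaces the old read_block/write_block pair and addresses the FIXME about external buffers; the block is mapped via PageTable::with_fast_page only for the duration of the callback. A minimal usage sketch (hypothetical caller code, not part of this commit; assumes the caller, like TmpInode below, has access to the member and already holds a block index from allocate_block()):

	// Sketch: zero one data block through the new callback API.
	// The PAGE_SIZE-byte block stays mapped only while the lambda runs,
	// so no separate buffer allocation is needed.
	m_fs.with_block_buffer(block_index, [&](BAN::ByteSpan block_buffer) {
		memset(block_buffer.data(), 0x00, block_buffer.size());
	});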

View File

@@ -8,6 +8,17 @@
 namespace Kernel
 {
+	namespace TmpFuncs
+	{
+		template<typename F>
+		concept for_each_entry_callback = requires(F func, const TmpDirectoryEntry& entry)
+		{
+			requires BAN::is_same_v<decltype(func(entry)), BAN::Iteration>;
+		};
+	}
 	class TmpFileSystem;
 	class TmpInode : public Inode

@@ -51,7 +62,7 @@ namespace Kernel
 	class TmpFileInode : public TmpInode
 	{
 	public:
-		static BAN::ErrorOr<BAN::RefPtr<TmpFileInode>> create(TmpFileSystem&, mode_t, uid_t, gid_t);
+		static BAN::ErrorOr<BAN::RefPtr<TmpFileInode>> create_new(TmpFileSystem&, mode_t, uid_t, gid_t);
 		~TmpFileInode();
 	protected:

@@ -74,12 +85,6 @@ namespace Kernel
 		TmpSymlinkInode(TmpFileSystem&, ino_t, const TmpInodeInfo&, BAN::StringView target);
 	};
-	template<typename F>
-	concept for_each_entry_callback = requires(F func, const TmpDirectoryEntry& entry)
-	{
-		requires BAN::is_same_v<decltype(func(entry)), BAN::Iteration>;
-	};
 	class TmpDirectoryInode : public TmpInode
 	{
 	public:

@@ -100,7 +105,7 @@ namespace Kernel
 		BAN::ErrorOr<void> link_inode(TmpInode&, BAN::StringView);
-		template<for_each_entry_callback F>
+		template<TmpFuncs::for_each_entry_callback F>
 		void for_each_entry(F callback);
 		friend class TmpInode;

View File

@@ -1,6 +1,5 @@
 #include <kernel/FS/TmpFS/FileSystem.h>
 #include <kernel/Memory/Heap.h>
-#include <kernel/Memory/PageTable.h>
 namespace Kernel
 {

@@ -39,7 +38,6 @@ namespace Kernel
 		});
 		m_root_inode = TRY(TmpDirectoryInode::create_root(*this, mode, uid, gid));
-		TRY(m_inode_cache.insert(m_root_inode->ino(), m_root_inode));
 		return {};
 	}

@@ -66,6 +64,13 @@ namespace Kernel
 		return inode;
 	}
+	BAN::ErrorOr<void> TmpFileSystem::add_to_cache(BAN::RefPtr<TmpInode> inode)
+	{
+		if (!m_inode_cache.contains(inode->ino()))
+			TRY(m_inode_cache.insert(inode->ino(), inode));
+		return {};
+	}
 	void TmpFileSystem::read_inode(ino_t ino, TmpInodeInfo& out)
 	{
 		auto inode_location = find_inode(ino);

@@ -135,24 +140,6 @@ namespace Kernel
 		};
 	}
-	void TmpFileSystem::read_block(size_t index, BAN::ByteSpan buffer)
-	{
-		ASSERT(buffer.size() >= PAGE_SIZE);
-		paddr_t block_paddr = find_block(index);
-		PageTable::with_fast_page(block_paddr, [&] {
-			memcpy(buffer.data(), PageTable::fast_page_as_ptr(), PAGE_SIZE);
-		});
-	}
-	void TmpFileSystem::write_block(size_t index, BAN::ConstByteSpan buffer)
-	{
-		ASSERT(buffer.size() >= PAGE_SIZE);
-		paddr_t block_paddr = find_block(index);
-		PageTable::with_fast_page(block_paddr, [&] {
-			memcpy(PageTable::fast_page_as_ptr(), buffer.data(), PAGE_SIZE);
-		});
-	}
 	void TmpFileSystem::free_block(size_t index)
 	{
 		ASSERT_NOT_REACHED();

@@ -180,12 +167,15 @@ namespace Kernel
 	{
 		ASSERT(root.flags() & PageInfo::Flags::Present);
 		if (depth == 0)
+		{
+			ASSERT(index == 0);
 			return root.paddr();
+		}
 		constexpr size_t addresses_per_page = PAGE_SIZE / sizeof(PageInfo);
 		size_t divisor = 1;
-		for (size_t i = 0; i < depth; i++)
+		for (size_t i = 1; i < depth; i++)
 			divisor *= addresses_per_page;
 		size_t index_of_page = index / divisor;

@@ -201,11 +191,15 @@ namespace Kernel
 		return find_indirect(next, index_in_page, depth - 1);
 	}
-	template<for_each_indirect_paddr_allocating_callback F>
+	template<TmpFuncs::for_each_indirect_paddr_allocating_callback F>
 	BAN::ErrorOr<BAN::Iteration> TmpFileSystem::for_each_indirect_paddr_allocating_internal(PageInfo page_info, F callback, size_t depth)
 	{
-		ASSERT_GT(depth, 0);
 		ASSERT(page_info.flags() & PageInfo::Flags::Present);
+		if (depth == 0)
+		{
+			bool is_new_block = page_info.flags() & PageInfo::Flags::Internal;
+			return callback(page_info.paddr(), is_new_block);
+		}
 		for (size_t i = 0; i < PAGE_SIZE / sizeof(PageInfo); i++)
 		{

@@ -214,8 +208,6 @@ namespace Kernel
 				next_info = PageTable::fast_page_as_sized<PageInfo>(i);
 			});
-			bool allocated = false;
 			if (!(next_info.flags() & PageInfo::Flags::Present))
 			{
 				paddr_t new_paddr = Heap::get().take_free_page();

@@ -234,15 +226,11 @@ namespace Kernel
 					to_update_info = next_info;
 				});
-				allocated = true;
+				// Don't sync the internal bit to actual memory
+				next_info.set_flags(PageInfo::Flags::Internal | PageInfo::Flags::Present);
 			}
-			BAN::Iteration result;
-			if (depth == 1)
-				result = callback(next_info.paddr(), allocated);
-			else
-				result = TRY(for_each_indirect_paddr_allocating_internal(next_info, callback, depth - 1));
+			auto result = TRY(for_each_indirect_paddr_allocating_internal(next_info, callback, depth - 1));
 			switch (result)
 			{
 				case BAN::Iteration::Continue:

@@ -257,7 +245,7 @@ namespace Kernel
 		return BAN::Iteration::Continue;
 	}
-	template<for_each_indirect_paddr_allocating_callback F>
+	template<TmpFuncs::for_each_indirect_paddr_allocating_callback F>
 	BAN::ErrorOr<void> TmpFileSystem::for_each_indirect_paddr_allocating(PageInfo page_info, F callback, size_t depth)
 	{
 		BAN::Iteration result = TRY(for_each_indirect_paddr_allocating_internal(page_info, callback, depth));
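
Note: with the reworked depth handling above, depth 0 now denotes the data block itself (hence the new ASSERT(index == 0)), and for depth > 0 the divisor is addresses_per_page raised to depth - 1, which is why the loop now starts at i = 1. A standalone sketch of that index decomposition (illustrative only, not kernel code; assumes PAGE_SIZE = 4096 and an 8-byte PageInfo, i.e. 512 entries per page, and that index_in_page is index % divisor):

	#include <cstddef>
	#include <cstdio>

	// Standalone illustration of TmpFileSystem::find_indirect's index math.
	int main()
	{
		constexpr size_t addresses_per_page = 4096 / 8; // 512 PageInfo entries per page
		size_t index = 1000;                            // data block index to resolve
		for (size_t depth = 2; depth > 0; depth--)
		{
			// divisor = addresses_per_page^(depth - 1), matching the loop that starts at i = 1
			size_t divisor = 1;
			for (size_t i = 1; i < depth; i++)
				divisor *= addresses_per_page;
			size_t index_of_page = index / divisor; // which entry to follow at this level
			index %= divisor;                       // index passed down to the next level
			std::printf("depth %zu: follow entry %zu, remaining index %zu\n", depth, index_of_page, index);
		}
		// depth == 0: the remaining index must be 0 and the PageInfo's paddr is the block itself
		return 0;
	}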

View File

@@ -64,7 +64,10 @@ namespace Kernel
 		: m_fs(fs)
 		, m_inode_info(info)
 		, m_ino(ino)
-	{}
+	{
+		// FIXME: this should be able to fail
+		MUST(fs.add_to_cache(this));
+	}
 	void TmpInode::sync()
 	{

@@ -102,7 +105,7 @@ namespace Kernel
 	/* FILE INODE */
-	BAN::ErrorOr<BAN::RefPtr<TmpFileInode>> TmpFileInode::create(TmpFileSystem& fs, mode_t mode, uid_t uid, gid_t gid)
+	BAN::ErrorOr<BAN::RefPtr<TmpFileInode>> TmpFileInode::create_new(TmpFileSystem& fs, mode_t mode, uid_t uid, gid_t gid)
 	{
 		auto info = create_inode_info(Mode::IFREG | mode, uid, gid);
 		ino_t ino = TRY(fs.allocate_inode(info));

@@ -131,15 +134,12 @@ namespace Kernel
 		m_fs.delete_inode(ino());
 	}
-	BAN::ErrorOr<size_t> TmpFileInode::read_impl(off_t offset, BAN::ByteSpan buffer)
+	BAN::ErrorOr<size_t> TmpFileInode::read_impl(off_t offset, BAN::ByteSpan out_buffer)
 	{
-		if (offset >= size() || buffer.size() == 0)
+		if (offset >= size() || out_buffer.size() == 0)
 			return 0;
-		BAN::Vector<uint8_t> block_buffer;
-		TRY(block_buffer.resize(blksize()));
-		const size_t bytes_to_read = BAN::Math::min<size_t>(size() - offset, buffer.size());
+		const size_t bytes_to_read = BAN::Math::min<size_t>(size() - offset, out_buffer.size());
 		size_t read_done = 0;
 		while (read_done < bytes_to_read)

@@ -152,11 +152,11 @@ namespace Kernel
 			const size_t bytes = BAN::Math::min<size_t>(bytes_to_read - read_done, blksize() - block_offset);
 			if (block_index.has_value())
-				m_fs.read_block(block_index.value(), block_buffer.span());
+				m_fs.with_block_buffer(block_index.value(), [&](BAN::ByteSpan block_buffer) {
+					memcpy(out_buffer.data() + read_done, block_buffer.data() + block_offset, bytes);
+				});
 			else
-				memset(block_buffer.data(), 0x00, block_buffer.size());
-			memcpy(buffer.data() + read_done, block_buffer.data() + block_offset, bytes);
+				memset(out_buffer.data() + read_done, 0x00, bytes);
 			read_done += bytes;
 		}

@@ -164,17 +164,14 @@ namespace Kernel
 		return read_done;
 	}
-	BAN::ErrorOr<size_t> TmpFileInode::write_impl(off_t offset, BAN::ConstByteSpan buffer)
+	BAN::ErrorOr<size_t> TmpFileInode::write_impl(off_t offset, BAN::ConstByteSpan in_buffer)
 	{
 		// FIXME: handle overflow
-		if (offset + buffer.size() > (size_t)size())
-			TRY(truncate_impl(offset + buffer.size()));
-		BAN::Vector<uint8_t> block_buffer;
-		TRY(block_buffer.resize(blksize()));
-		const size_t bytes_to_write = buffer.size();
+		if (offset + in_buffer.size() > (size_t)size())
+			TRY(truncate_impl(offset + in_buffer.size()));
+		const size_t bytes_to_write = in_buffer.size();
 		size_t write_done = 0;
 		while (write_done < bytes_to_write)

@@ -186,11 +183,9 @@ namespace Kernel
 			const size_t bytes = BAN::Math::min<size_t>(bytes_to_write - write_done, blksize() - block_offset);
-			if (bytes < (size_t)blksize())
-				m_fs.read_block(block_index, block_buffer.span());
-			memcpy(block_buffer.data() + block_offset, buffer.data() + write_done, bytes);
-			m_fs.write_block(block_index, block_buffer.span());
+			m_fs.with_block_buffer(block_index, [&](BAN::ByteSpan block_buffer) {
+				memcpy(block_buffer.data() + block_offset, in_buffer.data() + write_done, bytes);
+			});
 			write_done += bytes;
 		}

@@ -233,7 +228,7 @@ namespace Kernel
 		auto inode = BAN::RefPtr<TmpDirectoryInode>::adopt(inode_ptr);
 		TRY(inode->link_inode(*inode, "."sv));
-		TRY(inode->link_inode(parent, "."sv));
+		TRY(inode->link_inode(parent, ".."sv));
 		return inode;
 	}

@@ -282,7 +277,7 @@ namespace Kernel
 	BAN::ErrorOr<void> TmpDirectoryInode::create_file_impl(BAN::StringView name, mode_t mode, uid_t uid, gid_t gid)
 	{
-		auto new_inode = TRY(TmpFileInode::create(m_fs, mode, uid, gid));
+		auto new_inode = TRY(TmpFileInode::create_new(m_fs, mode, uid, gid));
 		TRY(link_inode(*new_inode, name));
 		return {};
 	}

@@ -303,37 +298,41 @@ namespace Kernel
 	{
 		static constexpr size_t directory_entry_alignment = 16;
-		size_t current_size = size();
 		size_t new_entry_size = sizeof(TmpDirectoryEntry) + name.size();
 		if (auto rem = new_entry_size % directory_entry_alignment)
 			new_entry_size += directory_entry_alignment - rem;
 		ASSERT(new_entry_size < (size_t)blksize());
-		size_t new_entry_offset = current_size % blksize();
+		size_t new_entry_offset = size() % blksize();
 		// Target is the last block, or if it doesn't fit the new entry, the next one.
-		size_t target_data_block = current_size / blksize();
+		size_t target_data_block = size() / blksize();
 		if (blksize() - new_entry_offset < new_entry_size)
+		{
+			// insert an empty entry at the end of current block
+			m_fs.with_block_buffer(block_index(target_data_block).value(), [&](BAN::ByteSpan bytespan) {
+				auto& empty_entry = bytespan.slice(new_entry_offset).as<TmpDirectoryEntry>();
+				empty_entry.type = DT_UNKNOWN;
+				empty_entry.ino = 0;
+				empty_entry.rec_len = blksize() - new_entry_offset;
+			});
+			m_inode_info.size += blksize() - new_entry_offset;
 			target_data_block++;
+			new_entry_offset = 0;
+		}
 		size_t block_index = TRY(block_index_with_allocation(target_data_block));
-		BAN::Vector<uint8_t> buffer;
-		TRY(buffer.resize(blksize()));
-		BAN::ByteSpan bytespan = buffer.span();
-		m_fs.read_block(block_index, bytespan);
+		m_fs.with_block_buffer(block_index, [&](BAN::ByteSpan bytespan) {
			auto& new_entry = bytespan.slice(new_entry_offset).as<TmpDirectoryEntry>();
+			ASSERT(new_entry.type == DT_UNKNOWN);
			new_entry.type = inode_mode_to_dt_type(inode.mode());
			new_entry.ino = inode.ino();
			new_entry.name_len = name.size();
			new_entry.rec_len = new_entry_size;
			memcpy(new_entry.name, name.data(), name.size());
-		m_fs.write_block(block_index, bytespan);
+		});
 		// increase current size
 		m_inode_info.size += new_entry_size;

@@ -344,33 +343,31 @@ namespace Kernel
 		return {};
 	}
-	template<for_each_entry_callback F>
+	template<TmpFuncs::for_each_entry_callback F>
 	void TmpDirectoryInode::for_each_entry(F callback)
 	{
-		size_t full_offset = 0;
-		while (full_offset < (size_t)size())
+		for (size_t data_block_index = 0; data_block_index * blksize() < (size_t)size(); data_block_index++)
 		{
-			const size_t data_block_index = full_offset / blksize();
 			const size_t block_index = this->block_index(data_block_index).value();
+			const size_t byte_count = BAN::Math::min<size_t>(size() - data_block_index * blksize(), blksize());
-			// FIXME: implement fast heap pages?
-			BAN::Vector<uint8_t> buffer;
-			MUST(buffer.resize(blksize()));
-			BAN::ByteSpan bytespan = buffer.span();
-			m_fs.read_block(block_index, bytespan);
-			size_t byte_count = BAN::Math::min<size_t>(blksize(), size() - full_offset);
+			m_fs.with_block_buffer(block_index, [&](BAN::ByteSpan bytespan) {
				bytespan = bytespan.slice(0, byte_count);
				while (bytespan.size() > 0)
				{
-					auto& entry = bytespan.as<TmpDirectoryEntry>();
-					callback(entry);
+					const auto& entry = bytespan.as<TmpDirectoryEntry>();
+					switch (callback(entry))
+					{
+						case BAN::Iteration::Continue:
+							break;
+						case BAN::Iteration::Break:
+							return;
+						default:
+							ASSERT_NOT_REACHED();
+					}
					bytespan = bytespan.slice(entry.rec_len);
				}
+			});
-			full_offset += blksize();
 		}
 	}
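
Note: with the switch added above, a for_each_entry callback can now stop the directory walk early by returning BAN::Iteration::Break (previously the return value was ignored). A sketch of a name lookup built on top of it (hypothetical helper, written as a free function for brevity even though for_each_entry is an internal member; the entry fields are the ones written by link_inode above):

	// Sketch: resolve a directory entry's inode number by name, stopping the
	// iteration as soon as a match is found.
	static BAN::Optional<ino_t> lookup_ino(Kernel::TmpDirectoryInode& dir, BAN::StringView name)
	{
		BAN::Optional<ino_t> result;
		dir.for_each_entry([&](const Kernel::TmpDirectoryEntry& entry) {
			if (entry.ino == 0) // empty padding entry (DT_UNKNOWN), skip it
				return BAN::Iteration::Continue;
			if (BAN::StringView(entry.name, entry.name_len) == name)
			{
				result = entry.ino;
				return BAN::Iteration::Break;
			}
			return BAN::Iteration::Continue;
		});
		return result;
	}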