Compare commits

..

No commits in common. "3f164c6b828e0c889f862875e999fb7fc919a511" and "7e9e4c47ae0cbab887798d8f314b471ca37e88ac" have entirely different histories.

19 changed files with 65 additions and 834 deletions

View File

@ -32,12 +32,9 @@ set(KERNEL_SOURCES
kernel/Input/PS2Keymap.cpp kernel/Input/PS2Keymap.cpp
kernel/InterruptController.cpp kernel/InterruptController.cpp
kernel/kernel.cpp kernel/kernel.cpp
kernel/Memory/FileBackedRegion.cpp
kernel/Memory/GeneralAllocator.cpp kernel/Memory/GeneralAllocator.cpp
kernel/Memory/Heap.cpp kernel/Memory/Heap.cpp
kernel/Memory/kmalloc.cpp kernel/Memory/kmalloc.cpp
kernel/Memory/MemoryBackedRegion.cpp
kernel/Memory/MemoryRegion.cpp
kernel/Memory/PhysicalRange.cpp kernel/Memory/PhysicalRange.cpp
kernel/Memory/VirtualRange.cpp kernel/Memory/VirtualRange.cpp
kernel/Networking/E1000.cpp kernel/Networking/E1000.cpp

View File

@ -3,7 +3,6 @@
#include <BAN/RefPtr.h> #include <BAN/RefPtr.h>
#include <BAN/String.h> #include <BAN/String.h>
#include <BAN/StringView.h> #include <BAN/StringView.h>
#include <BAN/WeakPtr.h>
#include <BAN/Vector.h> #include <BAN/Vector.h>
#include <kernel/API/DirectoryEntry.h> #include <kernel/API/DirectoryEntry.h>
@ -18,9 +17,6 @@ namespace Kernel
using namespace API; using namespace API;
class FileBackedRegion;
class SharedFileData;
class Inode : public BAN::RefCounted<Inode> class Inode : public BAN::RefCounted<Inode>
{ {
public: public:
@ -116,9 +112,6 @@ namespace Kernel
private: private:
mutable RecursiveSpinLock m_lock; mutable RecursiveSpinLock m_lock;
BAN::WeakPtr<SharedFileData> m_shared_region;
friend class FileBackedRegion;
}; };
} }

View File

@ -1,43 +0,0 @@
#pragma once
#include <kernel/FS/Inode.h>
#include <kernel/Memory/MemoryRegion.h>
namespace Kernel
{
// Page cache shared by every SHARED FileBackedRegion of a single inode.
// The destructor flushes all cached pages back to the inode.
struct SharedFileData : public BAN::RefCounted<SharedFileData>, public BAN::Weakable<SharedFileData>
{
// Writes every cached page back to the inode (see FileBackedRegion.cpp).
~SharedFileData();
// FIXME: this should probably be ordered tree like map
// for fast lookup and less memory usage
// One entry per page of the inode; paddr 0 means "not cached yet".
BAN::Vector<paddr_t> pages;
BAN::RefPtr<Inode> inode;
// Scratch buffer used when copying page contents across address spaces.
uint8_t page_buffer[PAGE_SIZE];
};
// Memory region whose pages are demand-loaded from a regular file.
// PRIVATE regions own their physical pages; SHARED regions borrow
// them from the inode's SharedFileData.
class FileBackedRegion final : public MemoryRegion
{
BAN_NON_COPYABLE(FileBackedRegion);
BAN_NON_MOVABLE(FileBackedRegion);
public:
// offset must be non-negative and page aligned, size non-zero, and
// [offset, offset + size) must lie within the file; otherwise
// EINVAL/EOVERFLOW is returned.
static BAN::ErrorOr<BAN::UniqPtr<FileBackedRegion>> create(BAN::RefPtr<Inode>, PageTable&, off_t offset, size_t size, AddressRange address_range, Type, PageTable::flags_t);
~FileBackedRegion();
virtual BAN::ErrorOr<bool> allocate_page_containing(vaddr_t vaddr) override;
// NOTE: not implemented yet — asserts when called (see FileBackedRegion.cpp).
virtual BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> clone(PageTable& new_page_table) override;
private:
FileBackedRegion(BAN::RefPtr<Inode>, PageTable&, off_t offset, ssize_t size, Type flags, PageTable::flags_t page_flags);
private:
BAN::RefPtr<Inode> m_inode;
// Byte offset into the file where this region starts (page aligned).
const off_t m_offset;
// Non-null only for SHARED regions.
BAN::RefPtr<SharedFileData> m_shared_data;
};
}

View File

@ -1,29 +0,0 @@
#pragma once
#include <kernel/Memory/MemoryRegion.h>
namespace Kernel
{
// Anonymous demand-paged region backed by freshly allocated, zero-filled
// physical pages. Only Type::PRIVATE is supported (create() returns
// ENOTSUP otherwise).
class MemoryBackedRegion final : public MemoryRegion
{
BAN_NON_COPYABLE(MemoryBackedRegion);
BAN_NON_MOVABLE(MemoryBackedRegion);
public:
static BAN::ErrorOr<BAN::UniqPtr<MemoryBackedRegion>> create(PageTable&, size_t size, AddressRange, Type, PageTable::flags_t);
~MemoryBackedRegion();
virtual BAN::ErrorOr<bool> allocate_page_containing(vaddr_t vaddr) override;
virtual BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> clone(PageTable& new_page_table) override;
// Copy data from buffer into this region
// This can fail if no memory is mapped and no free memory was available
BAN::ErrorOr<void> copy_data_to_region(size_t offset_into_region, const uint8_t* buffer, size_t buffer_size);
private:
MemoryBackedRegion(PageTable&, size_t size, Type, PageTable::flags_t);
};
}

View File

@ -1,59 +0,0 @@
#pragma once
#include <BAN/UniqPtr.h>
#include <kernel/Memory/PageTable.h>
#include <kernel/Memory/Types.h>
#include <stddef.h>
namespace Kernel
{
// Virtual address bounds a region may be placed in.
// NOTE: end is treated as an exclusive upper bound by MemoryRegion::initialize().
struct AddressRange
{
vaddr_t start;
vaddr_t end;
};
// Base class for demand-paged virtual memory regions
// (see MemoryBackedRegion and FileBackedRegion).
class MemoryRegion
{
BAN_NON_COPYABLE(MemoryRegion);
BAN_NON_MOVABLE(MemoryRegion);
public:
enum class Type : uint8_t
{
PRIVATE,
SHARED
};
public:
// Unmaps the region's virtual range; freeing physical pages is the
// responsibility of the derived class destructor.
virtual ~MemoryRegion();
bool contains(vaddr_t address) const;
bool contains_fully(vaddr_t address, size_t size) const;
bool overlaps(vaddr_t address, size_t size) const;
size_t size() const { return m_size; }
vaddr_t vaddr() const { return m_vaddr; }
// Returns error if no memory was available
// Returns true if page was successfully allocated
// Returns false if page was already allocated
virtual BAN::ErrorOr<bool> allocate_page_containing(vaddr_t address) = 0;
virtual BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> clone(PageTable& new_page_table) = 0;
protected:
MemoryRegion(PageTable&, size_t size, Type type, PageTable::flags_t flags);
// Reserves the region's contiguous virtual range; sets m_vaddr on success.
BAN::ErrorOr<void> initialize(AddressRange);
protected:
PageTable& m_page_table;
const size_t m_size;
const Type m_type;
const PageTable::flags_t m_flags;
// 0 until initialize() succeeds.
vaddr_t m_vaddr { 0 };
};
}

View File

@ -48,7 +48,6 @@ namespace Kernel
BAN::ErrorOr<BAN::StringView> path_of(int) const; BAN::ErrorOr<BAN::StringView> path_of(int) const;
BAN::ErrorOr<BAN::RefPtr<Inode>> inode_of(int); BAN::ErrorOr<BAN::RefPtr<Inode>> inode_of(int);
BAN::ErrorOr<int> flags_of(int) const;
private: private:
struct OpenFileDescription : public BAN::RefCounted<OpenFileDescription> struct OpenFileDescription : public BAN::RefCounted<OpenFileDescription>

View File

@ -7,7 +7,7 @@
#include <kernel/Credentials.h> #include <kernel/Credentials.h>
#include <kernel/FS/Inode.h> #include <kernel/FS/Inode.h>
#include <kernel/Memory/Heap.h> #include <kernel/Memory/Heap.h>
#include <kernel/Memory/MemoryRegion.h> #include <kernel/Memory/VirtualRange.h>
#include <kernel/OpenFileDescriptorSet.h> #include <kernel/OpenFileDescriptorSet.h>
#include <kernel/SpinLock.h> #include <kernel/SpinLock.h>
#include <kernel/Terminal/TTY.h> #include <kernel/Terminal/TTY.h>
@ -174,7 +174,7 @@ namespace Kernel
OpenFileDescriptorSet m_open_file_descriptors; OpenFileDescriptorSet m_open_file_descriptors;
BAN::UniqPtr<LibELF::LoadableELF> m_loadable_elf; BAN::UniqPtr<LibELF::LoadableELF> m_loadable_elf;
BAN::Vector<BAN::UniqPtr<MemoryRegion>> m_mapped_regions; BAN::Vector<BAN::UniqPtr<VirtualRange>> m_mapped_ranges;
pid_t m_sid; pid_t m_sid;
pid_t m_pgrp; pid_t m_pgrp;

View File

@ -1,190 +0,0 @@
#include <kernel/LockGuard.h>
#include <kernel/Memory/FileBackedRegion.h>
#include <kernel/Memory/Heap.h>
namespace Kernel
{
// Creates a file-backed region over [offset, offset + size) of a regular
// file. For SHARED mappings the inode's SharedFileData page cache is
// attached (created on first use).
BAN::ErrorOr<BAN::UniqPtr<FileBackedRegion>> FileBackedRegion::create(BAN::RefPtr<Inode> inode, PageTable& page_table, off_t offset, size_t size, AddressRange address_range, Type type, PageTable::flags_t flags)
{
ASSERT(inode->mode().ifreg());
// offset must be non-negative and page aligned, size non-zero
if (offset < 0 || offset % PAGE_SIZE || size == 0)
return BAN::Error::from_errno(EINVAL);
// [offset, offset + size) must lie within the file
if (size > (size_t)inode->size() || (size_t)offset > (size_t)inode->size() - size)
return BAN::Error::from_errno(EOVERFLOW);
auto* region_ptr = new FileBackedRegion(inode, page_table, offset, size, type, flags);
if (region_ptr == nullptr)
return BAN::Error::from_errno(ENOMEM);
auto region = BAN::UniqPtr<FileBackedRegion>::adopt(region_ptr);
TRY(region->initialize(address_range));
if (type == Type::SHARED)
{
LockGuard _(inode->m_lock);
// Reuse the inode's existing page cache if one is still alive,
// otherwise create it (one pages[] slot per page of the inode).
if (inode->m_shared_region.valid())
region->m_shared_data = inode->m_shared_region.lock();
else
{
auto shared_data = TRY(BAN::RefPtr<SharedFileData>::create());
TRY(shared_data->pages.resize(BAN::Math::div_round_up<size_t>(inode->size(), PAGE_SIZE)));
shared_data->inode = inode;
inode->m_shared_region = TRY(shared_data->get_weak_ptr());
region->m_shared_data = BAN::move(shared_data);
}
}
return region;
}
FileBackedRegion::FileBackedRegion(BAN::RefPtr<Inode> inode, PageTable& page_table, off_t offset, ssize_t size, Type type, PageTable::flags_t flags)
: MemoryRegion(page_table, size, type, flags)
, m_inode(inode)
, m_offset(offset)
{
}
FileBackedRegion::~FileBackedRegion()
{
if (m_vaddr == 0)
return;
// SHARED pages are owned by SharedFileData, not by this region.
if (m_type == Type::SHARED)
return;
// PRIVATE: release every physical page that was demand-allocated.
size_t needed_pages = BAN::Math::div_round_up<size_t>(m_size, PAGE_SIZE);
for (size_t i = 0; i < needed_pages; i++)
{
paddr_t paddr = m_page_table.physical_address_of(m_vaddr + i * PAGE_SIZE);
if (paddr != 0)
Heap::get().release_page(paddr);
}
}
// Flushes every cached page back to the inode when the last shared
// mapping goes away.
SharedFileData::~SharedFileData()
{
for (size_t i = 0; i < pages.size(); i++)
{
if (pages[i] == 0)
continue;
{
// Temporarily map the cached page at vaddr 0 to copy it into
// page_buffer (the cached page may belong to another address space).
auto& page_table = PageTable::current();
LockGuard _(page_table);
ASSERT(page_table.is_page_free(0));
page_table.map_page_at(pages[i], 0, PageTable::Flags::Present);
memcpy(page_buffer, (void*)0, PAGE_SIZE);
page_table.unmap_page(0);
}
// NOTE(review): always writes a full PAGE_SIZE; for the inode's last,
// partial page this looks like it writes past the file's end — confirm
// inode->write() semantics.
if (auto ret = inode->write(i * PAGE_SIZE, page_buffer, PAGE_SIZE); ret.is_error())
dwarnln("{}", ret.error());
}
}
BAN::ErrorOr<bool> FileBackedRegion::allocate_page_containing(vaddr_t address)
{
ASSERT(contains(address));
// Check if address is already mapped
vaddr_t vaddr = address & PAGE_ADDR_MASK;
if (m_page_table.physical_address_of(vaddr) != 0)
return false;
if (m_type == Type::PRIVATE)
{
// Map new physical page to address
paddr_t paddr = Heap::get().take_free_page();
if (paddr == 0)
return BAN::Error::from_errno(ENOMEM);
m_page_table.map_page_at(paddr, vaddr, m_flags);
size_t file_offset = m_offset + (vaddr - m_vaddr);
// NOTE(review): m_size is the region size but file_offset is a file
// offset; for non-zero m_offset this looks wrong — expected
// m_size - (vaddr - m_vaddr). Confirm.
size_t bytes = BAN::Math::min<size_t>(m_size - file_offset, PAGE_SIZE);
BAN::ErrorOr<size_t> read_ret = 0;
// Fill the new page from the file; when m_page_table is not the active
// page table, read through a temporary mapping at vaddr 0 instead.
if (&PageTable::current() == &m_page_table)
read_ret = m_inode->read(file_offset, (void*)vaddr, bytes);
else
{
auto& page_table = PageTable::current();
LockGuard _(page_table);
ASSERT(page_table.is_page_free(0));
page_table.map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
read_ret = m_inode->read(file_offset, (void*)0, bytes);
// NOTE(review): this memset runs AFTER the read and zeroes the whole
// page, discarding the data just read — looks like it should zero only
// the tail beyond `bytes` (or run before the read). Confirm intent.
memset((void*)0, 0x00, PAGE_SIZE);
page_table.unmap_page(0);
}
if (read_ret.is_error())
{
// Undo the mapping on failure so the page is not leaked.
Heap::get().release_page(paddr);
m_page_table.unmap_page(vaddr);
return read_ret.release_error();
}
if (read_ret.value() < bytes)
{
dwarnln("Only {}/{} bytes read", read_ret.value(), bytes);
Heap::get().release_page(paddr);
m_page_table.unmap_page(vaddr);
return BAN::Error::from_errno(EIO);
}
}
else if (m_type == Type::SHARED)
{
LockGuard _(m_inode->m_lock);
ASSERT(m_inode->m_shared_region.valid());
ASSERT(m_shared_data->pages.size() == BAN::Math::div_round_up<size_t>(m_inode->size(), PAGE_SIZE));
auto& pages = m_shared_data->pages;
// NOTE(review): page_index and the read offset below are region-relative
// and ignore m_offset; pages[] is indexed by inode page. This is only
// correct while m_offset == 0 — confirm.
size_t page_index = (vaddr - m_vaddr) / PAGE_SIZE;
// Populate the shared page cache on first access to this page.
if (pages[page_index] == 0)
{
pages[page_index] = Heap::get().take_free_page();
if (pages[page_index] == 0)
return BAN::Error::from_errno(ENOMEM);
size_t offset = vaddr - m_vaddr;
size_t bytes = BAN::Math::min<size_t>(m_size - offset, PAGE_SIZE);
TRY(m_inode->read(offset, m_shared_data->page_buffer, bytes));
auto& page_table = PageTable::current();
// TODO: check if this can cause deadlock?
LockGuard page_table_lock(page_table);
// Copy the buffered file data into the new physical page through a
// temporary mapping at vaddr 0.
ASSERT(page_table.is_page_free(0));
page_table.map_page_at(pages[page_index], 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
memcpy((void*)0, m_shared_data->page_buffer, PAGE_SIZE);
page_table.unmap_page(0);
}
paddr_t paddr = pages[page_index];
ASSERT(paddr);
m_page_table.map_page_at(paddr, vaddr, m_flags);
}
else
{
ASSERT_NOT_REACHED();
}
return true;
}
// Cloning file-backed regions (fork) is not implemented yet.
BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> FileBackedRegion::clone(PageTable& new_page_table)
{
ASSERT_NOT_REACHED();
}
}

View File

@ -1,125 +0,0 @@
#include <kernel/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/MemoryBackedRegion.h>
namespace Kernel
{
// Creates an anonymous demand-paged region; only PRIVATE is supported.
BAN::ErrorOr<BAN::UniqPtr<MemoryBackedRegion>> MemoryBackedRegion::create(PageTable& page_table, size_t size, AddressRange address_range, Type type, PageTable::flags_t flags)
{
if (type != Type::PRIVATE)
return BAN::Error::from_errno(ENOTSUP);
auto* region_ptr = new MemoryBackedRegion(page_table, size, type, flags);
if (region_ptr == nullptr)
return BAN::Error::from_errno(ENOMEM);
auto region = BAN::UniqPtr<MemoryBackedRegion>::adopt(region_ptr);
TRY(region->initialize(address_range));
return region;
}
MemoryBackedRegion::MemoryBackedRegion(PageTable& page_table, size_t size, Type type, PageTable::flags_t flags)
: MemoryRegion(page_table, size, type, flags)
{
}
MemoryBackedRegion::~MemoryBackedRegion()
{
ASSERT(m_type == Type::PRIVATE);
// Release every physical page that was demand-allocated.
size_t needed_pages = BAN::Math::div_round_up<size_t>(m_size, PAGE_SIZE);
for (size_t i = 0; i < needed_pages; i++)
{
paddr_t paddr = m_page_table.physical_address_of(m_vaddr + i * PAGE_SIZE);
if (paddr != 0)
Heap::get().release_page(paddr);
}
}
// Allocates and zero-fills the page containing address on demand.
// Returns false if the page was already mapped, true if newly allocated.
BAN::ErrorOr<bool> MemoryBackedRegion::allocate_page_containing(vaddr_t address)
{
ASSERT(m_type == Type::PRIVATE);
ASSERT(contains(address));
// Check if address is already mapped
vaddr_t vaddr = address & PAGE_ADDR_MASK;
if (m_page_table.physical_address_of(vaddr) != 0)
return false;
// Map new physical page to address
paddr_t paddr = Heap::get().take_free_page();
if (paddr == 0)
return BAN::Error::from_errno(ENOMEM);
m_page_table.map_page_at(paddr, vaddr, m_flags);
// Zero out the new page
if (&PageTable::current() == &m_page_table)
memset((void*)vaddr, 0x00, PAGE_SIZE);
else
{
// Region belongs to another address space: zero the page through a
// temporary mapping at vaddr 0 in the current page table.
LockGuard _(PageTable::current());
ASSERT(PageTable::current().is_page_free(0));
PageTable::current().map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
memset((void*)0, 0x00, PAGE_SIZE);
PageTable::current().unmap_page(0);
}
return true;
}
// Deep-copies this region into new_page_table (used by fork).
// Only pages that are actually mapped are copied.
BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> MemoryBackedRegion::clone(PageTable& new_page_table)
{
ASSERT(&PageTable::current() == &m_page_table);
auto result = TRY(MemoryBackedRegion::create(new_page_table, m_size, { .start = m_vaddr, .end = m_vaddr + m_size }, m_type, m_flags));
for (size_t offset = 0; offset < m_size; offset += PAGE_SIZE)
{
paddr_t paddr = m_page_table.physical_address_of(m_vaddr + offset);
if (paddr == 0)
continue;
TRY(result->copy_data_to_region(offset, (const uint8_t*)(m_vaddr + offset), PAGE_SIZE));
}
return BAN::UniqPtr<MemoryRegion>(BAN::move(result));
}
// Copy data from buffer into this region, demand-allocating pages as needed.
BAN::ErrorOr<void> MemoryBackedRegion::copy_data_to_region(size_t offset_into_region, const uint8_t* buffer, size_t buffer_size)
{
ASSERT(offset_into_region + buffer_size <= m_size);
size_t written = 0;
// Copy page by page, never crossing a page boundary in one memcpy.
while (written < buffer_size)
{
vaddr_t write_vaddr = m_vaddr + offset_into_region + written;
vaddr_t page_offset = write_vaddr % PAGE_SIZE;
size_t bytes = BAN::Math::min<size_t>(buffer_size - written, PAGE_SIZE - page_offset);
TRY(allocate_page_containing(write_vaddr));
if (&PageTable::current() == &m_page_table)
memcpy((void*)write_vaddr, (void*)(buffer + written), bytes);
else
{
// Target lives in another address space: map its physical page at
// vaddr 0 here; page_offset is then the in-page destination address.
paddr_t paddr = m_page_table.physical_address_of(write_vaddr & PAGE_ADDR_MASK);
ASSERT(paddr);
LockGuard _(PageTable::current());
ASSERT(PageTable::current().is_page_free(0));
PageTable::current().map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
memcpy((void*)page_offset, (void*)(buffer + written), bytes);
PageTable::current().unmap_page(0);
}
written += bytes;
}
return {};
}
}

View File

@ -1,50 +0,0 @@
#include <kernel/Memory/MemoryRegion.h>
namespace Kernel
{
// Base-class bookkeeping for a contiguous demand-paged virtual region.
MemoryRegion::MemoryRegion(PageTable& page_table, size_t size, Type type, PageTable::flags_t flags)
: m_page_table(page_table)
, m_size(size)
, m_type(type)
, m_flags(flags)
{
}
MemoryRegion::~MemoryRegion()
{
// m_vaddr is non-zero only if initialize() reserved a range; derived
// classes are responsible for releasing the physical pages.
if (m_vaddr)
m_page_table.unmap_range(m_vaddr, m_size);
}
// Reserves a contiguous range of virtual pages for this region inside
// address_range (end exclusive). Returns ENOMEM on failure.
BAN::ErrorOr<void> MemoryRegion::initialize(AddressRange address_range)
{
size_t needed_pages = BAN::Math::div_round_up<size_t>(m_size, PAGE_SIZE);
m_vaddr = m_page_table.reserve_free_contiguous_pages(needed_pages, address_range.start);
if (m_vaddr == 0)
return BAN::Error::from_errno(ENOMEM);
if (m_vaddr + needed_pages * PAGE_SIZE > address_range.end)
return BAN::Error::from_errno(ENOMEM);
return {};
}
// Whether address lies inside [m_vaddr, m_vaddr + m_size).
bool MemoryRegion::contains(vaddr_t address) const
{
return m_vaddr <= address && address < m_vaddr + m_size;
}
// Whether the half-open range [address, address + size) lies fully
// inside this region.
bool MemoryRegion::contains_fully(vaddr_t address, size_t size) const
{
// The queried range's end is exclusive, so a range ending exactly at
// the region's end is fully contained ('<' here was an off-by-one).
return m_vaddr <= address && address + size <= m_vaddr + m_size;
}
// Whether [address, address + size) intersects this region at all.
bool MemoryRegion::overlaps(vaddr_t address, size_t size) const
{
// Adjacent ranges (address + size == m_vaddr) do not overlap
// ('<' here was an off-by-one).
if (address + size <= m_vaddr)
return false;
if (address >= m_vaddr + m_size)
return false;
return true;
}
}

View File

@ -309,12 +309,6 @@ namespace Kernel
return m_open_files[fd]->inode; return m_open_files[fd]->inode;
} }
BAN::ErrorOr<int> OpenFileDescriptorSet::flags_of(int fd) const
{
TRY(validate_fd(fd));
return m_open_files[fd]->flags;
}
BAN::ErrorOr<void> OpenFileDescriptorSet::validate_fd(int fd) const BAN::ErrorOr<void> OpenFileDescriptorSet::validate_fd(int fd) const
{ {
if (fd < 0 || fd >= (int)m_open_files.size()) if (fd < 0 || fd >= (int)m_open_files.size())

View File

@ -6,9 +6,7 @@
#include <kernel/IDT.h> #include <kernel/IDT.h>
#include <kernel/InterruptController.h> #include <kernel/InterruptController.h>
#include <kernel/LockGuard.h> #include <kernel/LockGuard.h>
#include <kernel/Memory/FileBackedRegion.h>
#include <kernel/Memory/Heap.h> #include <kernel/Memory/Heap.h>
#include <kernel/Memory/MemoryBackedRegion.h>
#include <kernel/Memory/PageTableScope.h> #include <kernel/Memory/PageTableScope.h>
#include <kernel/Process.h> #include <kernel/Process.h>
#include <kernel/Scheduler.h> #include <kernel/Scheduler.h>
@ -130,23 +128,23 @@ namespace Kernel
if (auto rem = needed_bytes % PAGE_SIZE) if (auto rem = needed_bytes % PAGE_SIZE)
needed_bytes += PAGE_SIZE - rem; needed_bytes += PAGE_SIZE - rem;
auto argv_region = MUST(MemoryBackedRegion::create( auto argv_range = MUST(VirtualRange::create_to_vaddr_range(
process->page_table(), process->page_table(),
0x400000, KERNEL_OFFSET,
needed_bytes, needed_bytes,
{ .start = 0x400000, .end = KERNEL_OFFSET }, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present,
MemoryRegion::Type::PRIVATE, true
PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present
)); ));
uintptr_t temp = argv_region->vaddr() + sizeof(char*) * 2; uintptr_t temp = argv_range->vaddr() + sizeof(char*) * 2;
MUST(argv_region->copy_data_to_region(0, (const uint8_t*)&temp, sizeof(char*))); argv_range->copy_from(0, (uint8_t*)&temp, sizeof(char*));
temp = 0; temp = 0;
MUST(argv_region->copy_data_to_region(sizeof(char*), (const uint8_t*)&temp, sizeof(char*))); argv_range->copy_from(sizeof(char*), (uint8_t*)&temp, sizeof(char*));
MUST(argv_region->copy_data_to_region(sizeof(char*) * 2, (const uint8_t*)path.data(), path.size())); argv_range->copy_from(sizeof(char*) * 2, (const uint8_t*)path.data(), path.size());
MUST(process->m_mapped_regions.push_back(BAN::move(argv_region))); MUST(process->m_mapped_ranges.push_back(BAN::move(argv_range)));
} }
process->m_userspace_info.argc = 1; process->m_userspace_info.argc = 1;
@ -174,8 +172,7 @@ namespace Kernel
Process::~Process() Process::~Process()
{ {
ASSERT(m_threads.empty()); ASSERT(m_threads.empty());
ASSERT(m_mapped_regions.empty()); ASSERT(m_mapped_ranges.empty());
ASSERT(!m_loadable_elf);
ASSERT(m_exit_status.waiting == 0); ASSERT(m_exit_status.waiting == 0);
ASSERT(&PageTable::current() != m_page_table.ptr()); ASSERT(&PageTable::current() != m_page_table.ptr());
} }
@ -208,8 +205,7 @@ namespace Kernel
m_open_file_descriptors.close_all(); m_open_file_descriptors.close_all();
// NOTE: We must unmap ranges while the page table is still alive // NOTE: We must unmap ranges while the page table is still alive
m_mapped_regions.clear(); m_mapped_ranges.clear();
m_loadable_elf.clear();
} }
void Process::on_thread_exit(Thread& thread) void Process::on_thread_exit(Thread& thread)
@ -322,10 +318,10 @@ namespace Kernel
OpenFileDescriptorSet open_file_descriptors(m_credentials); OpenFileDescriptorSet open_file_descriptors(m_credentials);
TRY(open_file_descriptors.clone_from(m_open_file_descriptors)); TRY(open_file_descriptors.clone_from(m_open_file_descriptors));
BAN::Vector<BAN::UniqPtr<MemoryRegion>> mapped_regions; BAN::Vector<BAN::UniqPtr<VirtualRange>> mapped_ranges;
TRY(mapped_regions.reserve(m_mapped_regions.size())); TRY(mapped_ranges.reserve(m_mapped_ranges.size()));
for (auto& mapped_region : m_mapped_regions) for (auto& mapped_range : m_mapped_ranges)
MUST(mapped_regions.push_back(TRY(mapped_region->clone(*page_table)))); MUST(mapped_ranges.push_back(TRY(mapped_range->clone(*page_table))));
auto loadable_elf = TRY(m_loadable_elf->clone(*page_table)); auto loadable_elf = TRY(m_loadable_elf->clone(*page_table));
@ -334,7 +330,7 @@ namespace Kernel
forked->m_working_directory = BAN::move(working_directory); forked->m_working_directory = BAN::move(working_directory);
forked->m_page_table = BAN::move(page_table); forked->m_page_table = BAN::move(page_table);
forked->m_open_file_descriptors = BAN::move(open_file_descriptors); forked->m_open_file_descriptors = BAN::move(open_file_descriptors);
forked->m_mapped_regions = BAN::move(mapped_regions); forked->m_mapped_ranges = BAN::move(mapped_ranges);
forked->m_loadable_elf = BAN::move(loadable_elf); forked->m_loadable_elf = BAN::move(loadable_elf);
forked->m_is_userspace = m_is_userspace; forked->m_is_userspace = m_is_userspace;
forked->m_userspace_info = m_userspace_info; forked->m_userspace_info = m_userspace_info;
@ -377,7 +373,7 @@ namespace Kernel
m_open_file_descriptors.close_cloexec(); m_open_file_descriptors.close_cloexec();
m_mapped_regions.clear(); m_mapped_ranges.clear();
m_loadable_elf.clear(); m_loadable_elf.clear();
m_loadable_elf = TRY(load_elf_for_exec(m_credentials, executable_path, m_working_directory, page_table())); m_loadable_elf = TRY(load_elf_for_exec(m_credentials, executable_path, m_working_directory, page_table()));
@ -391,8 +387,8 @@ namespace Kernel
ASSERT(&Process::current() == this); ASSERT(&Process::current() == this);
// allocate memory on the new process for arguments and environment // allocate memory on the new process for arguments and environment
auto create_region = auto create_range =
[&](BAN::Span<BAN::String> container) -> BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> [&](const auto& container) -> BAN::UniqPtr<VirtualRange>
{ {
size_t bytes = sizeof(char*); size_t bytes = sizeof(char*);
for (auto& elem : container) for (auto& elem : container)
@ -401,36 +397,36 @@ namespace Kernel
if (auto rem = bytes % PAGE_SIZE) if (auto rem = bytes % PAGE_SIZE)
bytes += PAGE_SIZE - rem; bytes += PAGE_SIZE - rem;
auto region = TRY(MemoryBackedRegion::create( auto range = MUST(VirtualRange::create_to_vaddr_range(
page_table(), page_table(),
0x400000, KERNEL_OFFSET,
bytes, bytes,
{ .start = 0x400000, .end = KERNEL_OFFSET }, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present,
MemoryRegion::Type::PRIVATE, true
PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present
)); ));
size_t data_offset = sizeof(char*) * (container.size() + 1); size_t data_offset = sizeof(char*) * (container.size() + 1);
for (size_t i = 0; i < container.size(); i++) for (size_t i = 0; i < container.size(); i++)
{ {
uintptr_t ptr_addr = region->vaddr() + data_offset; uintptr_t ptr_addr = range->vaddr() + data_offset;
TRY(region->copy_data_to_region(sizeof(char*) * i, (const uint8_t*)&ptr_addr, sizeof(char*))); range->copy_from(sizeof(char*) * i, (const uint8_t*)&ptr_addr, sizeof(char*));
TRY(region->copy_data_to_region(data_offset, (const uint8_t*)container[i].data(), container[i].size())); range->copy_from(data_offset, (const uint8_t*)container[i].data(), container[i].size());
data_offset += container[i].size() + 1; data_offset += container[i].size() + 1;
} }
uintptr_t null = 0; uintptr_t null = 0;
TRY(region->copy_data_to_region(sizeof(char*) * container.size(), (const uint8_t*)&null, sizeof(char*))); range->copy_from(sizeof(char*) * container.size(), (const uint8_t*)&null, sizeof(char*));
return BAN::UniqPtr<MemoryRegion>(BAN::move(region)); return BAN::move(range);
}; };
auto argv_region = MUST(create_region(str_argv.span())); auto argv_range = create_range(str_argv);
m_userspace_info.argv = (char**)argv_region->vaddr(); m_userspace_info.argv = (char**)argv_range->vaddr();
MUST(m_mapped_regions.push_back(BAN::move(argv_region))); MUST(m_mapped_ranges.push_back(BAN::move(argv_range)));
auto envp_region = MUST(create_region(str_envp.span())); auto envp_range = create_range(str_envp);
m_userspace_info.envp = (char**)envp_region->vaddr(); m_userspace_info.envp = (char**)envp_range->vaddr();
MUST(m_mapped_regions.push_back(BAN::move(envp_region))); MUST(m_mapped_ranges.push_back(BAN::move(envp_range)));
m_userspace_info.argc = str_argv.size(); m_userspace_info.argc = str_argv.size();
@ -493,10 +489,7 @@ namespace Kernel
return BAN::Error::from_errno(ECHILD); return BAN::Error::from_errno(ECHILD);
pid_t ret = target->pid(); pid_t ret = target->pid();
*stat_loc = target->block_until_exit();
int stat = target->block_until_exit();
if (stat_loc)
*stat_loc = stat;
return ret; return ret;
} }
@ -551,11 +544,11 @@ namespace Kernel
return true; return true;
} }
for (auto& region : m_mapped_regions) for (auto& mapped_range : m_mapped_ranges)
{ {
if (!region->contains(address)) if (!mapped_range->contains(address))
continue; continue;
TRY(region->allocate_page_containing(address)); TRY(mapped_range->allocate_page_for_demand_paging(address));
return true; return true;
} }
@ -814,69 +807,34 @@ namespace Kernel
if (args->prot != PROT_NONE && args->prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) if (args->prot != PROT_NONE && args->prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
return BAN::Error::from_errno(EINVAL); return BAN::Error::from_errno(EINVAL);
if (args->flags & MAP_FIXED) PageTable::flags_t flags = PageTable::Flags::UserSupervisor;
return BAN::Error::from_errno(ENOTSUP);
if (!(args->flags & MAP_PRIVATE) == !(args->flags & MAP_SHARED))
return BAN::Error::from_errno(EINVAL);
auto region_type = (args->flags & MAP_PRIVATE) ? MemoryRegion::Type::PRIVATE : MemoryRegion::Type::SHARED;
PageTable::flags_t page_flags = 0;
if (args->prot & PROT_READ) if (args->prot & PROT_READ)
page_flags |= PageTable::Flags::Present; flags |= PageTable::Flags::Present;
if (args->prot & PROT_WRITE) if (args->prot & PROT_WRITE)
page_flags |= PageTable::Flags::ReadWrite | PageTable::Flags::Present; flags |= PageTable::Flags::ReadWrite | PageTable::Flags::Present;
if (args->prot & PROT_EXEC) if (args->prot & PROT_EXEC)
page_flags |= PageTable::Flags::Execute | PageTable::Flags::Present; flags |= PageTable::Flags::Execute | PageTable::Flags::Present;
if (page_flags == 0) if (args->flags == (MAP_ANONYMOUS | MAP_PRIVATE))
page_flags = PageTable::Flags::Reserved;
else
page_flags |= PageTable::Flags::UserSupervisor;
if (args->flags & MAP_ANONYMOUS)
{ {
if (args->addr != nullptr) if (args->addr != nullptr)
return BAN::Error::from_errno(ENOTSUP); return BAN::Error::from_errno(ENOTSUP);
if (args->off != 0) if (args->off != 0)
return BAN::Error::from_errno(EINVAL); return BAN::Error::from_errno(EINVAL);
if (args->len % PAGE_SIZE != 0)
return BAN::Error::from_errno(EINVAL);
auto region = TRY(MemoryBackedRegion::create( auto range = TRY(VirtualRange::create_to_vaddr_range(
page_table(), page_table(),
0x400000, KERNEL_OFFSET,
args->len, args->len,
{ .start = 0x400000, .end = KERNEL_OFFSET }, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present,
region_type, page_flags false
)); ));
LockGuard _(m_lock); LockGuard _(m_lock);
TRY(m_mapped_regions.push_back(BAN::move(region))); TRY(m_mapped_ranges.push_back(BAN::move(range)));
return m_mapped_regions.back()->vaddr(); return m_mapped_ranges.back()->vaddr();
}
auto inode = TRY(m_open_file_descriptors.inode_of(args->fildes));
if (inode->mode().ifreg())
{
if (args->addr != nullptr)
return BAN::Error::from_errno(ENOTSUP);
auto inode_flags = TRY(m_open_file_descriptors.flags_of(args->fildes));
if (!(inode_flags & O_RDONLY))
return BAN::Error::from_errno(EACCES);
if (region_type == MemoryRegion::Type::SHARED)
if ((args->prot & PROT_WRITE) && !(inode_flags & O_WRONLY))
return BAN::Error::from_errno(EACCES);
auto region = TRY(FileBackedRegion::create(
inode,
page_table(),
args->off, args->len,
{ .start = 0x400000, .end = KERNEL_OFFSET },
region_type, page_flags
));
LockGuard _(m_lock);
TRY(m_mapped_regions.push_back(BAN::move(region)));
return m_mapped_regions.back()->vaddr();
} }
return BAN::Error::from_errno(ENOTSUP); return BAN::Error::from_errno(ENOTSUP);
@ -893,10 +851,13 @@ namespace Kernel
LockGuard _(m_lock); LockGuard _(m_lock);
// FIXME: We should only map partial regions for (size_t i = 0; i < m_mapped_ranges.size(); i++)
for (size_t i = 0; i < m_mapped_regions.size(); i++) {
if (m_mapped_regions[i]->overlaps(vaddr, len)) auto& range = m_mapped_ranges[i];
m_mapped_regions.remove(i); if (vaddr + len < range->vaddr() || vaddr >= range->vaddr() + range->size())
continue;
m_mapped_ranges.remove(i);
}
return 0; return 0;
} }
@ -1380,11 +1341,10 @@ namespace Kernel
return; return;
// FIXME: should we allow cross mapping access? // FIXME: should we allow cross mapping access?
for (auto& mapped_region : m_mapped_regions) for (auto& mapped_range : m_mapped_ranges)
mapped_region->contains_fully(vaddr, size); if (vaddr >= mapped_range->vaddr() && vaddr + size <= mapped_range->vaddr() + mapped_range->size())
return; return;
// FIXME: elf should contain full range [vaddr, vaddr + size)
if (m_loadable_elf->contains(vaddr)) if (m_loadable_elf->contains(vaddr))
return; return;

View File

@ -54,12 +54,10 @@ static bool allocate_pool(size_t pool_index)
assert(pool.start == nullptr); assert(pool.start == nullptr);
// allocate memory for pool // allocate memory for pool
void* new_pool = mmap(nullptr, pool.size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); pool.start = (uint8_t*)mmap(nullptr, pool.size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (new_pool == MAP_FAILED) if (pool.start == nullptr)
return false; return false;
pool.start = (uint8_t*)new_pool;
// initialize pool to single unallocated node // initialize pool to single unallocated node
auto* node = (malloc_node_t*)pool.start; auto* node = (malloc_node_t*)pool.start;
node->allocated = false; node->allocated = false;

View File

@ -9,12 +9,11 @@ void* mmap(void* addr, size_t len, int prot, int flags, int fildes, off_t off)
.len = len, .len = len,
.prot = prot, .prot = prot,
.flags = flags, .flags = flags,
.fildes = fildes,
.off = off .off = off
}; };
long ret = syscall(SYS_MMAP, &args); long ret = syscall(SYS_MMAP, &args);
if (ret == -1) if (ret == -1)
return MAP_FAILED; return nullptr;
return (void*)ret; return (void*)ret;
} }

View File

@ -4,13 +4,11 @@ project(userspace CXX)
set(USERSPACE_PROJECTS set(USERSPACE_PROJECTS
cat cat
cat-mmap
dd dd
echo echo
id id
init init
ls ls
mmap-shared-test
poweroff poweroff
Shell Shell
snake snake

View File

@ -1,17 +0,0 @@
# Build configuration for cat-mmap: a cat clone that uses mmap() instead
# of read() to exercise the kernel's file-backed mmap support.
cmake_minimum_required(VERSION 3.26)
project(cat-mmap CXX)
set(SOURCES
main.cpp
)
add_executable(cat-mmap ${SOURCES})
target_compile_options(cat-mmap PUBLIC -O2 -g)
target_link_libraries(cat-mmap PUBLIC libc)
# Installs the binary into the banan-os image's bin directory.
add_custom_target(cat-mmap-install
COMMAND sudo cp ${CMAKE_CURRENT_BINARY_DIR}/cat-mmap ${BANAN_BIN}/
DEPENDS cat-mmap
USES_TERMINAL
)

View File

@ -1,62 +0,0 @@
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
bool cat_file(int fd)
{
struct stat st;
if (fstat(fd, &st) == -1)
{
perror("stat");
return false;
}
void* addr = mmap(nullptr, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
if (addr == MAP_FAILED)
{
perror("mmap");
return false;
}
ssize_t nwrite = write(STDOUT_FILENO, addr, st.st_size);
if (nwrite == -1)
perror("write");
if (munmap(addr, st.st_size) == -1)
{
perror("munmap");
return false;
}
return true;
}
int main(int argc, char** argv)
{
int ret = 0;
if (argc > 1)
{
for (int i = 1; i < argc; i++)
{
int fd = open(argv[i], O_RDONLY);
if (fd == -1)
{
perror(argv[i]);
ret = 1;
continue;
}
if (!cat_file(fd))
ret = 1;
close(fd);
}
}
else
{
if (!cat_file(STDIN_FILENO))
ret = 1;
}
return ret;
}

View File

@ -1,17 +0,0 @@
# Build configuration for mmap-shared-test: exercises MAP_SHARED file
# mappings across fork() (see main.cpp).
cmake_minimum_required(VERSION 3.26)
project(mmap-shared-test CXX)
set(SOURCES
main.cpp
)
add_executable(mmap-shared-test ${SOURCES})
target_compile_options(mmap-shared-test PUBLIC -O2 -g)
target_link_libraries(mmap-shared-test PUBLIC libc)
# Installs the binary into the banan-os image's bin directory.
add_custom_target(mmap-shared-test-install
COMMAND sudo cp ${CMAKE_CURRENT_BINARY_DIR}/mmap-shared-test ${BANAN_BIN}/
DEPENDS mmap-shared-test
USES_TERMINAL
)

View File

@ -1,115 +0,0 @@
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>
#define FILE_NAME "test-file"
#define FILE_SIZE (1024*1024)
int prepare_file()
{
int fd = open(FILE_NAME, O_RDWR | O_TRUNC | O_CREAT, 0666);
if (fd == -1)
{
perror("open");
return 1;
}
void* null_buffer = malloc(FILE_SIZE);
memset(null_buffer, 0x00, FILE_SIZE);
if (write(fd, null_buffer, FILE_SIZE) == -1)
{
perror("write");
return 1;
}
free(null_buffer);
close(fd);
printf("file created\n");
return 0;
}
int job1()
{
int fd = open(FILE_NAME, O_RDONLY);
if (fd == -1)
{
perror("open");
return 1;
}
void* addr = mmap(nullptr, FILE_SIZE, PROT_READ, MAP_SHARED, fd, 0);
if (addr == MAP_FAILED)
{
perror("mmap");
return 1;
}
sleep(4);
size_t sum = 0;
for (int i = 0; i < FILE_SIZE; i++)
sum += ((uint8_t*)addr)[i];
munmap(addr, FILE_SIZE);
close(fd);
printf("sum: %zu\n", sum);
return 0;
}
int job2()
{
sleep(2);
int fd = open(FILE_NAME, O_RDWR);
if (fd == -1)
{
perror("open");
return 1;
}
void* addr = mmap(nullptr, FILE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (addr == MAP_FAILED)
{
perror("mmap");
return 1;
}
memset(addr, 'a', FILE_SIZE);
munmap(addr, FILE_SIZE);
close(fd);
printf("expecting: %zu\n", (size_t)'a' * FILE_SIZE);
return 0;
}
int main()
{
if (int ret = prepare_file())
return ret;
pid_t pid = fork();
if (pid == 0)
return job1();
if (pid == -1)
{
perror("fork");
return 1;
}
int ret = job2();
waitpid(pid, nullptr, 0);
return ret;
}