Compare commits: 7e9e4c47ae...3f164c6b82 (8 commits)
Author | SHA1 | Date |
---|---|---|
Bananymous | 3f164c6b82 | |
Bananymous | f953f3d3ff | |
Bananymous | 9fc75fe445 | |
Bananymous | 7a5bb6a56b | |
Bananymous | d54c6b7f6b | |
Bananymous | db5d6a7f80 | |
Bananymous | 4a92f44cf6 | |
Bananymous | 376b9f7272 | |
@@ -32,9 +32,12 @@ set(KERNEL_SOURCES
kernel/Input/PS2Keymap.cpp
kernel/InterruptController.cpp
kernel/kernel.cpp
kernel/Memory/FileBackedRegion.cpp
kernel/Memory/GeneralAllocator.cpp
kernel/Memory/Heap.cpp
kernel/Memory/kmalloc.cpp
kernel/Memory/MemoryBackedRegion.cpp
kernel/Memory/MemoryRegion.cpp
kernel/Memory/PhysicalRange.cpp
kernel/Memory/VirtualRange.cpp
kernel/Networking/E1000.cpp
@@ -3,6 +3,7 @@
#include <BAN/RefPtr.h>
#include <BAN/String.h>
#include <BAN/StringView.h>
#include <BAN/WeakPtr.h>
#include <BAN/Vector.h>

#include <kernel/API/DirectoryEntry.h>

@@ -17,6 +18,9 @@ namespace Kernel

using namespace API;

class FileBackedRegion;
class SharedFileData;

class Inode : public BAN::RefCounted<Inode>
{
public:

@@ -112,6 +116,9 @@ namespace Kernel

private:
mutable RecursiveSpinLock m_lock;

BAN::WeakPtr<SharedFileData> m_shared_region;
friend class FileBackedRegion;
};

}
@@ -0,0 +1,43 @@
#pragma once

#include <kernel/FS/Inode.h>
#include <kernel/Memory/MemoryRegion.h>

namespace Kernel
{

struct SharedFileData : public BAN::RefCounted<SharedFileData>, public BAN::Weakable<SharedFileData>
{
~SharedFileData();

// FIXME: this should probably be ordered tree like map
// for fast lookup and less memory usage
BAN::Vector<paddr_t> pages;
BAN::RefPtr<Inode> inode;
uint8_t page_buffer[PAGE_SIZE];
};

class FileBackedRegion final : public MemoryRegion
{
BAN_NON_COPYABLE(FileBackedRegion);
BAN_NON_MOVABLE(FileBackedRegion);

public:
static BAN::ErrorOr<BAN::UniqPtr<FileBackedRegion>> create(BAN::RefPtr<Inode>, PageTable&, off_t offset, size_t size, AddressRange address_range, Type, PageTable::flags_t);
~FileBackedRegion();

virtual BAN::ErrorOr<bool> allocate_page_containing(vaddr_t vaddr) override;

virtual BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> clone(PageTable& new_page_table) override;

private:
FileBackedRegion(BAN::RefPtr<Inode>, PageTable&, off_t offset, ssize_t size, Type flags, PageTable::flags_t page_flags);

private:
BAN::RefPtr<Inode> m_inode;
const off_t m_offset;

BAN::RefPtr<SharedFileData> m_shared_data;
};

}
@@ -0,0 +1,29 @@
#pragma once

#include <kernel/Memory/MemoryRegion.h>

namespace Kernel
{

class MemoryBackedRegion final : public MemoryRegion
{
BAN_NON_COPYABLE(MemoryBackedRegion);
BAN_NON_MOVABLE(MemoryBackedRegion);

public:
static BAN::ErrorOr<BAN::UniqPtr<MemoryBackedRegion>> create(PageTable&, size_t size, AddressRange, Type, PageTable::flags_t);
~MemoryBackedRegion();

virtual BAN::ErrorOr<bool> allocate_page_containing(vaddr_t vaddr) override;

virtual BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> clone(PageTable& new_page_table) override;

// Copy data from buffer into this region
// This can fail if no memory is mapped and no free memory was available
BAN::ErrorOr<void> copy_data_to_region(size_t offset_into_region, const uint8_t* buffer, size_t buffer_size);

private:
MemoryBackedRegion(PageTable&, size_t size, Type, PageTable::flags_t);
};

}
@@ -0,0 +1,59 @@
#pragma once

#include <BAN/UniqPtr.h>
#include <kernel/Memory/PageTable.h>
#include <kernel/Memory/Types.h>

#include <stddef.h>

namespace Kernel
{

struct AddressRange
{
vaddr_t start;
vaddr_t end;
};

class MemoryRegion
{
BAN_NON_COPYABLE(MemoryRegion);
BAN_NON_MOVABLE(MemoryRegion);

public:
enum class Type : uint8_t
{
PRIVATE,
SHARED
};

public:
virtual ~MemoryRegion();

bool contains(vaddr_t address) const;
bool contains_fully(vaddr_t address, size_t size) const;
bool overlaps(vaddr_t address, size_t size) const;

size_t size() const { return m_size; }
vaddr_t vaddr() const { return m_vaddr; }

// Returns error if no memory was available
// Returns true if page was successfully allocated
// Returns false if page was already allocated
virtual BAN::ErrorOr<bool> allocate_page_containing(vaddr_t address) = 0;

virtual BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> clone(PageTable& new_page_table) = 0;

protected:
MemoryRegion(PageTable&, size_t size, Type type, PageTable::flags_t flags);
BAN::ErrorOr<void> initialize(AddressRange);

protected:
PageTable& m_page_table;
const size_t m_size;
const Type m_type;
const PageTable::flags_t m_flags;
vaddr_t m_vaddr { 0 };
};

}
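The comment block above spells out the allocate_page_containing() contract: an error means no memory (or backing data) was available, true means a page was newly mapped, false means the page was already present. A minimal sketch of a consumer, modeled on the demand-paging loop added to Process.cpp later in this diff; the helper name try_demand_page is illustrative only and lives, hypothetically, inside namespace Kernel:

// Hypothetical helper (not part of this change set); assumes the MemoryRegion
// interface declared in the header above.
BAN::ErrorOr<bool> try_demand_page(BAN::Vector<BAN::UniqPtr<MemoryRegion>>& regions, vaddr_t fault_addr)
{
	for (auto& region : regions)
	{
		if (!region->contains(fault_addr))
			continue;
		// ENOMEM (or a backing-store read failure) propagates to the caller
		TRY(region->allocate_page_containing(fault_addr));
		return true; // the page is now mapped; the faulting access can be retried
	}
	return false; // the address is not backed by any region
}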
@@ -48,6 +48,7 @@ namespace Kernel

BAN::ErrorOr<BAN::StringView> path_of(int) const;
BAN::ErrorOr<BAN::RefPtr<Inode>> inode_of(int);
BAN::ErrorOr<int> flags_of(int) const;

private:
struct OpenFileDescription : public BAN::RefCounted<OpenFileDescription>

@@ -7,7 +7,7 @@
#include <kernel/Credentials.h>
#include <kernel/FS/Inode.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/VirtualRange.h>
#include <kernel/Memory/MemoryRegion.h>
#include <kernel/OpenFileDescriptorSet.h>
#include <kernel/SpinLock.h>
#include <kernel/Terminal/TTY.h>

@@ -174,7 +174,7 @@ namespace Kernel
OpenFileDescriptorSet m_open_file_descriptors;

BAN::UniqPtr<LibELF::LoadableELF> m_loadable_elf;
BAN::Vector<BAN::UniqPtr<VirtualRange>> m_mapped_ranges;
BAN::Vector<BAN::UniqPtr<MemoryRegion>> m_mapped_regions;

pid_t m_sid;
pid_t m_pgrp;
@@ -0,0 +1,190 @@
#include <kernel/LockGuard.h>
#include <kernel/Memory/FileBackedRegion.h>
#include <kernel/Memory/Heap.h>

namespace Kernel
{

BAN::ErrorOr<BAN::UniqPtr<FileBackedRegion>> FileBackedRegion::create(BAN::RefPtr<Inode> inode, PageTable& page_table, off_t offset, size_t size, AddressRange address_range, Type type, PageTable::flags_t flags)
{
ASSERT(inode->mode().ifreg());

if (offset < 0 || offset % PAGE_SIZE || size == 0)
return BAN::Error::from_errno(EINVAL);
if (size > (size_t)inode->size() || (size_t)offset > (size_t)inode->size() - size)
return BAN::Error::from_errno(EOVERFLOW);

auto* region_ptr = new FileBackedRegion(inode, page_table, offset, size, type, flags);
if (region_ptr == nullptr)
return BAN::Error::from_errno(ENOMEM);
auto region = BAN::UniqPtr<FileBackedRegion>::adopt(region_ptr);

TRY(region->initialize(address_range));

if (type == Type::SHARED)
{
LockGuard _(inode->m_lock);
if (inode->m_shared_region.valid())
region->m_shared_data = inode->m_shared_region.lock();
else
{
auto shared_data = TRY(BAN::RefPtr<SharedFileData>::create());
TRY(shared_data->pages.resize(BAN::Math::div_round_up<size_t>(inode->size(), PAGE_SIZE)));
shared_data->inode = inode;
inode->m_shared_region = TRY(shared_data->get_weak_ptr());
region->m_shared_data = BAN::move(shared_data);
}
}

return region;
}

FileBackedRegion::FileBackedRegion(BAN::RefPtr<Inode> inode, PageTable& page_table, off_t offset, ssize_t size, Type type, PageTable::flags_t flags)
: MemoryRegion(page_table, size, type, flags)
, m_inode(inode)
, m_offset(offset)
{
}

FileBackedRegion::~FileBackedRegion()
{
if (m_vaddr == 0)
return;

if (m_type == Type::SHARED)
return;

size_t needed_pages = BAN::Math::div_round_up<size_t>(m_size, PAGE_SIZE);
for (size_t i = 0; i < needed_pages; i++)
{
paddr_t paddr = m_page_table.physical_address_of(m_vaddr + i * PAGE_SIZE);
if (paddr != 0)
Heap::get().release_page(paddr);
}
}

SharedFileData::~SharedFileData()
{
for (size_t i = 0; i < pages.size(); i++)
{
if (pages[i] == 0)
continue;

{
auto& page_table = PageTable::current();
LockGuard _(page_table);
ASSERT(page_table.is_page_free(0));

page_table.map_page_at(pages[i], 0, PageTable::Flags::Present);
memcpy(page_buffer, (void*)0, PAGE_SIZE);
page_table.unmap_page(0);
}

if (auto ret = inode->write(i * PAGE_SIZE, page_buffer, PAGE_SIZE); ret.is_error())
dwarnln("{}", ret.error());
}
}

BAN::ErrorOr<bool> FileBackedRegion::allocate_page_containing(vaddr_t address)
{
ASSERT(contains(address));

// Check if address is already mapped
vaddr_t vaddr = address & PAGE_ADDR_MASK;
if (m_page_table.physical_address_of(vaddr) != 0)
return false;

if (m_type == Type::PRIVATE)
{
// Map new physical page to address
paddr_t paddr = Heap::get().take_free_page();
if (paddr == 0)
return BAN::Error::from_errno(ENOMEM);
m_page_table.map_page_at(paddr, vaddr, m_flags);

size_t file_offset = m_offset + (vaddr - m_vaddr);
size_t bytes = BAN::Math::min<size_t>(m_size - file_offset, PAGE_SIZE);

BAN::ErrorOr<size_t> read_ret = 0;

// Zero out the new page
if (&PageTable::current() == &m_page_table)
read_ret = m_inode->read(file_offset, (void*)vaddr, bytes);
else
{
auto& page_table = PageTable::current();

LockGuard _(page_table);
ASSERT(page_table.is_page_free(0));

page_table.map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
read_ret = m_inode->read(file_offset, (void*)0, bytes);
memset((void*)0, 0x00, PAGE_SIZE);
page_table.unmap_page(0);
}

if (read_ret.is_error())
{
Heap::get().release_page(paddr);
m_page_table.unmap_page(vaddr);
return read_ret.release_error();
}

if (read_ret.value() < bytes)
{
dwarnln("Only {}/{} bytes read", read_ret.value(), bytes);
Heap::get().release_page(paddr);
m_page_table.unmap_page(vaddr);
return BAN::Error::from_errno(EIO);
}
}
else if (m_type == Type::SHARED)
{
LockGuard _(m_inode->m_lock);
ASSERT(m_inode->m_shared_region.valid());
ASSERT(m_shared_data->pages.size() == BAN::Math::div_round_up<size_t>(m_inode->size(), PAGE_SIZE));

auto& pages = m_shared_data->pages;
size_t page_index = (vaddr - m_vaddr) / PAGE_SIZE;

if (pages[page_index] == 0)
{
pages[page_index] = Heap::get().take_free_page();
if (pages[page_index] == 0)
return BAN::Error::from_errno(ENOMEM);

size_t offset = vaddr - m_vaddr;
size_t bytes = BAN::Math::min<size_t>(m_size - offset, PAGE_SIZE);

TRY(m_inode->read(offset, m_shared_data->page_buffer, bytes));

auto& page_table = PageTable::current();

// TODO: check if this can cause deadlock?
LockGuard page_table_lock(page_table);
ASSERT(page_table.is_page_free(0));

page_table.map_page_at(pages[page_index], 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
memcpy((void*)0, m_shared_data->page_buffer, PAGE_SIZE);
page_table.unmap_page(0);
}

paddr_t paddr = pages[page_index];
ASSERT(paddr);

m_page_table.map_page_at(paddr, vaddr, m_flags);
}
else
{
ASSERT_NOT_REACHED();
}

return true;
}

BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> FileBackedRegion::clone(PageTable& new_page_table)
{
ASSERT_NOT_REACHED();
}

}
@@ -0,0 +1,125 @@
#include <kernel/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/MemoryBackedRegion.h>

namespace Kernel
{

BAN::ErrorOr<BAN::UniqPtr<MemoryBackedRegion>> MemoryBackedRegion::create(PageTable& page_table, size_t size, AddressRange address_range, Type type, PageTable::flags_t flags)
{
if (type != Type::PRIVATE)
return BAN::Error::from_errno(ENOTSUP);

auto* region_ptr = new MemoryBackedRegion(page_table, size, type, flags);
if (region_ptr == nullptr)
return BAN::Error::from_errno(ENOMEM);
auto region = BAN::UniqPtr<MemoryBackedRegion>::adopt(region_ptr);

TRY(region->initialize(address_range));

return region;
}

MemoryBackedRegion::MemoryBackedRegion(PageTable& page_table, size_t size, Type type, PageTable::flags_t flags)
: MemoryRegion(page_table, size, type, flags)
{
}

MemoryBackedRegion::~MemoryBackedRegion()
{
ASSERT(m_type == Type::PRIVATE);

size_t needed_pages = BAN::Math::div_round_up<size_t>(m_size, PAGE_SIZE);
for (size_t i = 0; i < needed_pages; i++)
{
paddr_t paddr = m_page_table.physical_address_of(m_vaddr + i * PAGE_SIZE);
if (paddr != 0)
Heap::get().release_page(paddr);
}
}

BAN::ErrorOr<bool> MemoryBackedRegion::allocate_page_containing(vaddr_t address)
{
ASSERT(m_type == Type::PRIVATE);

ASSERT(contains(address));

// Check if address is already mapped
vaddr_t vaddr = address & PAGE_ADDR_MASK;
if (m_page_table.physical_address_of(vaddr) != 0)
return false;

// Map new physical page to address
paddr_t paddr = Heap::get().take_free_page();
if (paddr == 0)
return BAN::Error::from_errno(ENOMEM);
m_page_table.map_page_at(paddr, vaddr, m_flags);

// Zero out the new page
if (&PageTable::current() == &m_page_table)
memset((void*)vaddr, 0x00, PAGE_SIZE);
else
{
LockGuard _(PageTable::current());
ASSERT(PageTable::current().is_page_free(0));

PageTable::current().map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
memset((void*)0, 0x00, PAGE_SIZE);
PageTable::current().unmap_page(0);
}

return true;
}

BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> MemoryBackedRegion::clone(PageTable& new_page_table)
{
ASSERT(&PageTable::current() == &m_page_table);

auto result = TRY(MemoryBackedRegion::create(new_page_table, m_size, { .start = m_vaddr, .end = m_vaddr + m_size }, m_type, m_flags));

for (size_t offset = 0; offset < m_size; offset += PAGE_SIZE)
{
paddr_t paddr = m_page_table.physical_address_of(m_vaddr + offset);
if (paddr == 0)
continue;
TRY(result->copy_data_to_region(offset, (const uint8_t*)(m_vaddr + offset), PAGE_SIZE));
}

return BAN::UniqPtr<MemoryRegion>(BAN::move(result));
}

BAN::ErrorOr<void> MemoryBackedRegion::copy_data_to_region(size_t offset_into_region, const uint8_t* buffer, size_t buffer_size)
{
ASSERT(offset_into_region + buffer_size <= m_size);

size_t written = 0;
while (written < buffer_size)
{
vaddr_t write_vaddr = m_vaddr + offset_into_region + written;
vaddr_t page_offset = write_vaddr % PAGE_SIZE;
size_t bytes = BAN::Math::min<size_t>(buffer_size - written, PAGE_SIZE - page_offset);

TRY(allocate_page_containing(write_vaddr));

if (&PageTable::current() == &m_page_table)
memcpy((void*)write_vaddr, (void*)(buffer + written), bytes);
else
{
paddr_t paddr = m_page_table.physical_address_of(write_vaddr & PAGE_ADDR_MASK);
ASSERT(paddr);

LockGuard _(PageTable::current());
ASSERT(PageTable::current().is_page_free(0));

PageTable::current().map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
memcpy((void*)page_offset, (void*)(buffer + written), bytes);
PageTable::current().unmap_page(0);
}

written += bytes;
}

return {};
}

}
@@ -0,0 +1,50 @@
#include <kernel/Memory/MemoryRegion.h>

namespace Kernel
{

MemoryRegion::MemoryRegion(PageTable& page_table, size_t size, Type type, PageTable::flags_t flags)
: m_page_table(page_table)
, m_size(size)
, m_type(type)
, m_flags(flags)
{
}

MemoryRegion::~MemoryRegion()
{
if (m_vaddr)
m_page_table.unmap_range(m_vaddr, m_size);
}

BAN::ErrorOr<void> MemoryRegion::initialize(AddressRange address_range)
{
size_t needed_pages = BAN::Math::div_round_up<size_t>(m_size, PAGE_SIZE);
m_vaddr = m_page_table.reserve_free_contiguous_pages(needed_pages, address_range.start);
if (m_vaddr == 0)
return BAN::Error::from_errno(ENOMEM);
if (m_vaddr + needed_pages * PAGE_SIZE > address_range.end)
return BAN::Error::from_errno(ENOMEM);
return {};
}

bool MemoryRegion::contains(vaddr_t address) const
{
return m_vaddr <= address && address < m_vaddr + m_size;
}

bool MemoryRegion::contains_fully(vaddr_t address, size_t size) const
{
return m_vaddr <= address && address + size < m_vaddr + m_size;
}

bool MemoryRegion::overlaps(vaddr_t address, size_t size) const
{
if (address + size < m_vaddr)
return false;
if (address >= m_vaddr + m_size)
return false;
return true;
}

}
@@ -309,6 +309,12 @@ namespace Kernel
return m_open_files[fd]->inode;
}

BAN::ErrorOr<int> OpenFileDescriptorSet::flags_of(int fd) const
{
TRY(validate_fd(fd));
return m_open_files[fd]->flags;
}

BAN::ErrorOr<void> OpenFileDescriptorSet::validate_fd(int fd) const
{
if (fd < 0 || fd >= (int)m_open_files.size())
@@ -6,7 +6,9 @@
#include <kernel/IDT.h>
#include <kernel/InterruptController.h>
#include <kernel/LockGuard.h>
#include <kernel/Memory/FileBackedRegion.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/MemoryBackedRegion.h>
#include <kernel/Memory/PageTableScope.h>
#include <kernel/Process.h>
#include <kernel/Scheduler.h>

@@ -128,23 +130,23 @@ namespace Kernel
if (auto rem = needed_bytes % PAGE_SIZE)
needed_bytes += PAGE_SIZE - rem;

auto argv_range = MUST(VirtualRange::create_to_vaddr_range(
auto argv_region = MUST(MemoryBackedRegion::create(
process->page_table(),
0x400000, KERNEL_OFFSET,
needed_bytes,
PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true
{ .start = 0x400000, .end = KERNEL_OFFSET },
MemoryRegion::Type::PRIVATE,
PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present
));

uintptr_t temp = argv_range->vaddr() + sizeof(char*) * 2;
argv_range->copy_from(0, (uint8_t*)&temp, sizeof(char*));
uintptr_t temp = argv_region->vaddr() + sizeof(char*) * 2;
MUST(argv_region->copy_data_to_region(0, (const uint8_t*)&temp, sizeof(char*)));

temp = 0;
argv_range->copy_from(sizeof(char*), (uint8_t*)&temp, sizeof(char*));
MUST(argv_region->copy_data_to_region(sizeof(char*), (const uint8_t*)&temp, sizeof(char*)));

argv_range->copy_from(sizeof(char*) * 2, (const uint8_t*)path.data(), path.size());
MUST(argv_region->copy_data_to_region(sizeof(char*) * 2, (const uint8_t*)path.data(), path.size()));

MUST(process->m_mapped_ranges.push_back(BAN::move(argv_range)));
MUST(process->m_mapped_regions.push_back(BAN::move(argv_region)));
}

process->m_userspace_info.argc = 1;
@@ -172,7 +174,8 @@ namespace Kernel
Process::~Process()
{
ASSERT(m_threads.empty());
ASSERT(m_mapped_ranges.empty());
ASSERT(m_mapped_regions.empty());
ASSERT(!m_loadable_elf);
ASSERT(m_exit_status.waiting == 0);
ASSERT(&PageTable::current() != m_page_table.ptr());
}

@@ -205,7 +208,8 @@ namespace Kernel
m_open_file_descriptors.close_all();

// NOTE: We must unmap ranges while the page table is still alive
m_mapped_ranges.clear();
m_mapped_regions.clear();
m_loadable_elf.clear();
}

void Process::on_thread_exit(Thread& thread)

@@ -318,10 +322,10 @@ namespace Kernel
OpenFileDescriptorSet open_file_descriptors(m_credentials);
TRY(open_file_descriptors.clone_from(m_open_file_descriptors));

BAN::Vector<BAN::UniqPtr<VirtualRange>> mapped_ranges;
TRY(mapped_ranges.reserve(m_mapped_ranges.size()));
for (auto& mapped_range : m_mapped_ranges)
MUST(mapped_ranges.push_back(TRY(mapped_range->clone(*page_table))));
BAN::Vector<BAN::UniqPtr<MemoryRegion>> mapped_regions;
TRY(mapped_regions.reserve(m_mapped_regions.size()));
for (auto& mapped_region : m_mapped_regions)
MUST(mapped_regions.push_back(TRY(mapped_region->clone(*page_table))));

auto loadable_elf = TRY(m_loadable_elf->clone(*page_table));

@@ -330,7 +334,7 @@ namespace Kernel
forked->m_working_directory = BAN::move(working_directory);
forked->m_page_table = BAN::move(page_table);
forked->m_open_file_descriptors = BAN::move(open_file_descriptors);
forked->m_mapped_ranges = BAN::move(mapped_ranges);
forked->m_mapped_regions = BAN::move(mapped_regions);
forked->m_loadable_elf = BAN::move(loadable_elf);
forked->m_is_userspace = m_is_userspace;
forked->m_userspace_info = m_userspace_info;
@@ -373,7 +377,7 @@ namespace Kernel

m_open_file_descriptors.close_cloexec();

m_mapped_ranges.clear();
m_mapped_regions.clear();
m_loadable_elf.clear();

m_loadable_elf = TRY(load_elf_for_exec(m_credentials, executable_path, m_working_directory, page_table()));

@@ -387,8 +391,8 @@ namespace Kernel
ASSERT(&Process::current() == this);

// allocate memory on the new process for arguments and environment
auto create_range =
[&](const auto& container) -> BAN::UniqPtr<VirtualRange>
auto create_region =
[&](BAN::Span<BAN::String> container) -> BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>>
{
size_t bytes = sizeof(char*);
for (auto& elem : container)

@@ -397,36 +401,36 @@ namespace Kernel
if (auto rem = bytes % PAGE_SIZE)
bytes += PAGE_SIZE - rem;

auto range = MUST(VirtualRange::create_to_vaddr_range(
auto region = TRY(MemoryBackedRegion::create(
page_table(),
0x400000, KERNEL_OFFSET,
bytes,
PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true
{ .start = 0x400000, .end = KERNEL_OFFSET },
MemoryRegion::Type::PRIVATE,
PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present
));

size_t data_offset = sizeof(char*) * (container.size() + 1);
for (size_t i = 0; i < container.size(); i++)
{
uintptr_t ptr_addr = range->vaddr() + data_offset;
range->copy_from(sizeof(char*) * i, (const uint8_t*)&ptr_addr, sizeof(char*));
range->copy_from(data_offset, (const uint8_t*)container[i].data(), container[i].size());
uintptr_t ptr_addr = region->vaddr() + data_offset;
TRY(region->copy_data_to_region(sizeof(char*) * i, (const uint8_t*)&ptr_addr, sizeof(char*)));
TRY(region->copy_data_to_region(data_offset, (const uint8_t*)container[i].data(), container[i].size()));
data_offset += container[i].size() + 1;
}

uintptr_t null = 0;
range->copy_from(sizeof(char*) * container.size(), (const uint8_t*)&null, sizeof(char*));
TRY(region->copy_data_to_region(sizeof(char*) * container.size(), (const uint8_t*)&null, sizeof(char*)));

return BAN::move(range);
return BAN::UniqPtr<MemoryRegion>(BAN::move(region));
};

auto argv_range = create_range(str_argv);
m_userspace_info.argv = (char**)argv_range->vaddr();
MUST(m_mapped_ranges.push_back(BAN::move(argv_range)));
auto argv_region = MUST(create_region(str_argv.span()));
m_userspace_info.argv = (char**)argv_region->vaddr();
MUST(m_mapped_regions.push_back(BAN::move(argv_region)));

auto envp_range = create_range(str_envp);
m_userspace_info.envp = (char**)envp_range->vaddr();
MUST(m_mapped_ranges.push_back(BAN::move(envp_range)));
auto envp_region = MUST(create_region(str_envp.span()));
m_userspace_info.envp = (char**)envp_region->vaddr();
MUST(m_mapped_regions.push_back(BAN::move(envp_region)));

m_userspace_info.argc = str_argv.size();
@@ -489,7 +493,10 @@ namespace Kernel
return BAN::Error::from_errno(ECHILD);

pid_t ret = target->pid();
*stat_loc = target->block_until_exit();

int stat = target->block_until_exit();
if (stat_loc)
*stat_loc = stat;

return ret;
}

@@ -544,11 +551,11 @@ namespace Kernel
return true;
}

for (auto& mapped_range : m_mapped_ranges)
for (auto& region : m_mapped_regions)
{
if (!mapped_range->contains(address))
if (!region->contains(address))
continue;
TRY(mapped_range->allocate_page_for_demand_paging(address));
TRY(region->allocate_page_containing(address));
return true;
}
@@ -807,34 +814,69 @@ namespace Kernel
if (args->prot != PROT_NONE && args->prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
return BAN::Error::from_errno(EINVAL);

PageTable::flags_t flags = PageTable::Flags::UserSupervisor;
if (args->prot & PROT_READ)
flags |= PageTable::Flags::Present;
if (args->prot & PROT_WRITE)
flags |= PageTable::Flags::ReadWrite | PageTable::Flags::Present;
if (args->prot & PROT_EXEC)
flags |= PageTable::Flags::Execute | PageTable::Flags::Present;
if (args->flags & MAP_FIXED)
return BAN::Error::from_errno(ENOTSUP);

if (args->flags == (MAP_ANONYMOUS | MAP_PRIVATE))
if (!(args->flags & MAP_PRIVATE) == !(args->flags & MAP_SHARED))
return BAN::Error::from_errno(EINVAL);
auto region_type = (args->flags & MAP_PRIVATE) ? MemoryRegion::Type::PRIVATE : MemoryRegion::Type::SHARED;

PageTable::flags_t page_flags = 0;
if (args->prot & PROT_READ)
page_flags |= PageTable::Flags::Present;
if (args->prot & PROT_WRITE)
page_flags |= PageTable::Flags::ReadWrite | PageTable::Flags::Present;
if (args->prot & PROT_EXEC)
page_flags |= PageTable::Flags::Execute | PageTable::Flags::Present;

if (page_flags == 0)
page_flags = PageTable::Flags::Reserved;
else
page_flags |= PageTable::Flags::UserSupervisor;

if (args->flags & MAP_ANONYMOUS)
{
if (args->addr != nullptr)
return BAN::Error::from_errno(ENOTSUP);
if (args->off != 0)
return BAN::Error::from_errno(EINVAL);
if (args->len % PAGE_SIZE != 0)
return BAN::Error::from_errno(EINVAL);

auto range = TRY(VirtualRange::create_to_vaddr_range(
auto region = TRY(MemoryBackedRegion::create(
page_table(),
0x400000, KERNEL_OFFSET,
args->len,
PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present,
false
{ .start = 0x400000, .end = KERNEL_OFFSET },
region_type, page_flags
));

LockGuard _(m_lock);
TRY(m_mapped_ranges.push_back(BAN::move(range)));
return m_mapped_ranges.back()->vaddr();
TRY(m_mapped_regions.push_back(BAN::move(region)));
return m_mapped_regions.back()->vaddr();
}

auto inode = TRY(m_open_file_descriptors.inode_of(args->fildes));
if (inode->mode().ifreg())
{
if (args->addr != nullptr)
return BAN::Error::from_errno(ENOTSUP);

auto inode_flags = TRY(m_open_file_descriptors.flags_of(args->fildes));
if (!(inode_flags & O_RDONLY))
return BAN::Error::from_errno(EACCES);
if (region_type == MemoryRegion::Type::SHARED)
if ((args->prot & PROT_WRITE) && !(inode_flags & O_WRONLY))
return BAN::Error::from_errno(EACCES);

auto region = TRY(FileBackedRegion::create(
inode,
page_table(),
args->off, args->len,
{ .start = 0x400000, .end = KERNEL_OFFSET },
region_type, page_flags
));

LockGuard _(m_lock);
TRY(m_mapped_regions.push_back(BAN::move(region)));
return m_mapped_regions.back()->vaddr();
}

return BAN::Error::from_errno(ENOTSUP);
@@ -851,13 +893,10 @@ namespace Kernel

LockGuard _(m_lock);

for (size_t i = 0; i < m_mapped_ranges.size(); i++)
{
auto& range = m_mapped_ranges[i];
if (vaddr + len < range->vaddr() || vaddr >= range->vaddr() + range->size())
continue;
m_mapped_ranges.remove(i);
}
// FIXME: We should only unmap partial regions
for (size_t i = 0; i < m_mapped_regions.size(); i++)
if (m_mapped_regions[i]->overlaps(vaddr, len))
m_mapped_regions.remove(i);

return 0;
}
@@ -1341,10 +1380,11 @@ namespace Kernel
return;

// FIXME: should we allow cross mapping access?
for (auto& mapped_range : m_mapped_ranges)
if (vaddr >= mapped_range->vaddr() && vaddr + size <= mapped_range->vaddr() + mapped_range->size())
for (auto& mapped_region : m_mapped_regions)
mapped_region->contains_fully(vaddr, size);
return;

// FIXME: elf should contain full range [vaddr, vaddr + size)
if (m_loadable_elf->contains(vaddr))
return;
@@ -54,10 +54,12 @@ static bool allocate_pool(size_t pool_index)
assert(pool.start == nullptr);

// allocate memory for pool
pool.start = (uint8_t*)mmap(nullptr, pool.size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (pool.start == nullptr)
void* new_pool = mmap(nullptr, pool.size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (new_pool == MAP_FAILED)
return false;

pool.start = (uint8_t*)new_pool;

// initialize pool to single unallocated node
auto* node = (malloc_node_t*)pool.start;
node->allocated = false;
@@ -9,11 +9,12 @@ void* mmap(void* addr, size_t len, int prot, int flags, int fildes, off_t off)
.len = len,
.prot = prot,
.flags = flags,
.fildes = fildes,
.off = off
};
long ret = syscall(SYS_MMAP, &args);
if (ret == -1)
return nullptr;
return MAP_FAILED;
return (void*)ret;
}
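The libc change above makes the mmap() wrapper return MAP_FAILED instead of nullptr on error, matching the POSIX convention that (void*)-1 is the error value. Callers therefore compare against MAP_FAILED, as the new cat-mmap and mmap-shared-test programs below do; a minimal hedged usage sketch, where fd and len stand for a descriptor and length supplied by the surrounding code:

// Illustrative only: error checking against MAP_FAILED, not nullptr.
void* addr = mmap(nullptr, len, PROT_READ, MAP_PRIVATE, fd, 0);
if (addr == MAP_FAILED)
	perror("mmap");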
@@ -4,11 +4,13 @@ project(userspace CXX)

set(USERSPACE_PROJECTS
cat
cat-mmap
dd
echo
id
init
ls
mmap-shared-test
poweroff
Shell
snake
@@ -0,0 +1,17 @@
cmake_minimum_required(VERSION 3.26)

project(cat-mmap CXX)

set(SOURCES
main.cpp
)

add_executable(cat-mmap ${SOURCES})
target_compile_options(cat-mmap PUBLIC -O2 -g)
target_link_libraries(cat-mmap PUBLIC libc)

add_custom_target(cat-mmap-install
COMMAND sudo cp ${CMAKE_CURRENT_BINARY_DIR}/cat-mmap ${BANAN_BIN}/
DEPENDS cat-mmap
USES_TERMINAL
)
@@ -0,0 +1,62 @@
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>

bool cat_file(int fd)
{
struct stat st;
if (fstat(fd, &st) == -1)
{
perror("stat");
return false;
}

void* addr = mmap(nullptr, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
if (addr == MAP_FAILED)
{
perror("mmap");
return false;
}

ssize_t nwrite = write(STDOUT_FILENO, addr, st.st_size);
if (nwrite == -1)
perror("write");

if (munmap(addr, st.st_size) == -1)
{
perror("munmap");
return false;
}

return true;
}

int main(int argc, char** argv)
{
int ret = 0;

if (argc > 1)
{
for (int i = 1; i < argc; i++)
{
int fd = open(argv[i], O_RDONLY);
if (fd == -1)
{
perror(argv[i]);
ret = 1;
continue;
}
if (!cat_file(fd))
ret = 1;
close(fd);
}
}
else
{
if (!cat_file(STDIN_FILENO))
ret = 1;
}

return ret;
}
@@ -0,0 +1,17 @@
cmake_minimum_required(VERSION 3.26)

project(mmap-shared-test CXX)

set(SOURCES
main.cpp
)

add_executable(mmap-shared-test ${SOURCES})
target_compile_options(mmap-shared-test PUBLIC -O2 -g)
target_link_libraries(mmap-shared-test PUBLIC libc)

add_custom_target(mmap-shared-test-install
COMMAND sudo cp ${CMAKE_CURRENT_BINARY_DIR}/mmap-shared-test ${BANAN_BIN}/
DEPENDS mmap-shared-test
USES_TERMINAL
)
@@ -0,0 +1,115 @@
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

#define FILE_NAME "test-file"
#define FILE_SIZE (1024*1024)

int prepare_file()
{
int fd = open(FILE_NAME, O_RDWR | O_TRUNC | O_CREAT, 0666);
if (fd == -1)
{
perror("open");
return 1;
}

void* null_buffer = malloc(FILE_SIZE);
memset(null_buffer, 0x00, FILE_SIZE);

if (write(fd, null_buffer, FILE_SIZE) == -1)
{
perror("write");
return 1;
}

free(null_buffer);
close(fd);

printf("file created\n");

return 0;
}

int job1()
{
int fd = open(FILE_NAME, O_RDONLY);
if (fd == -1)
{
perror("open");
return 1;
}

void* addr = mmap(nullptr, FILE_SIZE, PROT_READ, MAP_SHARED, fd, 0);
if (addr == MAP_FAILED)
{
perror("mmap");
return 1;
}

sleep(4);

size_t sum = 0;
for (int i = 0; i < FILE_SIZE; i++)
sum += ((uint8_t*)addr)[i];

munmap(addr, FILE_SIZE);
close(fd);

printf("sum: %zu\n", sum);

return 0;
}

int job2()
{
sleep(2);

int fd = open(FILE_NAME, O_RDWR);
if (fd == -1)
{
perror("open");
return 1;
}

void* addr = mmap(nullptr, FILE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (addr == MAP_FAILED)
{
perror("mmap");
return 1;
}

memset(addr, 'a', FILE_SIZE);

munmap(addr, FILE_SIZE);
close(fd);

printf("expecting: %zu\n", (size_t)'a' * FILE_SIZE);

return 0;
}

int main()
{
if (int ret = prepare_file())
return ret;

pid_t pid = fork();
if (pid == 0)
return job1();

if (pid == -1)
{
perror("fork");
return 1;
}

int ret = job2();
waitpid(pid, nullptr, 0);

return ret;
}
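For reference, job2 fills the 1 MiB file with 'a' (0x61 = 97) through its shared writable mapping, so the "expecting" line and job1's "sum" line should both print 97 * 1048576 = 101711872, assuming job2's writes become visible through the shared file pages (or are written back to the file) before job1 reads its read-only shared mapping.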