Kernel: Allow private file mappings past file end

This is just to make memory mapping ELF files easier :D
This commit is contained in:
Bananymous 2024-09-05 14:48:42 +03:00
parent e8bcebfb8e
commit de35cec2e1
1 changed file with 37 additions and 19 deletions

View File

@@ -13,8 +13,20 @@ namespace Kernel
 	if (offset < 0 || offset % PAGE_SIZE || size == 0)
 		return BAN::Error::from_errno(EINVAL);
-	if (size > (size_t)inode->size() || (size_t)offset > (size_t)inode->size() - size)
-		return BAN::Error::from_errno(EOVERFLOW);
+	switch (type)
+	{
+		case Type::PRIVATE:
+			if (offset >= inode->size())
+				return BAN::Error::from_errno(EOVERFLOW);
+			break;
+		case Type::SHARED:
+			if ((size > (size_t)inode->size() || (size_t)offset > (size_t)inode->size() - size))
+				return BAN::Error::from_errno(EOVERFLOW);
+			break;
+		default:
+			ASSERT_NOT_REACHED();
+			break;
+	}
 
 	auto* region_ptr = new FileBackedRegion(inode, page_table, offset, size, type, flags);
 	if (region_ptr == nullptr)
@@ -112,7 +124,7 @@ namespace Kernel
 		ASSERT(contains(address));
 
 		// Check if address is already mapped
-		vaddr_t vaddr = address & PAGE_ADDR_MASK;
+		const vaddr_t vaddr = address & PAGE_ADDR_MASK;
 		if (m_page_table.physical_address_of(vaddr) != 0)
 			return false;
@@ -126,10 +138,14 @@ namespace Kernel
 				// Temporarily force mapping to be writable so kernel can write to it
 				m_page_table.map_page_at(paddr, vaddr, m_flags | PageTable::Flags::ReadWrite);
 
-				size_t file_offset = m_offset + (vaddr - m_vaddr);
-				size_t bytes = BAN::Math::min<size_t>(m_size - file_offset, PAGE_SIZE);
-
 				ASSERT(&PageTable::current() == &m_page_table);
+				memset(reinterpret_cast<void*>(vaddr), 0x00, PAGE_SIZE);
+
+				const size_t file_offset = m_offset + (vaddr - m_vaddr);
+				if (file_offset < static_cast<size_t>(m_inode->size()))
+				{
+					const size_t bytes = BAN::Math::min<size_t>(BAN::Math::min<size_t>(m_offset + m_size, m_inode->size()) - file_offset, PAGE_SIZE);
 
 				auto read_ret = m_inode->read(file_offset, BAN::ByteSpan((uint8_t*)vaddr, bytes));
 				if (read_ret.is_error())
@@ -146,6 +162,7 @@ namespace Kernel
 					m_page_table.unmap_page(vaddr);
 					return BAN::Error::from_errno(EIO);
 				}
+			}
 
 			// Disable writable if not wanted
 			if (!(m_flags & PageTable::Flags::ReadWrite))
@@ -172,7 +189,8 @@ namespace Kernel
 			TRY(m_inode->read(offset, BAN::ByteSpan(m_shared_data->page_buffer, bytes)));
 
 			PageTable::with_fast_page(pages[page_index], [&] {
-				memcpy(PageTable::fast_page_as_ptr(), m_shared_data->page_buffer, PAGE_SIZE);
+				memcpy(PageTable::fast_page_as_ptr(), m_shared_data->page_buffer, bytes);
+				memset(PageTable::fast_page_as_ptr(bytes), 0x00, PAGE_SIZE - bytes);
 			});
 		}