forked from Bananymous/banan-os
Kernel: Cleanup VirtualRange code
VirtualRange does not need to look into the page table to find its mapped physical pages. It can (and should) just keep track of them itself.
parent 02051ed60f
commit 96c7e9e29d
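The change in a nutshell, as a self-contained sketch (userspace stand-ins for the kernel's Heap::get().take_free_page() / release_page(); illustrative, not banan-os code): a range records every physical frame it takes in its own vector and releases exactly those in its destructor, with no page-table lookups.

#include <cstddef>
#include <cstdint>
#include <new>
#include <vector>

using paddr_t = uintptr_t;

// Hypothetical stand-ins for the kernel's physical page allocator.
static paddr_t take_free_page() { return reinterpret_cast<paddr_t>(new (std::nothrow) char[4096]); }
static void release_page(paddr_t paddr) { delete[] reinterpret_cast<char*>(paddr); }

class TrackedRange
{
public:
    explicit TrackedRange(std::size_t page_count)
    {
        // Every frame handed to this range is recorded in m_paddrs...
        m_paddrs.resize(page_count, 0);
        for (auto& paddr : m_paddrs)
            paddr = take_free_page();
    }

    ~TrackedRange()
    {
        // ...so teardown never has to ask the page table which frames are mapped.
        for (paddr_t paddr : m_paddrs)
            if (paddr != 0)
                release_page(paddr);
    }

private:
    std::vector<paddr_t> m_paddrs;
};

int main()
{
    TrackedRange range(4); // allocates four frames, destructor frees them
}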
@@ -30,19 +30,20 @@ namespace Kernel
        BAN::ErrorOr<void> allocate_page_for_demand_paging(vaddr_t address);

        void copy_from(size_t offset, const uint8_t* buffer, size_t bytes);

    private:
        VirtualRange(PageTable&, bool preallocated, vaddr_t, size_t, PageTable::flags_t);
        BAN::ErrorOr<void> initialize();

    private:
        VirtualRange(PageTable&, bool preallocated);
        PageTable& m_page_table;
        const bool m_preallocated;
        const vaddr_t m_vaddr;
        const size_t m_size;
        const PageTable::flags_t m_flags;
        BAN::Vector<paddr_t> m_paddrs;
        SpinLock m_lock;

        void set_zero();

    private:
        PageTable& m_page_table;
        const bool m_preallocated;
        vaddr_t m_vaddr { 0 };
        size_t m_size { 0 };
        PageTable::flags_t m_flags { 0 };

        friend class BAN::UniqPtr<VirtualRange>;
    };

}
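The new header splits construction in two: a constructor that only stores the (now const) parameters and a separate initialize() step that can fail. A minimal sketch of that two-phase pattern in standard C++, with std::error_code standing in for BAN::ErrorOr (names here are illustrative):

#include <cstddef>
#include <cstdint>
#include <memory>
#include <new>
#include <system_error>
#include <vector>

class Range
{
public:
    // Factory: constructs cheaply, then runs the fallible second phase.
    static std::unique_ptr<Range> create(uintptr_t vaddr, std::size_t size, std::error_code& ec)
    {
        auto range = std::unique_ptr<Range>(new Range(vaddr, size));
        ec = range->initialize();
        if (ec)
            return nullptr;
        return range;
    }

private:
    // The constructor only stores members; it cannot fail.
    Range(uintptr_t vaddr, std::size_t size)
        : m_vaddr(vaddr), m_size(size)
    { }

    // Fallible work lives here so errors can be reported to the caller.
    std::error_code initialize()
    {
        try {
            m_paddrs.resize(m_size / 4096, 0);
        } catch (const std::bad_alloc&) {
            return std::make_error_code(std::errc::not_enough_memory);
        }
        return {};
    }

    const uintptr_t m_vaddr;
    const std::size_t m_size;
    std::vector<uintptr_t> m_paddrs;
};

int main()
{
    std::error_code ec;
    auto range = Range::create(0x1000, 16 * 4096, ec);
    return (ec || !range) ? 1 : 0;
}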
@@ -11,36 +11,9 @@ namespace Kernel
        ASSERT(vaddr % PAGE_SIZE == 0);
        ASSERT(vaddr > 0);

        VirtualRange* result_ptr = new VirtualRange(page_table, preallocate_pages);
        if (result_ptr == nullptr)
            return BAN::Error::from_errno(ENOMEM);

        auto result = BAN::UniqPtr<VirtualRange>::adopt(result_ptr);
        result->m_vaddr = vaddr;
        result->m_size = size;
        result->m_flags = flags;

        auto result = TRY(BAN::UniqPtr<VirtualRange>::create(page_table, preallocate_pages, vaddr, size, flags));
        ASSERT(page_table.reserve_range(vaddr, size));

        if (!preallocate_pages)
            return result;

        size_t needed_pages = size / PAGE_SIZE;
        for (size_t i = 0; i < needed_pages; i++)
        {
            paddr_t paddr = Heap::get().take_free_page();
            if (paddr == 0)
            {
                for (size_t j = 0; j < i; j++)
                    Heap::get().release_page(page_table.physical_address_of(vaddr + j * PAGE_SIZE));
                page_table.unmap_range(vaddr, size);
                result->m_vaddr = 0;
                return BAN::Error::from_errno(ENOMEM);
            }
            page_table.map_page_at(paddr, vaddr + i * PAGE_SIZE, flags);
        }

        result->set_zero();
        TRY(result->initialize());

        return result;
    }
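In the removed body above, a mid-loop allocation failure is unwound by hand: release the pages taken so far, unmap the range, clear m_vaddr. With ownership recorded as it is acquired, an early return is enough. A small sketch of that idea, using std::unique_ptr in place of raw frames (hypothetical helpers, not kernel API):

#include <cstddef>
#include <memory>
#include <new>
#include <optional>
#include <utility>
#include <vector>

struct Frame { alignas(4096) unsigned char bytes[4096]; };

// If any single allocation fails we just return: the vector owns what was
// already allocated and releases it on the way out, so no explicit rollback
// loop is needed.
std::optional<std::vector<std::unique_ptr<Frame>>> allocate_frames(std::size_t count)
{
    std::vector<std::unique_ptr<Frame>> frames;
    frames.reserve(count);
    for (std::size_t i = 0; i < count; i++)
    {
        auto frame = std::unique_ptr<Frame>(new (std::nothrow) Frame);
        if (!frame)
            return std::nullopt; // earlier frames are freed automatically
        frames.push_back(std::move(frame));
    }
    return std::move(frames);
}

int main()
{
    auto frames = allocate_frames(8);
    return frames ? 0 : 1;
}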
@@ -61,34 +34,71 @@ namespace Kernel
        vaddr_t vaddr = page_table.reserve_free_contiguous_pages(size / PAGE_SIZE, vaddr_start, vaddr_end);
        if (vaddr == 0)
        {
            dprintln("no free {} byte area", size);
            return BAN::Error::from_errno(ENOMEM);
        }
        ASSERT(vaddr >= vaddr_start);
        ASSERT(vaddr + size <= vaddr_end);

        SpinLockGuard _(page_table);
        page_table.unmap_range(vaddr, size); // We have to unmap here to allow reservation in create_to_vaddr()
        return create_to_vaddr(page_table, vaddr, size, flags, preallocate_pages);

        auto result_or_error = BAN::UniqPtr<VirtualRange>::create(page_table, preallocate_pages, vaddr, size, flags);
        if (result_or_error.is_error())
        {
            page_table.unmap_range(vaddr, size);
            return result_or_error.release_error();
        }

        auto result = result_or_error.release_value();
        TRY(result->initialize());

        return result;
    }
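The new error path above uses is_error() / release_error() / release_value() and the TRY(...) early-return style. A tiny self-contained Result type sketching how that pattern reads (an assumption about the semantics, not the BAN::ErrorOr implementation):

#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <optional>
#include <string>

template<typename T>
struct Result
{
    std::optional<T> value;
    int error = 0; // errno-style code, meaningful only when value is empty

    bool is_error() const { return !value.has_value(); }
    int release_error() const { return error; }
    T release_value() { return std::move(*value); }
};

Result<int> parse_positive(const std::string& text)
{
    const int v = std::atoi(text.c_str());
    if (v <= 0)
        return { std::nullopt, EINVAL };
    return { v, 0 };
}

// Hand-written equivalent of TRY(parse_positive(text)): propagate the error,
// otherwise keep going with the released value.
Result<int> doubled(const std::string& text)
{
    auto result = parse_positive(text);
    if (result.is_error())
        return { std::nullopt, result.release_error() };
    return { result.release_value() * 2, 0 };
}

int main()
{
    auto r = doubled("21");
    std::printf("%d\n", r.is_error() ? -r.release_error() : r.release_value());
}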
    VirtualRange::VirtualRange(PageTable& page_table, bool preallocated)
    VirtualRange::VirtualRange(PageTable& page_table, bool preallocated, vaddr_t vaddr, size_t size, PageTable::flags_t flags)
        : m_page_table(page_table)
        , m_preallocated(preallocated)
        , m_vaddr(vaddr)
        , m_size(size)
        , m_flags(flags)
    { }

    VirtualRange::~VirtualRange()
    {
        if (m_vaddr == 0)
            return;
        ASSERT(m_vaddr);
        m_page_table.unmap_range(m_vaddr, m_size);

        for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
        {
            paddr_t paddr = m_page_table.physical_address_of(vaddr() + offset);
            if (paddr)
        for (paddr_t paddr : m_paddrs)
            if (paddr != 0)
                Heap::get().release_page(paddr);
    }

    BAN::ErrorOr<void> VirtualRange::initialize()
    {
        TRY(m_paddrs.resize(m_size / PAGE_SIZE, 0));

        if (!m_preallocated)
            return {};

        const size_t page_count = m_size / PAGE_SIZE;
        for (size_t i = 0; i < page_count; i++)
        {
            m_paddrs[i] = Heap::get().take_free_page();
            if (m_paddrs[i] == 0)
                return BAN::Error::from_errno(ENOMEM);
            m_page_table.map_page_at(m_paddrs[i], m_vaddr + i * PAGE_SIZE, m_flags);
        }
        m_page_table.unmap_range(vaddr(), size());

        if (&PageTable::current() == &m_page_table || &PageTable::kernel() == &m_page_table)
            memset(reinterpret_cast<void*>(m_vaddr), 0, m_size);
        else
        {
            const size_t page_count = m_size / PAGE_SIZE;
            for (size_t i = 0; i < page_count; i++)
            {
                PageTable::with_fast_page(m_paddrs[i], [&] {
                    memset(PageTable::fast_page_as_ptr(), 0, PAGE_SIZE);
                });
            }
        }

        return {};
    }
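initialize() zeroes the range even when it belongs to a page table other than the current or kernel one, by handing each frame to PageTable::with_fast_page and writing through fast_page_as_ptr(). A userspace model of that temporary-mapping pattern (an assumption about what with_fast_page does underneath; the globals only simulate the reserved mapping slot):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

constexpr std::size_t PAGE_SIZE = 4096;
alignas(PAGE_SIZE) static uint8_t g_frames[4][PAGE_SIZE]; // pretend physical frames

static void* g_fast_page = nullptr; // stands in for the reserved kernel virtual slot

void* fast_page_as_ptr() { return g_fast_page; }

// Temporarily "map" a frame at the fixed slot, run the callback, then unmap.
template<typename F>
void with_fast_page(uint8_t* frame, F callback)
{
    g_fast_page = frame;   // real kernel: map the frame at the fast-page vaddr
    callback();
    g_fast_page = nullptr; // real kernel: unmap the fast-page vaddr again
}

int main()
{
    // Zero every frame of a range that is not mapped in the current address space.
    for (auto& frame : g_frames)
        with_fast_page(frame, [] { std::memset(fast_page_as_ptr(), 0, PAGE_SIZE); });
    std::printf("%d\n", static_cast<int>(g_frames[0][0]));
}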
    BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::clone(PageTable& page_table)
@@ -96,92 +106,51 @@ namespace Kernel
        ASSERT(&PageTable::current() == &m_page_table);
        ASSERT(&m_page_table != &page_table);

        auto result = TRY(create_to_vaddr(page_table, vaddr(), size(), flags(), m_preallocated));
        auto result = TRY(create_to_vaddr(page_table, m_vaddr, m_size, m_flags, m_preallocated));

        SpinLockGuard _(m_page_table);
        for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
        SpinLockGuard _(m_lock);

        const size_t page_count = m_size / PAGE_SIZE;
        for (size_t i = 0; i < page_count; i++)
        {
            if (!m_preallocated && m_page_table.physical_address_of(vaddr() + offset))
            if (m_paddrs[i] == 0)
                continue;
            if (!result->m_preallocated)
            {
                paddr_t paddr = Heap::get().take_free_page();
                if (paddr == 0)
                result->m_paddrs[i] = Heap::get().take_free_page();
                if (result->m_paddrs[i] == 0)
                    return BAN::Error::from_errno(ENOMEM);
                result->m_page_table.map_page_at(paddr, vaddr() + offset, m_flags);
                result->m_page_table.map_page_at(result->m_paddrs[i], m_vaddr + i * PAGE_SIZE, m_flags);
            }

            PageTable::with_fast_page(result->m_page_table.physical_address_of(vaddr() + offset), [&] {
                memcpy(PageTable::fast_page_as_ptr(), (void*)(vaddr() + offset), PAGE_SIZE);
            PageTable::with_fast_page(result->m_paddrs[i], [&] {
                memcpy(PageTable::fast_page_as_ptr(), reinterpret_cast<void*>(m_vaddr + i * PAGE_SIZE), PAGE_SIZE);
            });
        }

        return result;
    }
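clone() copies the range into another address space, and for demand-paged ranges only the pages that are actually present (m_paddrs[i] != 0) are copied. A compact, self-contained sketch of that "copy only what exists" behaviour, with std::optional standing in for a page that may or may not have been faulted in:

#include <array>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

constexpr std::size_t PAGE_SIZE = 4096;
using Page = std::array<uint8_t, PAGE_SIZE>;

// A demand-paged range: entries stay disengaged until the page is first touched.
struct SparseRange
{
    std::vector<std::optional<Page>> pages;

    // The clone copies only pages that exist in the source; untouched pages
    // stay untouched in the copy as well.
    SparseRange clone() const
    {
        SparseRange copy;
        copy.pages.resize(pages.size());
        for (std::size_t i = 0; i < pages.size(); i++)
        {
            if (!pages[i])
                continue;              // never faulted in: nothing to copy
            copy.pages[i] = *pages[i]; // allocate a fresh page and copy contents
        }
        return copy;
    }
};

int main()
{
    SparseRange range;
    range.pages.resize(8);
    range.pages[3].emplace();   // pretend page 3 was demand-allocated
    (*range.pages[3])[0] = 42;
    auto copy = range.clone();
    return (*copy.pages[3])[0] == 42 ? 0 : 1;
}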
    BAN::ErrorOr<void> VirtualRange::allocate_page_for_demand_paging(vaddr_t address)
    BAN::ErrorOr<void> VirtualRange::allocate_page_for_demand_paging(vaddr_t vaddr)
    {
        ASSERT(!m_preallocated);
        ASSERT(contains(address));
        ASSERT(vaddr % PAGE_SIZE == 0);
        ASSERT(contains(vaddr));
        ASSERT(&PageTable::current() == &m_page_table);

        vaddr_t vaddr = address & PAGE_ADDR_MASK;
        ASSERT(m_page_table.physical_address_of(vaddr) == 0);
        const size_t index = (vaddr - m_vaddr) / PAGE_SIZE;
        ASSERT(m_paddrs[index] == 0);

        paddr_t paddr = Heap::get().take_free_page();
        if (paddr == 0)
        SpinLockGuard _(m_lock);

        m_paddrs[index] = Heap::get().take_free_page();
        if (m_paddrs[index] == 0)
            return BAN::Error::from_errno(ENOMEM);

        m_page_table.map_page_at(paddr, vaddr, m_flags);
        memset((void*)vaddr, 0x00, PAGE_SIZE);
        m_page_table.map_page_at(m_paddrs[index], vaddr, m_flags);
        memset(reinterpret_cast<void*>(vaddr), 0, PAGE_SIZE);

        return {};
    }
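allocate_page_for_demand_paging() turns the faulting address into a page index, takes one frame, maps it and zeroes it. A minimal sketch of the same bookkeeping outside the kernel (names and the backing store are illustrative):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
#include <vector>

constexpr std::size_t PAGE_SIZE = 4096;
struct Page { uint8_t bytes[PAGE_SIZE]; };

class DemandRange
{
public:
    explicit DemandRange(std::size_t page_count) : m_pages(page_count) { }

    // Called on first access to an offset inside the range: compute the page
    // index, allocate a page, zero it, record it.
    bool allocate_page_for_demand_paging(std::size_t offset)
    {
        const std::size_t index = offset / PAGE_SIZE;
        if (m_pages[index])
            return true; // already present
        m_pages[index] = std::make_unique<Page>();
        std::memset(m_pages[index]->bytes, 0, PAGE_SIZE);
        return m_pages[index] != nullptr;
    }

private:
    std::vector<std::unique_ptr<Page>> m_pages;
};

int main()
{
    DemandRange range(16);
    return range.allocate_page_for_demand_paging(5 * PAGE_SIZE + 123) ? 0 : 1;
}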
    void VirtualRange::set_zero()
    {
        if (&PageTable::current() == &m_page_table || &PageTable::kernel() == &m_page_table)
        {
            memset((void*)vaddr(), 0, size());
            return;
        }

        for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
        {
            PageTable::with_fast_page(m_page_table.physical_address_of(vaddr() + offset), [&] {
                memset(PageTable::fast_page_as_ptr(), 0x00, PAGE_SIZE);
            });
        }
    }

    void VirtualRange::copy_from(size_t offset, const uint8_t* buffer, size_t bytes)
    {
        if (bytes == 0)
            return;

        // Verify no overflow
        ASSERT(bytes <= size());
        ASSERT(offset <= size());
        ASSERT(offset <= size() - bytes);

        if (&PageTable::current() == &m_page_table || &PageTable::kernel() == &m_page_table)
        {
            memcpy((void*)(vaddr() + offset), buffer, bytes);
            return;
        }

        size_t page_offset = offset % PAGE_SIZE;
        size_t page_index = offset / PAGE_SIZE;

        while (bytes > 0)
        {
            PageTable::with_fast_page(m_page_table.physical_address_of(vaddr() + page_index * PAGE_SIZE), [&] {
                memcpy(PageTable::fast_page_as_ptr(page_offset), buffer, PAGE_SIZE - page_offset);
            });

            buffer += PAGE_SIZE - page_offset;
            bytes -= PAGE_SIZE - page_offset;
            page_offset = 0;
            page_index++;
        }
    }

}
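copy_from() has to split the copy at page boundaries: the first chunk may start mid-page, every later chunk starts at offset 0. A standalone sketch of that arithmetic against a plain paged buffer (here the final chunk is additionally clamped to the bytes remaining):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

constexpr std::size_t PAGE_SIZE = 4096;

// Destination backed by separate pages; the copy is split at page boundaries.
void copy_into_pages(std::vector<std::vector<uint8_t>>& pages,
                     std::size_t offset, const uint8_t* buffer, std::size_t bytes)
{
    std::size_t page_offset = offset % PAGE_SIZE;
    std::size_t page_index = offset / PAGE_SIZE;

    while (bytes > 0)
    {
        const std::size_t chunk = std::min(bytes, PAGE_SIZE - page_offset);
        std::memcpy(pages[page_index].data() + page_offset, buffer, chunk);
        buffer += chunk;
        bytes -= chunk;
        page_offset = 0; // only the first chunk can start mid-page
        page_index++;
    }
}

int main()
{
    std::vector<std::vector<uint8_t>> pages(4, std::vector<uint8_t>(PAGE_SIZE, 0));
    const uint8_t data[6000] = {};
    copy_into_pages(pages, 100, data, sizeof(data));
    return 0;
}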