diff --git a/kernel/include/kernel/Memory/MemoryBackedRegion.h b/kernel/include/kernel/Memory/MemoryBackedRegion.h index de1790d6..718c0b9a 100644 --- a/kernel/include/kernel/Memory/MemoryBackedRegion.h +++ b/kernel/include/kernel/Memory/MemoryBackedRegion.h @@ -28,6 +28,20 @@ namespace Kernel private: MemoryBackedRegion(PageTable&, size_t size, Type, PageTable::flags_t, int status_flags); + + private: + struct PhysicalPage + { + PhysicalPage(paddr_t paddr) + : paddr(paddr) + { } + ~PhysicalPage(); + + BAN::Atomic ref_count { 1 }; + const paddr_t paddr; + }; + BAN::Vector m_physical_pages; + Mutex m_mutex; }; } diff --git a/kernel/kernel/Memory/MemoryBackedRegion.cpp b/kernel/kernel/Memory/MemoryBackedRegion.cpp index 34ca07eb..4b2bafd1 100644 --- a/kernel/kernel/Memory/MemoryBackedRegion.cpp +++ b/kernel/kernel/Memory/MemoryBackedRegion.cpp @@ -1,5 +1,6 @@ #include #include +#include namespace Kernel { @@ -14,6 +15,9 @@ namespace Kernel return BAN::Error::from_errno(ENOMEM); auto region = BAN::UniqPtr::adopt(region_ptr); + const size_t page_count = (size + PAGE_SIZE - 1) / PAGE_SIZE; + TRY(region->m_physical_pages.resize(page_count, nullptr)); + TRY(region->initialize(address_range)); return region; @@ -28,38 +32,75 @@ namespace Kernel { ASSERT(m_type == Type::PRIVATE); - size_t needed_pages = BAN::Math::div_round_up(m_size, PAGE_SIZE); - for (size_t i = 0; i < needed_pages; i++) - { - paddr_t paddr = m_page_table.physical_address_of(m_vaddr + i * PAGE_SIZE); - if (paddr != 0) - Heap::get().release_page(paddr); - } + for (auto* page : m_physical_pages) + if (page && --page->ref_count == 0) + delete page; + } + + MemoryBackedRegion::PhysicalPage::~PhysicalPage() + { + Heap::get().release_page(paddr); } BAN::ErrorOr MemoryBackedRegion::allocate_page_containing_impl(vaddr_t address, bool wants_write) { ASSERT(m_type == Type::PRIVATE); - ASSERT(contains(address)); - (void)wants_write; - // Check if address is already mapped - vaddr_t vaddr = address & PAGE_ADDR_MASK; 
- if (m_page_table.physical_address_of(vaddr) != 0) - return false; + const vaddr_t vaddr = address & PAGE_ADDR_MASK; - // Map new physcial page to address - paddr_t paddr = Heap::get().take_free_page(); + LockGuard _(m_mutex); + + auto& physical_page = m_physical_pages[(vaddr - m_vaddr) / PAGE_SIZE]; + + if (physical_page == nullptr) + { + const paddr_t paddr = Heap::get().take_free_page(); + if (paddr == 0) + return BAN::Error::from_errno(ENOMEM); + + physical_page = new PhysicalPage(paddr); + if (physical_page == nullptr) + { Heap::get().release_page(paddr); return BAN::Error::from_errno(ENOMEM); } + + m_page_table.map_page_at(paddr, vaddr, m_flags); + PageTable::with_fast_page(paddr, [] { + memset(PageTable::fast_page_as_ptr(), 0x00, PAGE_SIZE); + }); + + return true; + } + + if (auto is_only_ref = (physical_page->ref_count == 1); is_only_ref || !wants_write) + { + auto flags = m_flags; + if (!is_only_ref) + flags &= ~PageTable::ReadWrite; + + m_page_table.map_page_at(physical_page->paddr, vaddr, flags); + + return true; + } + + const paddr_t paddr = Heap::get().take_free_page(); if (paddr == 0) return BAN::Error::from_errno(ENOMEM); + + auto* new_physical_page = new PhysicalPage(paddr); + if (new_physical_page == nullptr) + { Heap::get().release_page(paddr); return BAN::Error::from_errno(ENOMEM); } + m_page_table.map_page_at(paddr, vaddr, m_flags); - // Zero out the new page - PageTable::with_fast_page(paddr, [&] { - memset(PageTable::fast_page_as_ptr(), 0x00, PAGE_SIZE); + ASSERT(&m_page_table == &PageTable::current()); + PageTable::with_fast_page(physical_page->paddr, [vaddr] { + memcpy(reinterpret_cast(vaddr), PageTable::fast_page_as_ptr(), PAGE_SIZE); }); + if (--physical_page->ref_count == 0) + delete physical_page; + physical_page = new_physical_page; + return true; } @@ -67,16 +108,20 @@ { ASSERT(&PageTable::current() == &m_page_table); + LockGuard _(m_mutex); + const size_t aligned_size = (m_size + PAGE_SIZE - 1) & PAGE_ADDR_MASK; auto result = TRY(MemoryBackedRegion::create(new_page_table, m_size, { .start 
= m_vaddr, .end = m_vaddr + aligned_size }, m_type, m_flags, m_status_flags)); - for (size_t offset = 0; offset < m_size; offset += PAGE_SIZE) + if (writable()) + m_page_table.remove_writable_from_range(m_vaddr, m_size); + + for (size_t i = 0; i < m_physical_pages.size(); i++) { - paddr_t paddr = m_page_table.physical_address_of(m_vaddr + offset); - if (paddr == 0) + if (m_physical_pages[i] == nullptr) continue; - const size_t to_copy = BAN::Math::min(PAGE_SIZE, m_size - offset); - TRY(result->copy_data_to_region(offset, (const uint8_t*)(m_vaddr + offset), to_copy)); + result->m_physical_pages[i] = m_physical_pages[i]; + result->m_physical_pages[i]->ref_count++; } return BAN::UniqPtr(BAN::move(result)); @@ -87,20 +132,35 @@ namespace Kernel ASSERT(offset && offset < m_size); ASSERT(offset % PAGE_SIZE == 0); - auto* new_region = new MemoryBackedRegion(m_page_table, m_size - offset, m_type, m_flags, m_status_flags); - if (new_region == nullptr) + LockGuard _(m_mutex); + + auto* new_region_ptr = new MemoryBackedRegion(m_page_table, m_size - offset, m_type, m_flags, m_status_flags); + if (new_region_ptr == nullptr) return BAN::Error::from_errno(ENOMEM); + auto new_region = BAN::UniqPtr::adopt(new_region_ptr); + new_region->m_vaddr = m_vaddr + offset; + const size_t moved_pages = (m_size - offset + PAGE_SIZE - 1) / PAGE_SIZE; + TRY(new_region->m_physical_pages.resize(moved_pages)); + + const size_t remaining_pages = m_physical_pages.size() - moved_pages; + + for (size_t i = 0; i < moved_pages; i++) + new_region->m_physical_pages[i] = m_physical_pages[remaining_pages + i]; + MUST(m_physical_pages.resize(remaining_pages)); + m_size = offset; - return BAN::UniqPtr::adopt(new_region); + return BAN::UniqPtr(BAN::move(new_region)); } BAN::ErrorOr MemoryBackedRegion::copy_data_to_region(size_t offset_into_region, const uint8_t* buffer, size_t buffer_size) { ASSERT(offset_into_region + buffer_size <= m_size); + LockGuard _(m_mutex); + size_t written = 0; while (written < 
buffer_size) { @@ -108,18 +168,18 @@ namespace Kernel vaddr_t page_offset = write_vaddr % PAGE_SIZE; size_t bytes = BAN::Math::min(buffer_size - written, PAGE_SIZE - page_offset); - paddr_t paddr = m_page_table.physical_address_of(write_vaddr & PAGE_ADDR_MASK); - if (paddr == 0) + if (!(m_page_table.get_page_flags(write_vaddr & PAGE_ADDR_MASK) & PageTable::ReadWrite)) { - if (!TRY(allocate_page_containing(write_vaddr, false))) + if (!TRY(allocate_page_containing(write_vaddr, true))) { dwarnln("Could not allocate page for data copying"); return BAN::Error::from_errno(EFAULT); } - paddr = m_page_table.physical_address_of(write_vaddr & PAGE_ADDR_MASK); - ASSERT(paddr); } + const paddr_t paddr = m_page_table.physical_address_of(write_vaddr & PAGE_ADDR_MASK); + ASSERT(paddr); + PageTable::with_fast_page(paddr, [&] { memcpy(PageTable::fast_page_as_ptr(page_offset), (void*)(buffer + written), bytes); });