diff --git a/kernel/CMakeLists.txt b/kernel/CMakeLists.txt
index 893f71d829..ccb7416d33 100644
--- a/kernel/CMakeLists.txt
+++ b/kernel/CMakeLists.txt
@@ -29,6 +29,8 @@ set(KERNEL_SOURCES
     kernel/Memory/GeneralAllocator.cpp
     kernel/Memory/Heap.cpp
     kernel/Memory/kmalloc.cpp
+    kernel/Memory/PhysicalRange.cpp
+    kernel/Memory/VirtualRange.cpp
     kernel/Panic.cpp
     kernel/PCI.cpp
     kernel/PIC.cpp
diff --git a/kernel/include/kernel/Memory/Heap.h b/kernel/include/kernel/Memory/Heap.h
index e742cb8616..92ed0d50bc 100644
--- a/kernel/include/kernel/Memory/Heap.h
+++ b/kernel/include/kernel/Memory/Heap.h
@@ -3,50 +3,11 @@
 #include <BAN/Vector.h>
 #include <stddef.h>
-#include <stdint.h>
-
-#define PAGE_SIZE 4096
+#include <kernel/Memory/PhysicalRange.h>
+#include <kernel/SpinLock.h>
 
 namespace Kernel
 {
-
-    using vaddr_t = uintptr_t;
-    using paddr_t = uintptr_t;
-
-    class PhysicalRange
-    {
-    public:
-        PhysicalRange(paddr_t, size_t);
-        paddr_t reserve_page();
-        void release_page(paddr_t);
-
-        paddr_t start() const { return m_start; }
-        paddr_t end() const { return m_start + m_size; }
-        bool contains(paddr_t addr) const { return m_start <= addr && addr < m_start + m_size; }
-
-        size_t usable_memory() const { return m_reservable_pages * PAGE_SIZE; }
-
-    private:
-        struct node
-        {
-            node* next;
-            node* prev;
-        };
-
-        paddr_t page_address(const node*) const;
-        node* node_address(paddr_t) const;
-
-    private:
-        paddr_t m_start { 0 };
-        size_t m_size { 0 };
-
-        uint64_t m_total_pages { 0 };
-        uint64_t m_reservable_pages { 0 };
-        uint64_t m_list_pages { 0 };
-
-        node* m_free_list { nullptr };
-        node* m_used_list { nullptr };
-    };
 
     class Heap
     {
@@ -65,7 +26,8 @@ namespace Kernel
         void initialize_impl();
 
     private:
-        BAN::Vector<PhysicalRange> m_physical_ranges;
+        BAN::Vector<PhysicalRange> m_physical_ranges;
+        SpinLock m_lock;
     };
 
 }
diff --git a/kernel/include/kernel/Memory/PhysicalRange.h b/kernel/include/kernel/Memory/PhysicalRange.h
new file mode 100644
index 0000000000..39335146fb
--- /dev/null
+++ b/kernel/include/kernel/Memory/PhysicalRange.h
@@ -0,0 +1,46 @@
+#pragma once
+
+#include <kernel/Memory/Types.h>
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace Kernel
+{
+
+    class PhysicalRange
+    {
+    public:
+        PhysicalRange(paddr_t, size_t);
+        paddr_t reserve_page();
+        void release_page(paddr_t);
+
+        paddr_t start() const { return m_start; }
+        paddr_t end() const { return m_start + m_size; }
+        bool contains(paddr_t addr) const { return m_start <= addr && addr < m_start + m_size; }
+
+        size_t usable_memory() const { return m_reservable_pages * PAGE_SIZE; }
+
+    private:
+        struct node
+        {
+            node* next;
+            node* prev;
+        };
+
+        paddr_t page_address(const node*) const;
+        node* node_address(paddr_t) const;
+
+    private:
+        paddr_t m_start { 0 };
+        size_t m_size { 0 };
+
+        uint64_t m_total_pages { 0 };
+        uint64_t m_reservable_pages { 0 };
+        uint64_t m_list_pages { 0 };
+
+        node* m_free_list { nullptr };
+        node* m_used_list { nullptr };
+    };
+
+}
\ No newline at end of file
diff --git a/kernel/include/kernel/Memory/Types.h b/kernel/include/kernel/Memory/Types.h
new file mode 100644
index 0000000000..3af4098f18
--- /dev/null
+++ b/kernel/include/kernel/Memory/Types.h
@@ -0,0 +1,11 @@
+#pragma once
+
+#define PAGE_SIZE 4096
+
+namespace Kernel
+{
+
+    using vaddr_t = uintptr_t;
+    using paddr_t = uintptr_t;
+
+}
\ No newline at end of file
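
Types.h now carries PAGE_SIZE and the paddr_t/vaddr_t aliases that the range classes below share. (Note that it uses uintptr_t without pulling in <stdint.h> itself, so includers need that in scope, as PhysicalRange.h above provides.) The page-rounding arithmetic that PhysicalRange's constructor performs inline can be illustrated with a small host-side sketch; the helper names here are hypothetical and not part of this patch:

    #include <stdint.h>

    #define PAGE_SIZE 4096

    using paddr_t = uintptr_t;

    // Hypothetical helpers; the patch writes this arithmetic out inline.
    constexpr paddr_t page_align_up(paddr_t addr)
    {
        // round up to the next page boundary (no-op when already aligned)
        if (auto rem = addr % PAGE_SIZE)
            addr += PAGE_SIZE - rem;
        return addr;
    }

    constexpr paddr_t page_align_down(paddr_t addr)
    {
        // drop the sub-page offset
        return addr - addr % PAGE_SIZE;
    }

    static_assert(page_align_up(0x1001) == 0x2000, "rounds up to next page");
    static_assert(page_align_down(0x1fff) == 0x1000, "rounds down to page start");
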
diff --git a/kernel/include/kernel/Memory/VirtualRange.h b/kernel/include/kernel/Memory/VirtualRange.h
new file mode 100644
index 0000000000..9144b20d16
--- /dev/null
+++ b/kernel/include/kernel/Memory/VirtualRange.h
@@ -0,0 +1,38 @@
+#pragma once
+
+#include <BAN/NoCopyMove.h>
+#include <BAN/Vector.h>
+#include <kernel/Memory/Types.h>
+#include <kernel/MMU.h>
+
+namespace Kernel
+{
+
+    class VirtualRange
+    {
+        BAN_NON_COPYABLE(VirtualRange);
+        BAN_NON_MOVABLE(VirtualRange);
+
+    public:
+        static VirtualRange* create(MMU&, vaddr_t, size_t, uint8_t flags);
+        static VirtualRange* create_kmalloc(size_t);
+        ~VirtualRange();
+
+        VirtualRange* clone(MMU& new_mmu);
+
+        vaddr_t vaddr() const { return m_vaddr; }
+        size_t size() const { return m_size; }
+        uint8_t flags() const { return m_flags; }
+
+    private:
+        VirtualRange(MMU&);
+
+    private:
+        MMU& m_mmu;
+        vaddr_t m_vaddr { 0 };
+        size_t m_size { 0 };
+        uint8_t m_flags { 0 };
+        BAN::Vector<paddr_t> m_physical_pages;
+    };
+
+}
\ No newline at end of file
diff --git a/kernel/kernel/Memory/Heap.cpp b/kernel/kernel/Memory/Heap.cpp
index badb0281f9..015ef5bdbc 100644
--- a/kernel/kernel/Memory/Heap.cpp
+++ b/kernel/kernel/Memory/Heap.cpp
@@ -1,118 +1,11 @@
+#include <kernel/LockGuard.h>
 #include <kernel/Memory/Heap.h>
 #include <kernel/MMU.h>
 #include <kernel/multiboot.h>
 
-extern uint8_t g_kernel_end[];
-
 namespace Kernel
 {
-
-    PhysicalRange::PhysicalRange(paddr_t start, size_t size)
-    {
-        // We can't use the memory ovelapping with kernel
-        if (start + size <= (paddr_t)g_kernel_end)
-            return;
-
-        // Align start to page boundary and after the kernel memory
-        m_start = BAN::Math::max(start, (paddr_t)g_kernel_end);
-        if (auto rem = m_start % PAGE_SIZE)
-            m_start += PAGE_SIZE - rem;
-
-        if (size <= m_start - start)
-            return;
-
-        // Align size to page boundary
-        m_size = size - (m_start - start);
-        if (auto rem = m_size % PAGE_SIZE)
-            m_size -= rem;
-
-        // We need atleast 2 pages
-        m_total_pages = m_size / PAGE_SIZE;
-        if (m_total_pages <= 1)
-            return;
-
-        // FIXME: if total pages is just over multiple of (PAGE_SIZE / sizeof(node)) we might make
-        //        couple of pages unallocatable
-        m_list_pages = BAN::Math::div_round_up(m_total_pages * sizeof(node), PAGE_SIZE);
-        m_reservable_pages = m_total_pages - m_list_pages;
-
-        MMU::get().identity_map_range(m_start, m_list_pages * PAGE_SIZE, MMU::Flags::ReadWrite | MMU::Flags::Present);
-
-        // Initialize page list so that every page points to the next one
-        node* page_list = (node*)m_start;
-
-        ASSERT((paddr_t)&page_list[m_reservable_pages - 1] <= m_start + m_size);
-
-        for (uint64_t i = 0; i < m_reservable_pages; i++)
-            page_list[i] = { page_list + i - 1, page_list + i + 1 };
-        page_list[ 0                      ].next = nullptr;
-        page_list[m_reservable_pages - 1].prev = nullptr;
-
-        m_free_list = page_list;
-        m_used_list = nullptr;
-    }
-
-    paddr_t PhysicalRange::reserve_page()
-    {
-        if (m_free_list == nullptr)
-            return 0;
-
-        node* page = m_free_list;
-        ASSERT(page->next == nullptr);
-
-        // Detatch page from top of the free list
-        m_free_list = m_free_list->prev ? m_free_list->prev : nullptr;
-        if (m_free_list)
-            m_free_list->next = nullptr;
-
-        // Add page to used list
-        if (m_used_list)
-            m_used_list->next = page;
-        page->prev = m_used_list;
-        m_used_list = page;
-
-        return page_address(page);
-    }
-
-    void PhysicalRange::release_page(paddr_t page_address)
-    {
-        ASSERT(m_used_list);
-
-        node* page = node_address(page_address);
-
-        // Detach page from used list
-        if (page->prev)
-            page->prev->next = page->next;
-        if (page->next)
-            page->next->prev = page->prev;
-        if (m_used_list == page)
-            m_used_list = page->prev;
-
-        // Add page to the top of free list
-        page->prev = m_free_list;
-        page->next = nullptr;
-        if (m_free_list)
-            m_free_list->next = page;
-        m_free_list = page;
-    }
-
-    paddr_t PhysicalRange::page_address(const node* page) const
-    {
-        ASSERT((paddr_t)page <= m_start + m_reservable_pages * sizeof(node));
-        uint64_t page_index = page - (node*)m_start;
-        return m_start + (page_index + m_list_pages) * PAGE_SIZE;
-    }
-
-    PhysicalRange::node* PhysicalRange::node_address(paddr_t page_address) const
-    {
-        ASSERT(page_address % PAGE_SIZE == 0);
-        ASSERT(m_start + m_list_pages * PAGE_SIZE <= page_address && page_address < m_start + m_size);
-        uint64_t page_offset = page_address - (m_start + m_list_pages * PAGE_SIZE);
-        return (node*)m_start + page_offset / PAGE_SIZE;
-    }
-
 
     static Heap* s_instance = nullptr;
 
     void Heap::initialize()
@@ -160,6 +53,7 @@ namespace Kernel
 
     paddr_t Heap::take_free_page()
     {
+        LockGuard _(m_lock);
         for (auto& range : m_physical_ranges)
            if (paddr_t page = range.reserve_page())
                return page;
@@ -168,6 +62,7 @@
 
     void Heap::release_page(paddr_t addr)
    {
+        LockGuard _(m_lock);
         for (auto& range : m_physical_ranges)
         {
             if (range.contains(addr))
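
Heap::take_free_page() and Heap::release_page() can now race, so both walks over m_physical_ranges are serialized through the new m_lock. A minimal sketch of the RAII guard pattern in use here, assuming SpinLock exposes lock()/unlock(); the real kernel/SpinLock.h and kernel/LockGuard.h may differ in detail:

    #include <atomic>

    // Stand-ins for the kernel's SpinLock / LockGuard (assumed API).
    class SpinLock
    {
    public:
        void lock()   { while (m_locked.exchange(true, std::memory_order_acquire)) /* spin */; }
        void unlock() { m_locked.store(false, std::memory_order_release); }
    private:
        std::atomic<bool> m_locked { false };
    };

    template<typename Lock>
    class LockGuard
    {
    public:
        explicit LockGuard(Lock& lock) : m_lock(lock) { m_lock.lock(); } // acquire on construction
        ~LockGuard() { m_lock.unlock(); } // release on every exit path, including early returns
        LockGuard(const LockGuard&) = delete;
    private:
        Lock& m_lock;
    };

    static SpinLock s_lock;

    int take_free_page_like()
    {
        LockGuard _(s_lock); // mirrors 'LockGuard _(m_lock);' in the hunks above
        return 0;            // unlocked automatically here
    }
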
diff --git a/kernel/kernel/Memory/PhysicalRange.cpp b/kernel/kernel/Memory/PhysicalRange.cpp
new file mode 100644
index 0000000000..63a7f8c212
--- /dev/null
+++ b/kernel/kernel/Memory/PhysicalRange.cpp
@@ -0,0 +1,115 @@
+#include <BAN/Assert.h>
+#include <BAN/Math.h>
+#include <kernel/Memory/PhysicalRange.h>
+#include <kernel/MMU.h>
+
+extern uint8_t g_kernel_end[];
+
+namespace Kernel
+{
+
+    PhysicalRange::PhysicalRange(paddr_t start, size_t size)
+    {
+        // We can't use memory overlapping with the kernel
+        if (start + size <= (paddr_t)g_kernel_end)
+            return;
+
+        // Align start to a page boundary, past the kernel memory
+        m_start = BAN::Math::max(start, (paddr_t)g_kernel_end);
+        if (auto rem = m_start % PAGE_SIZE)
+            m_start += PAGE_SIZE - rem;
+
+        if (size <= m_start - start)
+            return;
+
+        // Align size to a page boundary
+        m_size = size - (m_start - start);
+        if (auto rem = m_size % PAGE_SIZE)
+            m_size -= rem;
+
+        // We need at least 2 pages
+        m_total_pages = m_size / PAGE_SIZE;
+        if (m_total_pages <= 1)
+            return;
+
+        // FIXME: if total pages is just over a multiple of (PAGE_SIZE / sizeof(node)) we might make
+        //        a couple of pages unallocatable
+        m_list_pages = BAN::Math::div_round_up(m_total_pages * sizeof(node), PAGE_SIZE);
+        m_reservable_pages = m_total_pages - m_list_pages;
+
+        MMU::kernel().identity_map_range(m_start, m_list_pages * PAGE_SIZE, MMU::Flags::ReadWrite | MMU::Flags::Present);
+
+        // Initialize page list so that every page points to the next one
+        node* page_list = (node*)m_start;
+
+        ASSERT((paddr_t)&page_list[m_reservable_pages - 1] <= m_start + m_size);
+
+        for (uint64_t i = 0; i < m_reservable_pages; i++)
+            page_list[i] = { page_list + i - 1, page_list + i + 1 };
+        page_list[ 0                      ].next = nullptr;
+        page_list[m_reservable_pages - 1].prev = nullptr;
+
+        m_free_list = page_list;
+        m_used_list = nullptr;
+    }
+
+    paddr_t PhysicalRange::reserve_page()
+    {
+        if (m_free_list == nullptr)
+            return 0;
+
+        node* page = m_free_list;
+        ASSERT(page->next == nullptr);
+
+        // Detach page from the top of the free list
+        m_free_list = m_free_list->prev ? m_free_list->prev : nullptr;
+        if (m_free_list)
+            m_free_list->next = nullptr;
+
+        // Add page to used list
+        if (m_used_list)
+            m_used_list->next = page;
+        page->prev = m_used_list;
+        m_used_list = page;
+
+        return page_address(page);
+    }
+
+    void PhysicalRange::release_page(paddr_t page_address)
+    {
+        ASSERT(m_used_list);
+
+        node* page = node_address(page_address);
+
+        // Detach page from used list
+        if (page->prev)
+            page->prev->next = page->next;
+        if (page->next)
+            page->next->prev = page->prev;
+        if (m_used_list == page)
+            m_used_list = page->prev;
+
+        // Add page to the top of free list
+        page->prev = m_free_list;
+        page->next = nullptr;
+        if (m_free_list)
+            m_free_list->next = page;
+        m_free_list = page;
+    }
+
+    paddr_t PhysicalRange::page_address(const node* page) const
+    {
+        ASSERT((paddr_t)page <= m_start + m_reservable_pages * sizeof(node));
+        uint64_t page_index = page - (node*)m_start;
+        return m_start + (page_index + m_list_pages) * PAGE_SIZE;
+    }
+
+    PhysicalRange::node* PhysicalRange::node_address(paddr_t page_address) const
+    {
+        ASSERT(page_address % PAGE_SIZE == 0);
+        ASSERT(m_start + m_list_pages * PAGE_SIZE <= page_address && page_address < m_start + m_size);
+        uint64_t page_offset = page_address - (m_start + m_list_pages * PAGE_SIZE);
+        return (node*)m_start + page_offset / PAGE_SIZE;
+    }
+
+}
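
The index arithmetic in page_address()/node_address() is the core of the allocator: node i lives at m_start + i * sizeof(node), and it tracks the page at m_start + (m_list_pages + i) * PAGE_SIZE, the first pages past the bookkeeping area. With 16-byte nodes (two pointers on x86-64) one list page covers PAGE_SIZE / 16 = 256 pages, so a 256-page range splits into 1 list page + 255 reservable pages, while a 257-page range needs ceil(257 * 16 / 4096) = 2 list pages yet still ends up with only 255 reservable pages; that wasted page is exactly the FIXME in the constructor. A host-side sketch (base address and list size hypothetical) checking that the two mappings are inverses:

    #include <cassert>
    #include <cstdint>

    struct node { node* next; node* prev; };
    using paddr_t = uintptr_t;

    constexpr paddr_t PAGE_SIZE = 4096;

    int main()
    {
        const paddr_t m_start = 0x100000;  // hypothetical range base (node array lives here)
        const uint64_t m_list_pages = 1;   // pages consumed by the node array

        node* page_list = reinterpret_cast<node*>(m_start);

        // page_address(): node index, offset past the bookkeeping pages
        auto page_address = [&](const node* n) -> paddr_t {
            uint64_t page_index = n - page_list;
            return m_start + (page_index + m_list_pages) * PAGE_SIZE;
        };

        // node_address(): inverse mapping from a page back to its node
        auto node_address = [&](paddr_t addr) -> node* {
            uint64_t page_offset = addr - (m_start + m_list_pages * PAGE_SIZE);
            return page_list + page_offset / PAGE_SIZE;
        };

        // node 5 describes the 6th page of the range, and back again
        assert(page_address(&page_list[5]) == m_start + 6 * PAGE_SIZE);
        assert(node_address(m_start + 6 * PAGE_SIZE) == &page_list[5]);
    }
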
diff --git a/kernel/kernel/Memory/VirtualRange.cpp b/kernel/kernel/Memory/VirtualRange.cpp
new file mode 100644
index 0000000000..ba1d4023d4
--- /dev/null
+++ b/kernel/kernel/Memory/VirtualRange.cpp
@@ -0,0 +1,93 @@
+#include <kernel/Memory/Heap.h>
+#include <kernel/Memory/VirtualRange.h>
+#include <kernel/MMUScope.h>
+
+namespace Kernel
+{
+
+    VirtualRange* VirtualRange::create(MMU& mmu, vaddr_t vaddr, size_t size, uint8_t flags)
+    {
+        ASSERT(size % PAGE_SIZE == 0);
+        ASSERT(vaddr % PAGE_SIZE == 0);
+        ASSERT(&mmu != &MMU::kernel());
+
+        VirtualRange* result = new VirtualRange(mmu);
+        ASSERT(result);
+
+        result->m_size = size;
+        result->m_flags = flags;
+        MUST(result->m_physical_pages.reserve(size / PAGE_SIZE));
+
+        mmu.lock();
+
+        if (vaddr == 0)
+        {
+            vaddr = mmu.get_free_contiguous_pages(size / PAGE_SIZE);
+            ASSERT(vaddr);
+        }
+
+        result->m_vaddr = vaddr;
+
+        ASSERT(mmu.is_range_free(vaddr, size));
+        for (size_t offset = 0; offset < size; offset += PAGE_SIZE)
+        {
+            paddr_t paddr = Heap::get().take_free_page();
+            ASSERT(paddr);
+            MUST(result->m_physical_pages.push_back(paddr));
+            mmu.map_page_at(paddr, vaddr + offset, flags);
+        }
+        mmu.unlock();
+
+        return result;
+    }
+
+    VirtualRange* VirtualRange::create_kmalloc(size_t size)
+    {
+        VirtualRange* result = new VirtualRange(MMU::kernel());
+        if (result == nullptr)
+            return nullptr;
+        result->m_size = size;
+        result->m_flags = MMU::Flags::ReadWrite | MMU::Flags::Present;
+        result->m_vaddr = (vaddr_t)kmalloc(size);
+        if (result->m_vaddr == 0)
+        {
+            delete result;
+            return nullptr;
+        }
+        return result;
+    }
+
+    VirtualRange::VirtualRange(MMU& mmu)
+        : m_mmu(mmu)
+    { }
+
+    VirtualRange::~VirtualRange()
+    {
+        if (&m_mmu == &MMU::kernel())
+        {
+            kfree((void*)m_vaddr);
+            return;
+        }
+
+        m_mmu.unmap_range(vaddr(), size());
+        for (paddr_t page : m_physical_pages)
+            Heap::get().release_page(page);
+    }
+
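
The two constructors return raw owning pointers with different failure behavior: create() asserts on exhaustion, while create_kmalloc() reports failure with nullptr and does no page-table work at all. A hypothetical pair of call sites (function names invented; flags and the page-aligned-size requirement come from the patch):

    #include <kernel/Memory/VirtualRange.h>

    using namespace Kernel;

    // Back a region with pages from Heap::take_free_page(), letting the
    // MMU pick the placement (vaddr == 0 means "find free contiguous pages").
    VirtualRange* allocate_region(MMU& mmu, size_t size)
    {
        return VirtualRange::create(mmu, 0, size,
            MMU::Flags::ReadWrite | MMU::Flags::Present);
    }

    // Kernel-only buffer: backed by kmalloc() memory in the kernel MMU;
    // returns nullptr instead of asserting when allocation fails.
    VirtualRange* allocate_kernel_buffer(size_t size)
    {
        return VirtualRange::create_kmalloc(size);
    }
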
+    VirtualRange* VirtualRange::clone(MMU& mmu)
+    {
+        VirtualRange* result = create(mmu, vaddr(), size(), flags());
+
+        MMUScope _(m_mmu);
+        ASSERT(m_mmu.is_page_free(0));
+        for (size_t i = 0; i < result->m_physical_pages.size(); i++)
+        {
+            m_mmu.map_page_at(result->m_physical_pages[i], 0, MMU::Flags::ReadWrite | MMU::Flags::Present);
+            memcpy((void*)0, (void*)(vaddr() + i * PAGE_SIZE), PAGE_SIZE);
+        }
+        m_mmu.unmap_page(0);
+
+        return result;
+    }
+
+}
\ No newline at end of file
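
clone() is the fork-style piece: it re-creates the range at the same vaddr in another address space, then copies the contents page by page through a transient mapping at virtual address 0, relying on MMUScope to switch to m_mmu for the duration and asserting that page 0 is free to serve as scratch. A hypothetical caller (name invented; API from the patch):

    #include <kernel/Memory/VirtualRange.h>

    using namespace Kernel;

    VirtualRange* fork_region(VirtualRange& parent, MMU& child_mmu)
    {
        // Reserves fresh physical pages in child_mmu at parent's vaddr and
        // memcpy()s each PAGE_SIZE chunk through the scratch mapping at 0.
        return parent.clone(child_mmu);
    }

Deleting a VirtualRange undoes either construction path: MMU-backed ranges are unmapped and their pages handed back to the Heap, while kmalloc-backed ranges are simply kfree()d.
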