Kernel: Be more clever with physical memory

Initially allocate all physical memory except the kernel image and the boot
modules. Previously we simply skipped all memory below the end of the
kernel's boot modules.
Also release memory used by boot modules after the kernel is up and
running. Once the boot modules are loaded, there is no need to keep them
in memory.
This commit is contained in:
2026-04-05 18:07:55 +03:00
parent 34b59f062b
commit e4c6539964
3 changed files with 113 additions and 21 deletions

View File

@@ -18,6 +18,8 @@ namespace Kernel
static void initialize();
static Heap& get();
void release_boot_modules();
paddr_t take_free_page();
void release_page(paddr_t);

View File

@@ -2,11 +2,62 @@
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/PageTable.h>
#include <BAN/Sort.h>
extern uint8_t g_kernel_end[];
namespace Kernel
{
// A span of physical memory [paddr, paddr + size) that must not be handed
// out by the physical page allocator (low memory, kernel image, boot modules).
struct ReservedRegion
{
paddr_t paddr; // physical start address of the region
size_t size; // length of the region in bytes
};
// Build the list of physical memory regions that may never be allocated:
// the low 1 MiB, the kernel image, and every boot module. The returned
// list is page aligned, sorted by start address, and contains no
// overlapping entries.
static BAN::Vector<ReservedRegion> get_reserved_regions()
{
	BAN::Vector<ReservedRegion> regions;
	MUST(regions.reserve(2 + g_boot_info.modules.size()));

	// Low 1 MiB, kernel image, then every boot module.
	MUST(regions.emplace_back(0, 0x100000));
	MUST(regions.emplace_back(g_boot_info.kernel_paddr, reinterpret_cast<size_t>(g_kernel_end - KERNEL_OFFSET)));
	for (const auto& module : g_boot_info.modules)
		MUST(regions.emplace_back(module.start, module.size));

	// Expand each region outwards so both ends lie on page boundaries.
	for (auto& region : regions)
	{
		const auto lead = region.paddr % PAGE_SIZE;
		region.paddr -= lead;
		region.size += lead;
		const auto tail = region.size % PAGE_SIZE;
		if (tail)
			region.size += PAGE_SIZE - tail;
	}

	// Order by start address, breaking ties with the smaller size first.
	BAN::sort::sort(regions.begin(), regions.end(),
		[](const auto& lhs, const auto& rhs) -> bool {
			if (lhs.paddr != rhs.paddr)
				return lhs.paddr < rhs.paddr;
			return lhs.size < rhs.size;
		}
	);

	// Merge neighbours that overlap or touch.
	for (size_t i = 1; i < regions.size(); i++)
	{
		auto& prev = regions[i - 1];
		auto& curr = regions[i];
		const bool disjoint = (prev.paddr > curr.paddr + curr.size) || (curr.paddr > prev.paddr + prev.size);
		if (disjoint)
			continue;
		prev.size = BAN::Math::max(prev.size, curr.paddr + curr.size - prev.paddr);
		regions.remove(i--);
	}

	return regions;
}
static Heap* s_instance = nullptr;
void Heap::initialize()
@@ -28,6 +79,7 @@ namespace Kernel
if (g_boot_info.memory_map_entries.empty())
panic("Bootloader did not provide a memory map");
auto reserved_regions = get_reserved_regions();
for (const auto& entry : g_boot_info.memory_map_entries)
{
const char* entry_type_string = nullptr;
@@ -58,33 +110,71 @@ namespace Kernel
if (entry.type != MemoryMapEntry::Type::Available)
continue;
// FIXME: only reserve kernel area and modules, not everything from 0 -> kernel end
paddr_t start = entry.address;
if (start < (vaddr_t)g_kernel_end - KERNEL_OFFSET + g_boot_info.kernel_paddr)
start = (vaddr_t)g_kernel_end - KERNEL_OFFSET + g_boot_info.kernel_paddr;
for (const auto& module : g_boot_info.modules)
if (start < module.start + module.size)
start = module.start + module.size;
paddr_t e_start = entry.address;
if (auto rem = e_start % PAGE_SIZE)
e_start = PAGE_SIZE - rem;
paddr_t e_end = entry.address + entry.length;
if (auto rem = e_end % PAGE_SIZE)
e_end -= rem;
for (const auto& reserved_region : reserved_regions)
{
const paddr_t r_start = reserved_region.paddr;
const paddr_t r_end = reserved_region.paddr + reserved_region.size;
if (r_end < e_start)
continue;
if (r_start > e_end)
break;
const paddr_t end = BAN::Math::max(e_start, r_start);
if (e_start + 2 * PAGE_SIZE <= end)
MUST(m_physical_ranges.emplace_back(e_start, end - e_start));
e_start = BAN::Math::max(e_start, BAN::Math::min(e_end, r_end));
}
if (e_start + 2 * PAGE_SIZE <= e_end)
MUST(m_physical_ranges.emplace_back(e_start, e_end - e_start));
}
size_t total_kibi_bytes = 0;
for (auto& range : m_physical_ranges)
{
const size_t kibi_bytes = range.usable_memory() / 1024;
dprintln("RAM {8H}->{8H} ({}.{3} MiB)", range.start(), range.end(), kibi_bytes / 1024, kibi_bytes % 1024);
total_kibi_bytes += kibi_bytes;
}
dprintln("Total RAM {}.{3} MiB", total_kibi_bytes / 1024, total_kibi_bytes % 1024);
}
void Heap::release_boot_modules()
{
const auto modules = BAN::move(g_boot_info.modules);
size_t kibi_bytes = 0;
for (const auto& module : modules)
{
vaddr_t start = module.start;
if (auto rem = start % PAGE_SIZE)
start += PAGE_SIZE - rem;
paddr_t end = entry.address + entry.length;
vaddr_t end = module.start + module.size;
if (auto rem = end % PAGE_SIZE)
end -= rem;
// Physical ranges need at least 2 pages
if (end > start + PAGE_SIZE)
MUST(m_physical_ranges.emplace_back(start, end - start));
const size_t size = end - start;
if (size < 2 * PAGE_SIZE)
continue;
SpinLockGuard _(m_lock);
MUST(m_physical_ranges.emplace_back(start, size));
kibi_bytes += m_physical_ranges.back().usable_memory() / 1024;
}
size_t total = 0;
for (auto& range : m_physical_ranges)
{
size_t bytes = range.usable_memory();
dprintln("RAM {8H}->{8H} ({}.{} MB)", range.start(), range.end(), bytes / (1 << 20), bytes % (1 << 20) * 1000 / (1 << 20));
total += bytes;
}
dprintln("Total RAM {}.{} MB", total / (1 << 20), total % (1 << 20) * 1000 / (1 << 20));
if (kibi_bytes)
dprintln("Released {}.{3} MiB of RAM from boot modules", kibi_bytes / 1024, kibi_bytes % 1024);
}
paddr_t Heap::take_free_page()

View File

@@ -255,8 +255,8 @@ static void init2(void*)
VirtualFileSystem::initialize(cmdline.root);
dprintln("VFS initialized");
// FIXME: release memory used by modules. If modules are used
// they are already loaded in here
// NOTE: All modules should be loaded
Heap::get().release_boot_modules();
TTY::initialize_devices();