forked from Bananymous/banan-os

update main #1

@ -1,4 +1,5 @@
#include <BAN/ScopeGuard.h>
#include <kernel/CriticalScope.h>
#include <kernel/Memory/Heap.h>
#include <kernel/LockGuard.h>
#include <LibELF/LoadableELF.h>

@ -306,9 +307,12 @@ namespace LibELF
					if (paddr == 0)
						return BAN::Error::from_errno(ENOMEM);

					m_page_table.map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
					memcpy((void*)0, (void*)(start + i * PAGE_SIZE), PAGE_SIZE);
					m_page_table.unmap_page(0);
					{
						CriticalScope _;
						PageTable::map_fast_page(paddr);
						memcpy(PageTable::fast_page_as_ptr(), (void*)(start + i * PAGE_SIZE), PAGE_SIZE);
						PageTable::unmap_fast_page();
					}

					new_page_table.map_page_at(paddr, start + i * PAGE_SIZE, flags);
					elf->m_physical_page_count++;

@ -1,6 +1,6 @@
#include <BAN/Errors.h>
#include <kernel/Arch.h>
#include <kernel/CPUID.h>
#include <kernel/InterruptController.h>
#include <kernel/LockGuard.h>
#include <kernel/Memory/kmalloc.h>
#include <kernel/Memory/PageTable.h>

@ -143,6 +143,8 @@ namespace Kernel
		uint64_t* pml4 = (uint64_t*)P2V(m_highest_paging_struct);
		pml4[511] = s_global_pml4e;

		prepare_fast_page();

		// Map (phys_kernel_start -> phys_kernel_end) to (virt_kernel_start -> virt_kernel_end)
		ASSERT((vaddr_t)g_kernel_start % PAGE_SIZE == 0);
		map_range_at(

@ -185,6 +187,76 @@ namespace Kernel
		g_multiboot2_info = (multiboot2_info_t*)(multiboot2_vaddr + ((vaddr_t)g_multiboot2_info % PAGE_SIZE));
	}

	void PageTable::prepare_fast_page()
	{
		constexpr vaddr_t uc_vaddr = uncanonicalize(fast_page());
		constexpr uint64_t pml4e = (uc_vaddr >> 39) & 0x1FF;
		constexpr uint64_t pdpte = (uc_vaddr >> 30) & 0x1FF;
		constexpr uint64_t pde   = (uc_vaddr >> 21) & 0x1FF;
		constexpr uint64_t pte   = (uc_vaddr >> 12) & 0x1FF;

		uint64_t* pml4 = (uint64_t*)P2V(m_highest_paging_struct);
		ASSERT(!(pml4[pml4e] & Flags::Present));
		pml4[pml4e] = V2P(allocate_zeroed_page_aligned_page()) | Flags::ReadWrite | Flags::Present;

		uint64_t* pdpt = (uint64_t*)P2V(pml4[pml4e] & PAGE_ADDR_MASK);
		ASSERT(!(pdpt[pdpte] & Flags::Present));
		pdpt[pdpte] = V2P(allocate_zeroed_page_aligned_page()) | Flags::ReadWrite | Flags::Present;

		uint64_t* pd = (uint64_t*)P2V(pdpt[pdpte] & PAGE_ADDR_MASK);
		ASSERT(!(pd[pde] & Flags::Present));
		pd[pde] = V2P(allocate_zeroed_page_aligned_page()) | Flags::ReadWrite | Flags::Present;

		uint64_t* pt = (uint64_t*)P2V(pd[pde] & PAGE_ADDR_MASK);
		ASSERT(!(pt[pte] & Flags::Present));
		pt[pte] = V2P(allocate_zeroed_page_aligned_page());
	}

	void PageTable::map_fast_page(paddr_t paddr)
	{
		ASSERT(s_kernel);
		ASSERT_GE(paddr, 0);
		ASSERT(!interrupts_enabled());

		constexpr vaddr_t uc_vaddr = uncanonicalize(fast_page());
		constexpr uint64_t pml4e = (uc_vaddr >> 39) & 0x1FF;
		constexpr uint64_t pdpte = (uc_vaddr >> 30) & 0x1FF;
		constexpr uint64_t pde   = (uc_vaddr >> 21) & 0x1FF;
		constexpr uint64_t pte   = (uc_vaddr >> 12) & 0x1FF;

		uint64_t* pml4 = (uint64_t*)P2V(s_kernel->m_highest_paging_struct);
		uint64_t* pdpt = (uint64_t*)P2V(pml4[pml4e] & PAGE_ADDR_MASK);
		uint64_t* pd   = (uint64_t*)P2V(pdpt[pdpte] & PAGE_ADDR_MASK);
		uint64_t* pt   = (uint64_t*)P2V(pd[pde] & PAGE_ADDR_MASK);

		ASSERT(!(pt[pte] & Flags::Present));
		pt[pte] = paddr | Flags::ReadWrite | Flags::Present;

		invalidate(fast_page());
	}

	void PageTable::unmap_fast_page()
	{
		ASSERT(s_kernel);
		ASSERT(!interrupts_enabled());

		constexpr vaddr_t uc_vaddr = uncanonicalize(fast_page());
		constexpr uint64_t pml4e = (uc_vaddr >> 39) & 0x1FF;
		constexpr uint64_t pdpte = (uc_vaddr >> 30) & 0x1FF;
		constexpr uint64_t pde   = (uc_vaddr >> 21) & 0x1FF;
		constexpr uint64_t pte   = (uc_vaddr >> 12) & 0x1FF;

		uint64_t* pml4 = (uint64_t*)P2V(s_kernel->m_highest_paging_struct);
		uint64_t* pdpt = (uint64_t*)P2V(pml4[pml4e] & PAGE_ADDR_MASK);
		uint64_t* pd   = (uint64_t*)P2V(pdpt[pdpte] & PAGE_ADDR_MASK);
		uint64_t* pt   = (uint64_t*)P2V(pd[pde] & PAGE_ADDR_MASK);

		ASSERT(pt[pte] & Flags::Present);
		pt[pte] = 0;

		invalidate(fast_page());
	}

	BAN::ErrorOr<PageTable*> PageTable::create_userspace()
	{
		LockGuard _(s_kernel->m_lock);

@ -246,13 +318,16 @@ namespace Kernel
	void PageTable::invalidate(vaddr_t vaddr)
	{
		ASSERT(vaddr % PAGE_SIZE == 0);
		if (this == s_current)
			asm volatile("invlpg (%0)" :: "r"(vaddr) : "memory");
		asm volatile("invlpg (%0)" :: "r"(vaddr) : "memory");
	}

	void PageTable::unmap_page(vaddr_t vaddr)
	{
		if (vaddr && (vaddr >= KERNEL_OFFSET) != (this == s_kernel))
		ASSERT(vaddr);
		ASSERT(vaddr != fast_page());
		if (vaddr >= KERNEL_OFFSET)
			ASSERT_GE(vaddr, (vaddr_t)g_kernel_start);
		if ((vaddr >= KERNEL_OFFSET) != (this == s_kernel))
			Kernel::panic("unmapping {8H}, kernel: {}", vaddr, this == s_kernel);

		ASSERT(is_canonical(vaddr));

@ -294,7 +369,11 @@ namespace Kernel

	void PageTable::map_page_at(paddr_t paddr, vaddr_t vaddr, flags_t flags)
	{
		if (vaddr && (vaddr >= KERNEL_OFFSET) != (this == s_kernel))
		ASSERT(vaddr);
		ASSERT(vaddr != fast_page());
		if (vaddr >= KERNEL_OFFSET)
			ASSERT_GE(vaddr, (vaddr_t)g_kernel_start);
		if ((vaddr >= KERNEL_OFFSET) != (this == s_kernel))
			Kernel::panic("mapping {8H} to {8H}, kernel: {}", paddr, vaddr, this == s_kernel);

		ASSERT(is_canonical(vaddr));

@ -361,12 +440,11 @@ namespace Kernel
	{
		ASSERT(is_canonical(vaddr));

		ASSERT(vaddr);
		ASSERT(paddr % PAGE_SIZE == 0);
		ASSERT(vaddr % PAGE_SIZE == 0);

		size_t first_page = vaddr / PAGE_SIZE;
		size_t last_page = (vaddr + size - 1) / PAGE_SIZE;
		size_t page_count = last_page - first_page + 1;
		size_t page_count = range_page_count(vaddr, size);

		LockGuard _(m_lock);
		for (size_t page = 0; page < page_count; page++)

@ -527,6 +605,8 @@ namespace Kernel

	vaddr_t PageTable::reserve_free_contiguous_pages(size_t page_count, vaddr_t first_address, vaddr_t last_address)
	{
		if (first_address >= KERNEL_OFFSET && first_address < (vaddr_t)g_kernel_start)
			first_address = (vaddr_t)g_kernel_start;
		if (size_t rem = first_address % PAGE_SIZE)
			first_address += PAGE_SIZE - rem;
		if (size_t rem = last_address % PAGE_SIZE)
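
prepare_fast_page, map_fast_page, and unmap_fast_page above all repeat the same shift-and-mask split of the uncanonicalized address into its four paging-structure indices (9 bits per level, 12 low bits of page offset). A self-contained sketch of that split with an arbitrary example address (the address and the program itself are illustrative, not part of the commit):

#include <cstdint>
#include <cstdio>

int main()
{
	// Arbitrary example address, not the actual fast_page()/KERNEL_OFFSET value.
	const uint64_t uc_vaddr = 0x00007F8012345000;

	// Same shifts and masks as in the functions above: each x86-64 paging
	// level indexes one of 512 entries, so each level consumes 9 bits.
	const uint64_t pml4e = (uc_vaddr >> 39) & 0x1FF;
	const uint64_t pdpte = (uc_vaddr >> 30) & 0x1FF;
	const uint64_t pde   = (uc_vaddr >> 21) & 0x1FF;
	const uint64_t pte   = (uc_vaddr >> 12) & 0x1FF;

	std::printf("pml4e=%llu pdpte=%llu pde=%llu pte=%llu\n",
		(unsigned long long)pml4e, (unsigned long long)pdpte,
		(unsigned long long)pde, (unsigned long long)pte);
	return 0;
}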

@ -38,6 +38,7 @@ namespace Kernel
		BAN::RefPtr<Inode> m_inode;
		const off_t m_offset;

		// FIXME: is this even synchronized?
		BAN::RefPtr<SharedFileData> m_shared_data;
	};

@ -29,6 +29,24 @@ namespace Kernel
		static PageTable& kernel();
		static PageTable& current();

		static void map_fast_page(paddr_t);
		static void unmap_fast_page();
		static constexpr vaddr_t fast_page() { return KERNEL_OFFSET; }

		// FIXME: implement sized checks, return span, etc
		static void* fast_page_as_ptr(size_t offset = 0)
		{
			ASSERT(offset <= PAGE_SIZE);
			return reinterpret_cast<void*>(fast_page() + offset);
		}

		template<typename T>
		static T& fast_page_as(size_t offset = 0)
		{
			ASSERT(offset + sizeof(T) <= PAGE_SIZE);
			return *reinterpret_cast<T*>(fast_page() + offset);
		}

		static bool is_valid_pointer(uintptr_t);

		static BAN::ErrorOr<PageTable*> create_userspace();

@ -64,7 +82,8 @@ namespace Kernel
		uint64_t get_page_data(vaddr_t) const;
		void initialize_kernel();
		void map_kernel_memory();
		void invalidate(vaddr_t);
		void prepare_fast_page();
		static void invalidate(vaddr_t);

	private:
		paddr_t						m_highest_paging_struct { 0 };
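
The declarations above define the whole fast-page interface; every call site in this commit uses it with the same map/access/unmap sequence. A minimal sketch of that pattern, assuming kernel context (the helper name and signature are illustrative, not part of the commit):

// Hypothetical helper: copy one page of data into an arbitrary physical page
// through the single reserved fast-page slot. Interrupts stay disabled for the
// whole sequence because every caller shares the virtual address returned by
// PageTable::fast_page().
static void copy_to_physical_page(paddr_t dst_paddr, const void* src, size_t bytes)
{
	ASSERT(bytes <= PAGE_SIZE);
	CriticalScope _;                      // the slot is not re-entrant
	PageTable::map_fast_page(dst_paddr);  // point fast_page() at dst_paddr
	memcpy(PageTable::fast_page_as_ptr(), src, bytes);
	PageTable::unmap_fast_page();         // leave the slot free for the next caller
}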

@ -117,33 +117,33 @@ namespace Kernel

		if (rsdp->revision >= 2)
		{
			PageTable::kernel().map_page_at(rsdp->xsdt_address & PAGE_ADDR_MASK, 0, PageTable::Flags::Present);
			const XSDT* xsdt = (const XSDT*)(rsdp->xsdt_address % PAGE_SIZE);
			BAN::ScopeGuard _([xsdt] { PageTable::kernel().unmap_page(0); });
			PageTable::map_fast_page(rsdp->xsdt_address & PAGE_ADDR_MASK);
			auto& xsdt = PageTable::fast_page_as<const XSDT>(rsdp->xsdt_address % PAGE_SIZE);
			BAN::ScopeGuard _([] { PageTable::unmap_fast_page(); });

			if (memcmp(xsdt->signature, "XSDT", 4) != 0)
			if (memcmp(xsdt.signature, "XSDT", 4) != 0)
				return BAN::Error::from_error_code(ErrorCode::ACPI_RootInvalid);
			if (!is_valid_std_header(xsdt))
			if (!is_valid_std_header(&xsdt))
				return BAN::Error::from_error_code(ErrorCode::ACPI_RootInvalid);

			m_header_table_paddr = (paddr_t)xsdt->entries + (rsdp->rsdt_address & PAGE_ADDR_MASK);
			m_header_table_paddr = rsdp->xsdt_address + offsetof(XSDT, entries);
			m_entry_size = 8;
			root_entry_count = (xsdt->length - sizeof(SDTHeader)) / 8;
			root_entry_count = (xsdt.length - sizeof(SDTHeader)) / 8;
		}
		else
		{
			PageTable::kernel().map_page_at(rsdp->rsdt_address & PAGE_ADDR_MASK, 0, PageTable::Flags::Present);
			const RSDT* rsdt = (const RSDT*)((vaddr_t)rsdp->rsdt_address % PAGE_SIZE);
			BAN::ScopeGuard _([rsdt] { PageTable::kernel().unmap_page(0); });
			PageTable::map_fast_page(rsdp->rsdt_address & PAGE_ADDR_MASK);
			auto& rsdt = PageTable::fast_page_as<const RSDT>(rsdp->rsdt_address % PAGE_SIZE);
			BAN::ScopeGuard _([] { PageTable::unmap_fast_page(); });

			if (memcmp(rsdt->signature, "RSDT", 4) != 0)
			if (memcmp(rsdt.signature, "RSDT", 4) != 0)
				return BAN::Error::from_error_code(ErrorCode::ACPI_RootInvalid);
			if (!is_valid_std_header(rsdt))
			if (!is_valid_std_header(&rsdt))
				return BAN::Error::from_error_code(ErrorCode::ACPI_RootInvalid);

			m_header_table_paddr = (paddr_t)rsdt->entries + (rsdp->rsdt_address & PAGE_ADDR_MASK);
			m_header_table_paddr = rsdp->rsdt_address + offsetof(RSDT, entries);
			m_entry_size = 4;
			root_entry_count = (rsdt->length - sizeof(SDTHeader)) / 4;
			root_entry_count = (rsdt.length - sizeof(SDTHeader)) / 4;
		}

		size_t needed_pages = range_page_count(m_header_table_paddr, root_entry_count * m_entry_size);

@ -162,9 +162,9 @@ namespace Kernel
		auto map_header =
			[](paddr_t header_paddr) -> vaddr_t
			{
				PageTable::kernel().map_page_at(header_paddr & PAGE_ADDR_MASK, 0, PageTable::Flags::Present);
				size_t header_length = ((SDTHeader*)(header_paddr % PAGE_SIZE))->length;
				PageTable::kernel().unmap_page(0);
				PageTable::map_fast_page(header_paddr & PAGE_ADDR_MASK);
				size_t header_length = PageTable::fast_page_as<SDTHeader>(header_paddr % PAGE_SIZE).length;
				PageTable::unmap_fast_page();

				size_t needed_pages = range_page_count(header_paddr, header_length);
				vaddr_t page_vaddr = PageTable::kernel().reserve_free_contiguous_pages(needed_pages, KERNEL_OFFSET);

@ -1,3 +1,4 @@
#include <kernel/CriticalScope.h>
#include <kernel/LockGuard.h>
#include <kernel/Memory/FileBackedRegion.h>
#include <kernel/Memory/Heap.h>

@ -71,13 +72,10 @@ namespace Kernel
				continue;

			{
				auto& page_table = PageTable::current();
				LockGuard _(page_table);
				ASSERT(page_table.is_page_free(0));

				page_table.map_page_at(pages[i], 0, PageTable::Flags::Present);
				memcpy(page_buffer, (void*)0, PAGE_SIZE);
				page_table.unmap_page(0);
				CriticalScope _;
				PageTable::map_fast_page(pages[i]);
				memcpy(page_buffer, PageTable::fast_page_as_ptr(), PAGE_SIZE);
				PageTable::unmap_fast_page();
			}

			if (auto ret = inode->write(i * PAGE_SIZE, BAN::ConstByteSpan::from(page_buffer)); ret.is_error())

@ -105,23 +103,8 @@ namespace Kernel
			size_t file_offset = m_offset + (vaddr - m_vaddr);
			size_t bytes = BAN::Math::min<size_t>(m_size - file_offset, PAGE_SIZE);

			BAN::ErrorOr<size_t> read_ret = 0;

			// Zero out the new page
			if (&PageTable::current() == &m_page_table)
				read_ret = m_inode->read(file_offset, BAN::ByteSpan((uint8_t*)vaddr, bytes));
			else
			{
				auto& page_table = PageTable::current();

				LockGuard _(page_table);
				ASSERT(page_table.is_page_free(0));

				page_table.map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
				read_ret = m_inode->read(file_offset, BAN::ByteSpan((uint8_t*)0, bytes));
				memset((void*)0, 0x00, PAGE_SIZE);
				page_table.unmap_page(0);
			}
			ASSERT_EQ(&PageTable::current(), &m_page_table);
			auto read_ret = m_inode->read(file_offset, BAN::ByteSpan((uint8_t*)vaddr, bytes));

			if (read_ret.is_error())
			{

@ -158,15 +141,10 @@ namespace Kernel

				TRY(m_inode->read(offset, BAN::ByteSpan(m_shared_data->page_buffer, bytes)));

				auto& page_table = PageTable::current();

				// TODO: check if this can cause deadlock?
				LockGuard page_table_lock(page_table);
				ASSERT(page_table.is_page_free(0));

				page_table.map_page_at(pages[page_index], 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
				memcpy((void*)0, m_shared_data->page_buffer, PAGE_SIZE);
				page_table.unmap_page(0);
				CriticalScope _;
				PageTable::map_fast_page(pages[page_index]);
				memcpy(PageTable::fast_page_as_ptr(), m_shared_data->page_buffer, PAGE_SIZE);
				PageTable::unmap_fast_page();
			}

			paddr_t paddr = pages[page_index];

@ -1,3 +1,4 @@
#include <kernel/CriticalScope.h>
#include <kernel/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/MemoryBackedRegion.h>

@ -60,12 +61,10 @@ namespace Kernel
			memset((void*)vaddr, 0x00, PAGE_SIZE);
		else
		{
			LockGuard _(PageTable::current());
			ASSERT(PageTable::current().is_page_free(0));

			PageTable::current().map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
			memset((void*)0, 0x00, PAGE_SIZE);
			PageTable::current().unmap_page(0);
			CriticalScope _;
			PageTable::map_fast_page(paddr);
			memset(PageTable::fast_page_as_ptr(), 0x00, PAGE_SIZE);
			PageTable::unmap_fast_page();
		}

		return true;

@ -105,15 +104,10 @@ namespace Kernel
				memcpy((void*)write_vaddr, (void*)(buffer + written), bytes);
			else
			{
				paddr_t paddr = m_page_table.physical_address_of(write_vaddr & PAGE_ADDR_MASK);
				ASSERT(paddr);

				LockGuard _(PageTable::current());
				ASSERT(PageTable::current().is_page_free(0));

				PageTable::current().map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
				memcpy((void*)page_offset, (void*)(buffer + written), bytes);
				PageTable::current().unmap_page(0);
				CriticalScope _;
				PageTable::map_fast_page(m_page_table.physical_address_of(write_vaddr & PAGE_ADDR_MASK));
				memcpy(PageTable::fast_page_as_ptr(page_offset), (void*)(buffer + written), bytes);
				PageTable::unmap_fast_page();
			}

			written += bytes;

@ -1,3 +1,4 @@
#include <kernel/CriticalScope.h>
#include <kernel/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/VirtualRange.h>

@ -124,7 +125,6 @@ namespace Kernel
		auto result = TRY(create_to_vaddr(page_table, vaddr(), size(), flags(), m_preallocated));

		LockGuard _(m_page_table);
		ASSERT(m_page_table.is_page_free(0));
		for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
		{
			if (!m_preallocated && m_page_table.physical_address_of(vaddr() + offset))

@ -134,10 +134,12 @@ namespace Kernel
					return BAN::Error::from_errno(ENOMEM);
				result->m_page_table.map_page_at(paddr, vaddr() + offset, m_flags);
			}
			m_page_table.map_page_at(result->m_page_table.physical_address_of(vaddr() + offset), 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
			memcpy((void*)0, (void*)(vaddr() + offset), PAGE_SIZE);

			CriticalScope _;
			PageTable::map_fast_page(result->m_page_table.physical_address_of(vaddr() + offset));
			memcpy(PageTable::fast_page_as_ptr(), (void*)(vaddr() + offset), PAGE_SIZE);
			PageTable::unmap_fast_page();
		}
		m_page_table.unmap_page(0);

		return result;
	}

@ -172,14 +174,13 @@ namespace Kernel
			return;
		}

		LockGuard _(page_table);
		ASSERT(page_table.is_page_free(0));
		for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
		{
			page_table.map_page_at(m_page_table.physical_address_of(vaddr() + offset), 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
			memset((void*)0, 0, PAGE_SIZE);
			CriticalScope _;
			PageTable::map_fast_page(m_page_table.physical_address_of(vaddr() + offset));
			memset(PageTable::fast_page_as_ptr(), 0x00, PAGE_SIZE);
			PageTable::unmap_fast_page();
		}
		page_table.unmap_page(0);
	}

	void VirtualRange::copy_from(size_t offset, const uint8_t* buffer, size_t bytes)

@ -187,47 +188,34 @@ namespace Kernel
		if (bytes == 0)
			return;

		// NOTE: Handling overflow
		ASSERT(offset <= size());
		ASSERT(bytes <= size());
		ASSERT(offset + bytes <= size());
		// Verify no overflow
		ASSERT_LE(bytes, size());
		ASSERT_LE(offset, size());
		ASSERT_LE(offset, size() - bytes);

		PageTable& page_table = PageTable::current();

		if (m_kmalloc || &page_table == &m_page_table)
		if (m_kmalloc || &PageTable::current() == &m_page_table)
		{
			memcpy((void*)(vaddr() + offset), buffer, bytes);
			return;
		}

		LockGuard _(page_table);
		ASSERT(page_table.is_page_free(0));

		size_t off = offset % PAGE_SIZE;
		size_t i = offset / PAGE_SIZE;

		// NOTE: we map the first page separately since it needs extra calculations
		page_table.map_page_at(m_page_table.physical_address_of(vaddr() + i * PAGE_SIZE), 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);

		memcpy((void*)off, buffer, PAGE_SIZE - off);

		buffer += PAGE_SIZE - off;
		bytes  -= PAGE_SIZE - off;
		i++;
		size_t page_offset = offset % PAGE_SIZE;
		size_t page_index = offset / PAGE_SIZE;

		while (bytes > 0)
		{
			size_t len = BAN::Math::min<size_t>(PAGE_SIZE, bytes);
			{
				CriticalScope _;
				PageTable::map_fast_page(m_page_table.physical_address_of(vaddr() + page_index * PAGE_SIZE));
				memcpy(PageTable::fast_page_as_ptr(page_offset), buffer, PAGE_SIZE - page_offset);
				PageTable::unmap_fast_page();
			}

			page_table.map_page_at(m_page_table.physical_address_of(vaddr() + i * PAGE_SIZE), 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);

			memcpy((void*)0, buffer, len);

			buffer += len;
			bytes  -= len;
			i++;
			buffer += PAGE_SIZE - page_offset;
			bytes  -= PAGE_SIZE - page_offset;
			page_offset = 0;
			page_index++;
		}
		page_table.unmap_page(0);
	}

}
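
The rewritten copy_from above walks the destination one page at a time through the fast page. The same idea in isolation, with each copy clamped to the bytes remaining (a hypothetical standalone helper; the name, signature, and the clamping are assumptions, not taken from the commit):

// Illustrative sketch: copy a buffer into another address space page by page
// via the fast-page slot. 'dest_page_table' already has physical pages mapped
// at the destination range.
static void copy_to_foreign_range(PageTable& dest_page_table, vaddr_t dest_vaddr, const uint8_t* buffer, size_t bytes)
{
	size_t page_offset = dest_vaddr % PAGE_SIZE;
	size_t page_index  = dest_vaddr / PAGE_SIZE;

	while (bytes > 0)
	{
		// Never copy past the end of the current page or past the source buffer.
		const size_t len = BAN::Math::min<size_t>(PAGE_SIZE - page_offset, bytes);

		{
			CriticalScope _;
			PageTable::map_fast_page(dest_page_table.physical_address_of(page_index * PAGE_SIZE));
			memcpy(PageTable::fast_page_as_ptr(page_offset), buffer, len);
			PageTable::unmap_fast_page();
		}

		buffer      += len;
		bytes       -= len;
		page_offset  = 0;
		page_index  += 1;
	}
}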

@ -47,9 +47,9 @@ namespace Kernel
				continue;

			CriticalScope _;
			page_table.map_page_at(cache.paddr, 0, PageTable::Flags::Present);
			memcpy(buffer.data(), (void*)(page_cache_offset * m_sector_size), m_sector_size);
			page_table.unmap_page(0);
			PageTable::map_fast_page(cache.paddr);
			memcpy(buffer.data(), PageTable::fast_page_as_ptr(page_cache_offset * m_sector_size), m_sector_size);
			PageTable::unmap_fast_page();

			return true;
		}

@ -82,9 +82,9 @@ namespace Kernel

			{
				CriticalScope _;
				page_table.map_page_at(cache.paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
				memcpy((void*)(page_cache_offset * m_sector_size), buffer.data(), m_sector_size);
				page_table.unmap_page(0);
				PageTable::map_fast_page(cache.paddr);
				memcpy(PageTable::fast_page_as_ptr(page_cache_offset * m_sector_size), buffer.data(), m_sector_size);
				PageTable::unmap_fast_page();
			}

			cache.sector_mask |= 1 << page_cache_offset;

@ -113,9 +113,9 @@ namespace Kernel

		{
			CriticalScope _;
			page_table.map_page_at(cache.paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
			memcpy((void*)(page_cache_offset * m_sector_size), buffer.data(), m_sector_size);
			page_table.unmap_page(0);
			PageTable::map_fast_page(cache.paddr);
			memcpy(PageTable::fast_page_as_ptr(page_cache_offset * m_sector_size), buffer.data(), m_sector_size);
			PageTable::unmap_fast_page();
		}

		return {};

@ -123,21 +123,16 @@ namespace Kernel

	BAN::ErrorOr<void> DiskCache::sync()
	{
		ASSERT(&PageTable::current() == &PageTable::kernel());
		auto& page_table = PageTable::kernel();

		for (auto& cache : m_cache)
		{
			if (cache.dirty_mask == 0)
				continue;

			{
				LockGuard _(page_table);
				ASSERT(page_table.is_page_free(0));

				page_table.map_page_at(cache.paddr, 0, PageTable::Flags::Present);
				memcpy(m_sync_cache.data(), (void*)0, PAGE_SIZE);
				page_table.unmap_page(0);
				CriticalScope _;
				PageTable::map_fast_page(cache.paddr);
				memcpy(m_sync_cache.data(), PageTable::fast_page_as_ptr(), PAGE_SIZE);
				PageTable::unmap_fast_page();
			}

			uint8_t sector_start = 0;