forked from Bananymous/banan-os

update main #1
@@ -101,6 +101,29 @@ namespace IDT
		UnkownException0x1F,
	};

	struct PageFaultError
	{
		union
		{
			uint32_t raw;
			struct
			{
				uint32_t present		: 1;
				uint32_t write			: 1;
				uint32_t userspace		: 1;
				uint32_t reserved_write	: 1;
				uint32_t instruction	: 1;
				uint32_t protection_key	: 1;
				uint32_t shadow_stack	: 1;
				uint32_t reserved1		: 8;
				uint32_t sgx_violation	: 1;
				uint32_t reserved2		: 16;
			};
		};

	};
	static_assert(sizeof(PageFaultError) == 4);

	static const char* isr_exceptions[] =
	{
		"Division Error",
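
Note: the PageFaultError union above gives the fault handler a typed view of the error code the CPU pushes for a page fault; the next hunk reads its present bit to decide whether to attempt demand paging. The following is a standalone host-side sketch of how that bit layout decodes (illustration only, not part of the patch; it mirrors the struct above and relies on the same anonymous-struct compiler extension the kernel code uses).

// Standalone illustration: decoding a raw page-fault error code.
#include <cstdint>
#include <cstdio>

struct PageFaultError
{
	union
	{
		uint32_t raw;
		struct // anonymous struct: GCC/Clang extension, as in the kernel code
		{
			uint32_t present        : 1; // 0 = page not present, candidate for demand paging
			uint32_t write          : 1; // fault caused by a write access
			uint32_t userspace      : 1; // fault originated in user mode
			uint32_t reserved_write : 1;
			uint32_t instruction    : 1; // fault on instruction fetch
			uint32_t protection_key : 1;
			uint32_t shadow_stack   : 1;
			uint32_t reserved1      : 8;
			uint32_t sgx_violation  : 1;
			uint32_t reserved2      : 16;
		};
	};
};
static_assert(sizeof(PageFaultError) == 4);

int main()
{
	PageFaultError error;
	error.raw = 0x06; // example: write to a non-present page from user mode

	std::printf("present=%u write=%u userspace=%u instruction=%u\n",
		(unsigned)error.present, (unsigned)error.write,
		(unsigned)error.userspace, (unsigned)error.instruction);
	// A handler would attempt demand paging only when present == 0.
	return 0;
}
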
@@ -148,6 +171,29 @@ namespace IDT
		pid_t tid = Kernel::Scheduler::current_tid();
		pid_t pid = tid ? Kernel::Process::current().pid() : 0;

		if (pid && isr == ISR::PageFault)
		{
			PageFaultError page_fault_error;
			page_fault_error.raw = error;

			// Try demand paging on non present pages
			if (!page_fault_error.present)
			{
				asm volatile("sti");
				auto result = Kernel::Process::current().allocate_page_for_demand_paging(regs->cr2);
				asm volatile("cli");

				if (!result.is_error() && result.value())
					return;

				if (result.is_error())
				{
					dwarnln("Demand paging: {}", result.error());
					Kernel::Thread::current().handle_signal(SIGTERM);
				}
			}
		}

		if (tid)
		{
			auto start = Kernel::Thread::current().stack_base();
@@ -15,9 +15,9 @@ namespace Kernel

	public:
		// Create virtual range to fixed virtual address
		static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr(PageTable&, vaddr_t, size_t, PageTable::flags_t flags);
		static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr(PageTable&, vaddr_t, size_t, PageTable::flags_t flags, bool preallocate_pages);
		// Create virtual range to virtual address range
		static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr_range(PageTable&, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t, PageTable::flags_t flags);
		static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr_range(PageTable&, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t, PageTable::flags_t flags, bool preallocate_pages);
		// Create virtual range in kernel memory with kmalloc
		static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_kmalloc(size_t);
		~VirtualRange();
@@ -28,15 +28,21 @@ namespace Kernel
		size_t size() const { return m_size; }
		PageTable::flags_t flags() const { return m_flags; }

		void set_zero();
		bool contains(vaddr_t address) const { return vaddr() <= address && address < vaddr() + size(); }

		BAN::ErrorOr<void> allocate_page_for_demand_paging(vaddr_t address);

		void copy_from(size_t offset, const uint8_t* buffer, size_t bytes);

	private:
		VirtualRange(PageTable&);
		VirtualRange(PageTable&, bool preallocated, bool kmalloc);

		void set_zero();

	private:
		PageTable&				m_page_table;
		bool					m_kmalloc { false };
		const bool				m_preallocated;
		const bool				m_kmalloc;
		vaddr_t					m_vaddr { 0 };
		size_t					m_size { 0 };
		PageTable::flags_t		m_flags { 0 };
@@ -141,6 +141,11 @@ namespace Kernel
		bool is_userspace() const { return m_is_userspace; }
		const userspace_info_t& userspace_info() const { return m_userspace_info; }

		// Returns error if page could not be allocated
		// Returns true if the page was allocated successfully
		// Return false if access was page violation (segfault)
		BAN::ErrorOr<bool> allocate_page_for_demand_paging(vaddr_t addr);

	private:
		Process(const Credentials&, pid_t pid, pid_t parent, pid_t sid, pid_t pgrp);
		static Process* create_process(const Credentials&, pid_t parent, pid_t sid = 0, pid_t pgrp = 0);
@@ -70,6 +70,7 @@ namespace Kernel

		vaddr_t stack_base() const { return m_stack->vaddr(); }
		size_t stack_size() const { return m_stack->size(); }
		VirtualRange& stack() { return *m_stack; }

		vaddr_t interrupt_stack_base() const { return m_interrupt_stack ? m_interrupt_stack->vaddr() : 0; }
		size_t interrupt_stack_size() const { return m_interrupt_stack ? m_interrupt_stack->size() : 0; }
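
Note: taken together, the header changes above and the VirtualRange.cpp changes below implement one mechanism: a range created with preallocate_pages = false only reserves address space, and the page-fault handler later calls allocate_page_for_demand_paging() to back one zero-filled page at a time on first access. The following is a small host-side model of that idea (LazyRange, touch() and resident_pages() are made-up names for illustration, not kernel API).

// Standalone model (not kernel code): back pages lazily on first touch.
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <memory>
#include <new>
#include <vector>

constexpr size_t PAGE_SIZE = 4096;

class LazyRange
{
public:
	// Like create_to_vaddr(..., preallocate_pages = false): reserve slots only.
	explicit LazyRange(size_t size)
		: m_pages((size + PAGE_SIZE - 1) / PAGE_SIZE)
	{ }

	// Analogous to allocate_page_for_demand_paging(): allocate and zero one
	// page on demand; report failure instead of crashing when out of memory.
	bool touch(size_t offset)
	{
		size_t index = offset / PAGE_SIZE;
		assert(index < m_pages.size());
		if (!m_pages[index])
			m_pages[index].reset(new (std::nothrow) uint8_t[PAGE_SIZE]());
		return m_pages[index] != nullptr;
	}

	// How many pages are actually backed by memory.
	size_t resident_pages() const
	{
		size_t count = 0;
		for (const auto& page : m_pages)
			count += (page != nullptr);
		return count;
	}

private:
	std::vector<std::unique_ptr<uint8_t[]>> m_pages;
};

int main()
{
	LazyRange range(16 * PAGE_SIZE);
	std::printf("resident before: %zu\n", range.resident_pages()); // 0
	range.touch(3 * PAGE_SIZE + 123); // like the #PF path faulting in one page
	std::printf("resident after:  %zu\n", range.resident_pages()); // 1
	return 0;
}
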
@@ -5,26 +5,27 @@
namespace Kernel
{

	BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr(PageTable& page_table, vaddr_t vaddr, size_t size, PageTable::flags_t flags)
	BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr(PageTable& page_table, vaddr_t vaddr, size_t size, PageTable::flags_t flags, bool preallocate_pages)
	{
		ASSERT(size % PAGE_SIZE == 0);
		ASSERT(vaddr % PAGE_SIZE == 0);
		ASSERT(vaddr > 0);

		VirtualRange* result_ptr = new VirtualRange(page_table);
		VirtualRange* result_ptr = new VirtualRange(page_table, preallocate_pages, false);
		if (result_ptr == nullptr)
			return BAN::Error::from_errno(ENOMEM);
		auto result = BAN::UniqPtr<VirtualRange>::adopt(result_ptr);

		result->m_kmalloc = false;
		auto result = BAN::UniqPtr<VirtualRange>::adopt(result_ptr);
		result->m_vaddr = vaddr;
		result->m_size = size;
		result->m_flags = flags;

		ASSERT(page_table.reserve_range(vaddr, size));

		size_t needed_pages = size / PAGE_SIZE;
		if (!preallocate_pages)
			return result;

		size_t needed_pages = size / PAGE_SIZE;
		for (size_t i = 0; i < needed_pages; i++)
		{
			paddr_t paddr = Heap::get().take_free_page();
@@ -39,10 +40,12 @@ namespace Kernel
			page_table.map_page_at(paddr, vaddr + i * PAGE_SIZE, flags);
		}

		result->set_zero();

		return result;
	}

	BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr_range(PageTable& page_table, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t size, PageTable::flags_t flags)
	BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr_range(PageTable& page_table, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t size, PageTable::flags_t flags, bool preallocate_pages)
	{
		ASSERT(size % PAGE_SIZE == 0);
		ASSERT(vaddr_start > 0);
@@ -58,20 +61,22 @@ namespace Kernel

		vaddr_t vaddr = page_table.reserve_free_contiguous_pages(size / PAGE_SIZE, vaddr_start, vaddr_end);
		if (vaddr == 0)
		{
			dprintln("no free {} byte area", size);
			return BAN::Error::from_errno(ENOMEM);
		}
		ASSERT(vaddr + size <= vaddr_end);

		LockGuard _(page_table);
		page_table.unmap_range(vaddr, size); // We have to unmap here to allow reservation in create_to_vaddr()
		return create_to_vaddr(page_table, vaddr, size, flags);
		return create_to_vaddr(page_table, vaddr, size, flags, preallocate_pages);
	}

	BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_kmalloc(size_t size)
	{
		VirtualRange* result = new VirtualRange(PageTable::kernel());
		VirtualRange* result = new VirtualRange(PageTable::kernel(), false, true);
		ASSERT(result);

		result->m_kmalloc = true;
		result->m_size = size;
		result->m_flags = PageTable::Flags::ReadWrite | PageTable::Flags::Present;
		result->m_vaddr = (vaddr_t)kmalloc(size);
@@ -81,11 +86,15 @@ namespace Kernel
			return BAN::Error::from_errno(ENOMEM);
		}

		result->set_zero();

		return BAN::UniqPtr<VirtualRange>::adopt(result);
	}

	VirtualRange::VirtualRange(PageTable& page_table)
	VirtualRange::VirtualRange(PageTable& page_table, bool preallocated, bool kmalloc)
		: m_page_table(page_table)
		, m_preallocated(preallocated)
		, m_kmalloc(kmalloc)
	{ }

	VirtualRange::~VirtualRange()
@@ -98,7 +107,11 @@ namespace Kernel
		else
		{
			for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
				Heap::get().release_page(m_page_table.physical_address_of(vaddr() + offset));
			{
				paddr_t paddr = m_page_table.physical_address_of(vaddr() + offset);
				if (paddr)
					Heap::get().release_page(paddr);
			}
			m_page_table.unmap_range(vaddr(), size());
		}
	}
@@ -107,12 +120,19 @@ namespace Kernel
	{
		ASSERT(&PageTable::current() == &m_page_table);

		auto result = TRY(create_to_vaddr(page_table, vaddr(), size(), flags()));
		auto result = TRY(create_to_vaddr(page_table, vaddr(), size(), flags(), m_preallocated));

		LockGuard _(m_page_table);
		ASSERT(m_page_table.is_page_free(0));
		for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
		{
			if (!m_preallocated && m_page_table.physical_address_of(vaddr() + offset))
			{
				paddr_t paddr = Heap::get().take_free_page();
				if (paddr == 0)
					return BAN::Error::from_errno(ENOMEM);
				result->m_page_table.map_page_at(paddr, vaddr() + offset, m_flags);
			}
			m_page_table.map_page_at(result->m_page_table.physical_address_of(vaddr() + offset), 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
			memcpy((void*)0, (void*)(vaddr() + offset), PAGE_SIZE);
		}
@@ -121,11 +141,31 @@ namespace Kernel
		return result;
	}

	BAN::ErrorOr<void> VirtualRange::allocate_page_for_demand_paging(vaddr_t address)
	{
		ASSERT(!m_kmalloc);
		ASSERT(!m_preallocated);
		ASSERT(contains(address));
		ASSERT(&PageTable::current() == &m_page_table);

		vaddr_t vaddr = address & PAGE_ADDR_MASK;
		ASSERT(m_page_table.physical_address_of(vaddr) == 0);

		paddr_t paddr = Heap::get().take_free_page();
		if (paddr == 0)
			return BAN::Error::from_errno(ENOMEM);

		m_page_table.map_page_at(paddr, vaddr, m_flags);
		memset((void*)vaddr, 0x00, PAGE_SIZE);

		return {};
	}

	void VirtualRange::set_zero()
	{
		PageTable& page_table = PageTable::current();

		if (&page_table == &m_page_table)
		if (m_kmalloc || &page_table == &m_page_table)
		{
			memset((void*)vaddr(), 0, size());
			return;
@@ -153,7 +193,7 @@ namespace Kernel

		PageTable& page_table = PageTable::current();

		if (&page_table == &m_page_table)
		if (m_kmalloc || &page_table == &m_page_table)
		{
			memcpy((void*)(vaddr() + offset), buffer, bytes);
			return;
@@ -136,9 +136,9 @@ namespace Kernel
				process->page_table(),
				0x400000, KERNEL_OFFSET,
				needed_bytes,
				PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present
				PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present,
				true
			));
			argv_range->set_zero();

			uintptr_t temp = argv_range->vaddr() + sizeof(char*) * 2;
			argv_range->copy_from(0, (uint8_t*)&temp, sizeof(char*));
@@ -437,9 +437,9 @@ namespace Kernel
						page_table(),
						0x400000, KERNEL_OFFSET,
						bytes,
						PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present
						PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present,
						true
					));
					range->set_zero();

					size_t data_offset = sizeof(char*) * (container.size() + 1);
					for (size_t i = 0; i < container.size(); i++)
@@ -584,8 +584,7 @@ namespace Kernel

			{
				LockGuard _(m_lock);
				auto range = MUST(VirtualRange::create_to_vaddr(page_table(), page_start * PAGE_SIZE, page_count * PAGE_SIZE, flags));
				range->set_zero();
				auto range = MUST(VirtualRange::create_to_vaddr(page_table(), page_start * PAGE_SIZE, page_count * PAGE_SIZE, flags, true));
				range->copy_from(elf_program_header.p_vaddr % PAGE_SIZE, elf.data() + elf_program_header.p_offset, elf_program_header.p_filesz);

				MUST(m_mapped_ranges.emplace_back(false, BAN::move(range)));
@@ -623,6 +622,29 @@ namespace Kernel
		return {};
	}

	BAN::ErrorOr<bool> Process::allocate_page_for_demand_paging(vaddr_t addr)
	{
		ASSERT(&Process::current() == this);

		LockGuard _(m_lock);

		if (Thread::current().stack().contains(addr))
		{
			TRY(Thread::current().stack().allocate_page_for_demand_paging(addr));
			return true;
		}

		for (auto& mapped_range : m_mapped_ranges)
		{
			if (!mapped_range.range->contains(addr))
				continue;
			TRY(mapped_range.range->allocate_page_for_demand_paging(addr));
			return true;
		}

		return false;
	}

	BAN::ErrorOr<long> Process::open_file(BAN::StringView path, int flags, mode_t mode)
	{
		BAN::String absolute_path = TRY(absolute_path_of(path));
@@ -890,9 +912,9 @@ namespace Kernel
				page_table(),
				0x400000, KERNEL_OFFSET,
				args->len,
				PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present
				PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present,
				false
			));
			range->set_zero();

			LockGuard _(m_lock);
			TRY(m_mapped_ranges.emplace_back(true, BAN::move(range)));
@@ -122,8 +122,8 @@ namespace Kernel

		thread->m_is_userspace = true;

		thread->m_stack = TRY(VirtualRange::create_to_vaddr_range(process->page_table(), 0x300000, KERNEL_OFFSET, m_userspace_stack_size, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present));
		thread->m_interrupt_stack = TRY(VirtualRange::create_to_vaddr_range(process->page_table(), 0x300000, KERNEL_OFFSET, m_interrupt_stack_size, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present));
		thread->m_stack = TRY(VirtualRange::create_to_vaddr_range(process->page_table(), 0x300000, KERNEL_OFFSET, m_userspace_stack_size, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present, true));
		thread->m_interrupt_stack = TRY(VirtualRange::create_to_vaddr_range(process->page_table(), 0x300000, KERNEL_OFFSET, m_interrupt_stack_size, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present, true));

		thread->setup_exec();