Compare commits


No commits in common. "2ae2ede0b28c37c1b87f52d6b17bf355acd50a86" and "50ab391133817163641b31f0a0265d2af0af5671" have entirely different histories.

8 changed files with 256 additions and 254 deletions

View File

@@ -211,7 +211,7 @@ elseif("${BANAN_ARCH}" STREQUAL "i686")
target_link_options(kernel PRIVATE LINKER:-T,${CMAKE_CURRENT_SOURCE_DIR}/arch/i686/linker.ld)
endif()
target_link_options(kernel PRIVATE -ffreestanding -nostdlib -orphan-handling=error)
target_link_options(kernel PRIVATE -ffreestanding -nostdlib)
execute_process(COMMAND ${CMAKE_CXX_COMPILER} -print-file-name=crtbegin.o OUTPUT_VARIABLE CRTBEGIN OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process(COMMAND ${CMAKE_CXX_COMPILER} -print-file-name=crtend.o OUTPUT_VARIABLE CRTEND OUTPUT_STRIP_TRAILING_WHITESPACE)

View File

@@ -21,20 +21,20 @@ SECTIONS
g_userspace_end = .;
g_kernel_execute_end = .;
}
.ap_init ALIGN(4K) : AT(ADDR(.ap_init) - KERNEL_OFFSET)
.ap_init ALIGN(4K) : AT(ADDR(.ap_init))
{
g_ap_init_addr = .;
*(.ap_init)
}
.rodata ALIGN(4K) : AT(ADDR(.rodata) - KERNEL_OFFSET)
{
*(.rodata.*)
}
.data ALIGN(4K) : AT(ADDR(.data) - KERNEL_OFFSET)
{
g_kernel_writable_start = .;
*(.data)
}
.rodata ALIGN(4K) : AT(ADDR(.rodata) - KERNEL_OFFSET)
{
*(.rodata.*)
}
.bss ALIGN(4K) : AT(ADDR(.bss) - KERNEL_OFFSET)
{
*(COMMON)

View File

@@ -21,20 +21,20 @@ SECTIONS
g_userspace_end = .;
g_kernel_execute_end = .;
}
.ap_init ALIGN(4K) : AT(ADDR(.ap_init) - KERNEL_OFFSET)
.ap_init ALIGN(4K) : AT(ADDR(.ap_init))
{
g_ap_init_addr = .;
*(.ap_init)
}
.rodata ALIGN(4K) : AT(ADDR(.rodata) - KERNEL_OFFSET)
{
*(.rodata.*)
}
.data ALIGN(4K) : AT(ADDR(.data) - KERNEL_OFFSET)
{
g_kernel_writable_start = .;
*(.data)
}
.rodata ALIGN(4K) : AT(ADDR(.rodata) - KERNEL_OFFSET)
{
*(.rodata.*)
}
.bss ALIGN(4K) : AT(ADDR(.bss) - KERNEL_OFFSET)
{
*(COMMON)

View File

@@ -35,7 +35,7 @@ namespace Kernel
bool MemoryRegion::contains_fully(vaddr_t address, size_t size) const
{
return m_vaddr <= address && address + size <= m_vaddr + m_size;
return m_vaddr <= address && address + size < m_vaddr + m_size;
}
bool MemoryRegion::overlaps(vaddr_t address, size_t size) const
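As a standalone illustration of the one-character change in contains_fully above: with <= a range that ends exactly at the region's end still counts as fully contained, while the < variant rejects it. A minimal sketch with hypothetical names and plain integers (not the kernel's vaddr_t or MemoryRegion types):

    #include <cstddef>
    #include <cstdint>

    // Sketch of the inclusive-end containment check: region [region_start, region_start + region_size),
    // candidate [addr, addr + size). The <= on the right accepts a candidate ending exactly at the
    // region end; replacing it with < (the other side of this diff) rejects that case.
    constexpr bool contains_fully(uintptr_t region_start, size_t region_size, uintptr_t addr, size_t size)
    {
        return region_start <= addr && addr + size <= region_start + region_size;
    }

    static_assert(contains_fully(0x1000, 0x1000, 0x1800, 0x800)); // ends exactly at 0x2000, the region end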

View File

@@ -519,8 +519,8 @@ namespace Kernel
{
VirtualFileSystem::File file;
TRY(file.canonical_path.append("<self>"));
file.inode = m_loadable_elf->executable();
m_userspace_info.file_fd = TRY(m_open_file_descriptors.open(BAN::move(file), O_RDONLY));
file.inode = m_loadable_elf->inode();
m_userspace_info.file_fd = TRY(m_open_file_descriptors.open(BAN::move(file), O_EXEC));
}
for (size_t i = 0; i < sizeof(m_signal_handlers) / sizeof(*m_signal_handlers); i++)
@@ -2376,7 +2376,7 @@ namespace Kernel
return {};
unauthorized_access:
dwarnln("process {}, thread {} attempted to make an invalid pointer access to 0x{H}->0x{H}", pid(), Thread::current().tid(), vaddr, vaddr + size);
dwarnln("process {}, thread {} attempted to make an invalid pointer access", pid(), Thread::current().tid());
Debug::dump_stack_trace();
MUST(sys_kill(pid(), SIGSEGV));
return BAN::Error::from_errno(EINTR);

View File

@@ -26,20 +26,26 @@ namespace LibELF
LoadableELF::~LoadableELF()
{
const auto cleanup_program_headers =
[&](BAN::Span<const ElfNativeProgramHeader> headers)
{
for (const auto& header : headers)
{
ASSERT(header.p_type == PT_LOAD);
const vaddr_t vaddr = header.p_vaddr & PAGE_ADDR_MASK;
const size_t pages = range_page_count(header.p_vaddr, header.p_memsz);
for (size_t i = 0; i < pages; i++)
if (paddr_t paddr = m_page_table.physical_address_of(vaddr + i * PAGE_SIZE))
Heap::get().release_page(paddr);
m_page_table.unmap_range(vaddr, pages * PAGE_SIZE);
}
};
if (!m_is_loaded)
return;
for (const auto& header : m_program_headers)
{
ASSERT(header.p_type == PT_LOAD);
const vaddr_t vaddr = header.p_vaddr & PAGE_ADDR_MASK;
const size_t pages = range_page_count(header.p_vaddr, header.p_memsz);
for (size_t i = 0; i < pages; i++)
if (paddr_t paddr = m_page_table.physical_address_of(vaddr + i * PAGE_SIZE))
Heap::get().release_page(paddr);
m_page_table.unmap_range(vaddr, pages * PAGE_SIZE);
}
cleanup_program_headers(m_executable.program_headers.span());
cleanup_program_headers(m_interpreter.program_headers.span());
}
static BAN::ErrorOr<ElfNativeFileHeader> read_and_validate_file_header(BAN::RefPtr<Inode> inode)
@@ -160,26 +166,30 @@ namespace LibELF
}
return LoadResult {
.inode = inode,
.interp = interp,
.file_header = file_header,
.program_headers = BAN::move(program_headers)
.elf_file = {
.inode = inode,
.file_header = file_header,
.program_headers = BAN::move(program_headers),
.dynamic_base = 0
},
.interp = interp
};
}
static bool do_program_headers_overlap(BAN::Span<const ElfNativeProgramHeader> pheaders1, BAN::Span<const ElfNativeProgramHeader> pheaders2, vaddr_t base2)
bool LoadableELF::does_executable_and_interpreter_overlap() const
{
for (const auto& pheader1 : pheaders1)
ASSERT(m_executable.inode);
ASSERT(m_interpreter.inode);
for (const auto& epheader : m_executable.program_headers)
{
for (const auto& pheader2 : pheaders2)
for (const auto& ipheader : m_interpreter.program_headers)
{
const vaddr_t s1 = pheader1.p_vaddr & PAGE_ADDR_MASK;
const vaddr_t e1 = (pheader1.p_vaddr + pheader1.p_memsz + PAGE_SIZE - 1) & PAGE_ADDR_MASK;
const vaddr_t s2 = pheader2.p_vaddr & PAGE_ADDR_MASK;
const vaddr_t e2 = (pheader2.p_vaddr + pheader2.p_memsz + PAGE_SIZE - 1) & PAGE_ADDR_MASK;
if (s1 < e2 + base2 && s2 + base2 < e1)
const vaddr_t e1 = epheader.p_vaddr & PAGE_ADDR_MASK;
const vaddr_t i1 = ipheader.p_vaddr & PAGE_ADDR_MASK;
const vaddr_t e2 = (epheader.p_vaddr + epheader.p_memsz + PAGE_SIZE - 1) & PAGE_ADDR_MASK;
const vaddr_t i2 = (ipheader.p_vaddr + ipheader.p_memsz + PAGE_SIZE - 1) & PAGE_ADDR_MASK;
if (e1 < i2 && i1 < e2)
return true;
}
}
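The rewritten overlap check boils down to the standard half-open range test on page-aligned segment bounds: two ranges overlap exactly when each one starts before the other one ends. A minimal standalone sketch (hypothetical name, not the function in this diff):

    #include <cstdint>

    // [s1, e1) and [s2, e2) are page-aligned start/end addresses, as computed above
    // with PAGE_ADDR_MASK for the start and an align-up to the next page for the end.
    constexpr bool ranges_overlap(uintptr_t s1, uintptr_t e1, uintptr_t s2, uintptr_t e2)
    {
        return s1 < e2 && s2 < e1;
    }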
@@ -189,95 +199,74 @@ namespace LibELF
BAN::ErrorOr<void> LoadableELF::initialize(const Credentials& credentials, BAN::RefPtr<Inode> inode)
{
const auto generate_random_dynamic_base =
[]() -> vaddr_t
{
// 1 MiB -> 2 GiB + 1 MiB
return (Random::get_u32() & 0x7FFFF000) + 0x100000;
};
auto executable_load_result = TRY(load_elf_file(credentials, inode));
m_executable = executable_load_result.elf_file;
m_executable = executable_load_result.inode;
m_interpreter = executable_load_result.interp;
vaddr_t dynamic_base = 0;
if (m_interpreter)
if (m_executable.file_header.e_type == ET_DYN)
{
auto interp_load_result = TRY(load_elf_file(credentials, m_interpreter));
m_executable.dynamic_base = (Random::get_u32() & 0x7FFFF000) + 0x100000;
m_executable.file_header.e_entry += m_executable.dynamic_base;
for (auto& program_header : m_executable.program_headers)
program_header.p_vaddr += m_executable.dynamic_base;
}
if (executable_load_result.interp)
{
auto interp_load_result = TRY(load_elf_file(credentials, executable_load_result.interp));
m_interpreter = interp_load_result.elf_file;
if (interp_load_result.interp)
{
dwarnln("ELF interpreter has an interpreter");
dwarnln("Executable has specified interpreter for its interpreter");
return BAN::Error::from_errno(EINVAL);
}
if (executable_load_result.file_header.e_type == ET_EXEC)
if (m_interpreter.file_header.e_type == ET_DYN)
{
if (interp_load_result.file_header.e_type == ET_EXEC)
for (int attempt = 0; attempt < 100; attempt++)
{
const bool has_overlap = do_program_headers_overlap(
executable_load_result.program_headers.span(),
interp_load_result.program_headers.span(),
0
);
if (has_overlap)
const vaddr_t dynamic_base = (Random::get_u32() & 0x3FFFF000) + 0x40000000;
for (auto& program_header : m_interpreter.program_headers)
program_header.p_vaddr += dynamic_base;
if (does_executable_and_interpreter_overlap())
{
dwarnln("Executable and interpreter LOAD segments overlap");
return BAN::Error::from_errno(EINVAL);
}
}
else
{
for (int attempt = 0; attempt < 100; attempt++)
{
const vaddr_t test_dynamic_base = generate_random_dynamic_base();
const bool has_overlap = do_program_headers_overlap(
executable_load_result.program_headers.span(),
interp_load_result.program_headers.span(),
test_dynamic_base
);
if (has_overlap)
continue;
dynamic_base = test_dynamic_base;
break;
}
if (dynamic_base == 0)
{
dwarnln("Could not find space to load interpreter");
return BAN::Error::from_errno(EINVAL);
for (auto& program_header : m_interpreter.program_headers)
program_header.p_vaddr -= dynamic_base;
continue;
}
m_interpreter.dynamic_base = dynamic_base;
m_interpreter.file_header.e_entry += dynamic_base;
break;
}
}
m_file_header = interp_load_result.file_header;
m_program_headers = BAN::move(interp_load_result.program_headers);
}
else
{
m_file_header = executable_load_result.file_header;
m_program_headers = BAN::move(executable_load_result.program_headers);
}
const bool can_load_interpreter = (m_interpreter.file_header.e_type == ET_DYN)
? (m_interpreter.dynamic_base != 0)
: !does_executable_and_interpreter_overlap();
if (m_file_header.e_type == ET_DYN && dynamic_base == 0)
dynamic_base = generate_random_dynamic_base();
if (dynamic_base)
{
m_file_header.e_entry += dynamic_base;
for (auto& program_header : m_program_headers)
program_header.p_vaddr += dynamic_base;
if (!can_load_interpreter)
{
dwarnln("Could not find space to load interpreter");
return BAN::Error::from_errno(EINVAL);
}
}
return {};
}
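For reference, the randomized bases above are plain mask-and-offset arithmetic: the executable gets a page-aligned base in roughly [1 MiB, 2 GiB + 1 MiB), and the interpreter retries with one in roughly [1 GiB, 2 GiB). A sketch of the first computation (hypothetical helper, r standing in for Random::get_u32()):

    #include <cstdint>

    // (r & 0x7FFFF000) keeps a page-aligned value below 2 GiB; adding 0x100000 shifts
    // the range up by 1 MiB, so ET_DYN executables never land in the first megabyte.
    uintptr_t random_dynamic_base(uint32_t r)
    {
        return (r & 0x7FFFF000) + 0x100000;
    }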
vaddr_t LoadableELF::entry_point() const
{
if (m_interpreter.inode)
return m_interpreter.file_header.e_entry;
return m_executable.file_header.e_entry;
}
bool LoadableELF::contains(vaddr_t address) const
{
for (const auto& program_header : m_program_headers)
for (const auto& program_header : m_executable.program_headers)
if (program_header.p_vaddr <= address && address < program_header.p_vaddr + program_header.p_memsz)
return true;
for (const auto& program_header : m_interpreter.program_headers)
if (program_header.p_vaddr <= address && address < program_header.p_vaddr + program_header.p_memsz)
return true;
return false;
@@ -285,88 +274,112 @@ namespace LibELF
bool LoadableELF::is_address_space_free() const
{
for (const auto& program_header : m_program_headers)
{
ASSERT(program_header.p_type == PT_LOAD);
const vaddr_t page_vaddr = program_header.p_vaddr & PAGE_ADDR_MASK;
const size_t pages = range_page_count(program_header.p_vaddr, program_header.p_memsz);
if (!m_page_table.is_range_free(page_vaddr, pages * PAGE_SIZE))
return false;
}
const auto are_program_headers_free =
[&](BAN::Span<const ElfNativeProgramHeader> program_headers) -> bool
{
for (const auto& program_header : program_headers)
{
ASSERT(program_header.p_type == PT_LOAD);
const vaddr_t page_vaddr = program_header.p_vaddr & PAGE_ADDR_MASK;
const size_t pages = range_page_count(program_header.p_vaddr, program_header.p_memsz);
if (!m_page_table.is_range_free(page_vaddr, pages * PAGE_SIZE))
return false;
}
return true;
};
if (!are_program_headers_free(m_executable.program_headers.span()))
return false;
if (!are_program_headers_free(m_interpreter.program_headers.span()))
return false;
return true;
}
void LoadableELF::reserve_address_space()
{
for (const auto& program_header : m_program_headers)
{
ASSERT(program_header.p_type == PT_LOAD);
const vaddr_t page_vaddr = program_header.p_vaddr & PAGE_ADDR_MASK;
const size_t pages = range_page_count(program_header.p_vaddr, program_header.p_memsz);
if (!m_page_table.reserve_range(page_vaddr, pages * PAGE_SIZE))
ASSERT_NOT_REACHED();
m_virtual_page_count += pages;
}
const auto reserve_program_headers =
[&](BAN::Span<const ElfNativeProgramHeader> program_headers)
{
for (const auto& program_header : program_headers)
{
ASSERT(program_header.p_type == PT_LOAD);
const vaddr_t page_vaddr = program_header.p_vaddr & PAGE_ADDR_MASK;
const size_t pages = range_page_count(program_header.p_vaddr, program_header.p_memsz);
if (!m_page_table.reserve_range(page_vaddr, pages * PAGE_SIZE))
ASSERT_NOT_REACHED();
m_virtual_page_count += pages;
}
};
reserve_program_headers(m_executable.program_headers.span());
reserve_program_headers(m_interpreter.program_headers.span());
m_is_loaded = true;
}
void LoadableELF::update_suid_sgid(Kernel::Credentials& credentials)
{
if (m_executable->mode().mode & +Inode::Mode::ISUID)
credentials.set_euid(m_executable->uid());
if (m_executable->mode().mode & +Inode::Mode::ISGID)
credentials.set_egid(m_executable->gid());
auto inode = m_executable.inode;
ASSERT(inode);
if (inode->mode().mode & +Inode::Mode::ISUID)
credentials.set_euid(inode->uid());
if (inode->mode().mode & +Inode::Mode::ISGID)
credentials.set_egid(inode->gid());
}
BAN::ErrorOr<void> LoadableELF::load_page_to_memory(vaddr_t address)
{
auto inode = has_interpreter() ? m_interpreter : m_executable;
// FIXME: use MemoryBackedRegion/FileBackedRegion instead of manually mapping and allocating pages
for (const auto& program_header : m_program_headers)
{
ASSERT(program_header.p_type == PT_LOAD);
if (!(program_header.p_vaddr <= address && address < program_header.p_vaddr + program_header.p_memsz))
continue;
PageTable::flags_t flags = PageTable::Flags::UserSupervisor | PageTable::Flags::Present;
if (program_header.p_flags & LibELF::PF_W)
flags |= PageTable::Flags::ReadWrite;
if (program_header.p_flags & LibELF::PF_X)
flags |= PageTable::Flags::Execute;
const vaddr_t vaddr = address & PAGE_ADDR_MASK;
const paddr_t paddr = Heap::get().take_free_page();
if (paddr == 0)
return BAN::Error::from_errno(ENOMEM);
// Temporarily map page as RW so kernel can write to it
m_page_table.map_page_at(paddr, vaddr, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
m_physical_page_count++;
memset((void*)vaddr, 0x00, PAGE_SIZE);
if (vaddr / PAGE_SIZE < BAN::Math::div_round_up<size_t>(program_header.p_vaddr + program_header.p_filesz, PAGE_SIZE))
const auto load_page_from_program_header =
[&](BAN::RefPtr<Inode> inode, BAN::Span<const ElfNativeProgramHeader> program_headers) -> BAN::ErrorOr<bool>
{
size_t vaddr_offset = 0;
if (vaddr < program_header.p_vaddr)
vaddr_offset = program_header.p_vaddr - vaddr;
for (const auto& program_header : program_headers)
{
ASSERT(program_header.p_type == PT_LOAD);
if (!(program_header.p_vaddr <= address && address < program_header.p_vaddr + program_header.p_memsz))
continue;
size_t file_offset = 0;
if (vaddr > program_header.p_vaddr)
file_offset = vaddr - program_header.p_vaddr;
PageTable::flags_t flags = PageTable::Flags::UserSupervisor | PageTable::Flags::Present;
if (program_header.p_flags & LibELF::PF_W)
flags |= PageTable::Flags::ReadWrite;
if (program_header.p_flags & LibELF::PF_X)
flags |= PageTable::Flags::Execute;
size_t bytes = BAN::Math::min<size_t>(PAGE_SIZE - vaddr_offset, program_header.p_filesz - file_offset);
TRY(inode->read(program_header.p_offset + file_offset, { (uint8_t*)vaddr + vaddr_offset, bytes }));
}
const vaddr_t vaddr = address & PAGE_ADDR_MASK;
const paddr_t paddr = Heap::get().take_free_page();
if (paddr == 0)
return BAN::Error::from_errno(ENOMEM);
// Map page with the correct flags
m_page_table.map_page_at(paddr, vaddr, flags);
// Temporarily map page as RW so kernel can write to it
m_page_table.map_page_at(paddr, vaddr, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
m_physical_page_count++;
memset((void*)vaddr, 0x00, PAGE_SIZE);
if (vaddr / PAGE_SIZE < BAN::Math::div_round_up<size_t>(program_header.p_vaddr + program_header.p_filesz, PAGE_SIZE))
{
size_t vaddr_offset = 0;
if (vaddr < program_header.p_vaddr)
vaddr_offset = program_header.p_vaddr - vaddr;
size_t file_offset = 0;
if (vaddr > program_header.p_vaddr)
file_offset = vaddr - program_header.p_vaddr;
size_t bytes = BAN::Math::min<size_t>(PAGE_SIZE - vaddr_offset, program_header.p_filesz - file_offset);
TRY(inode->read(program_header.p_offset + file_offset, { (uint8_t*)vaddr + vaddr_offset, bytes }));
}
// Map page with the correct flags
m_page_table.map_page_at(paddr, vaddr, flags);
return true;
}
return false;
};
if (TRY(load_page_from_program_header(m_executable.inode, m_executable.program_headers.span())))
return {};
if (TRY(load_page_from_program_header(m_interpreter.inode, m_interpreter.program_headers.span())))
return {};
}
ASSERT_NOT_REACHED();
}
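The per-page copy arithmetic shared by both sides of this hunk is: map a zeroed page, then read only the slice of it that overlaps the header's file-backed bytes. A minimal standalone sketch with hypothetical stand-in types (PtLoad, PageCopy, and page_copy_for are not names from this repository):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    struct PtLoad   { uintptr_t p_vaddr; size_t p_filesz; size_t p_offset; }; // stand-in for ElfNativeProgramHeader
    struct PageCopy { size_t page_offset; size_t file_offset; size_t bytes; };

    // 'page' is the page-aligned faulting address. The slice to read starts at
    // page + page_offset, comes from file offset p_offset + file_offset, and is
    // 'bytes' long; the rest of the page stays zero-filled. This assumes the page
    // overlaps [p_vaddr, p_vaddr + p_filesz), which the div_round_up check above guards.
    PageCopy page_copy_for(uintptr_t page, const PtLoad& ph, size_t page_size = 4096)
    {
        const size_t page_offset = (page < ph.p_vaddr) ? ph.p_vaddr - page : 0;
        const size_t file_offset = (page > ph.p_vaddr) ? page - ph.p_vaddr : 0;
        const size_t bytes = std::min(page_size - page_offset, ph.p_filesz - file_offset);
        return { page_offset, file_offset, bytes };
    }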
@@ -374,47 +387,68 @@ namespace LibELF
{
auto elf = TRY(BAN::UniqPtr<LoadableELF>::create(new_page_table));
elf->m_executable = m_executable;
elf->m_interpreter = m_interpreter;
elf->m_file_header = m_file_header;
TRY(elf->m_program_headers.reserve(m_program_headers.size()));
for (const auto& program_header : m_program_headers)
MUST(elf->m_program_headers.emplace_back(program_header));
const auto clone_loadable_file =
[](const LoadableElfFile& source, LoadableElfFile& destination) -> BAN::ErrorOr<void>
{
if (!source.inode)
return {};
destination.inode = source.inode;
destination.file_header = source.file_header;
destination.dynamic_base = source.dynamic_base;
TRY(destination.program_headers.reserve(source.program_headers.size()));
for (const auto& program_header : source.program_headers)
MUST(destination.program_headers.emplace_back(program_header));
return {};
};
const auto map_loadable_file =
[&](BAN::Span<const ElfNativeProgramHeader> program_headers) -> BAN::ErrorOr<void>
{
for (const auto& program_header : program_headers)
{
ASSERT(program_header.p_type == PT_LOAD);
if (!(program_header.p_flags & LibELF::PF_W))
continue;
PageTable::flags_t flags = PageTable::Flags::UserSupervisor | PageTable::Flags::Present;
if (program_header.p_flags & LibELF::PF_W)
flags |= PageTable::Flags::ReadWrite;
if (program_header.p_flags & LibELF::PF_X)
flags |= PageTable::Flags::Execute;
vaddr_t start = program_header.p_vaddr & PAGE_ADDR_MASK;
size_t pages = range_page_count(program_header.p_vaddr, program_header.p_memsz);
for (size_t i = 0; i < pages; i++)
{
if (m_page_table.physical_address_of(start + i * PAGE_SIZE) == 0)
continue;
paddr_t paddr = Heap::get().take_free_page();
if (paddr == 0)
return BAN::Error::from_errno(ENOMEM);
PageTable::with_fast_page(paddr, [&] {
memcpy(PageTable::fast_page_as_ptr(), (void*)(start + i * PAGE_SIZE), PAGE_SIZE);
});
new_page_table.map_page_at(paddr, start + i * PAGE_SIZE, flags);
elf->m_physical_page_count++;
}
}
return {};
};
TRY(clone_loadable_file(m_executable, elf->m_executable));
TRY(clone_loadable_file(m_interpreter, elf->m_interpreter));
elf->reserve_address_space();
for (const auto& program_header : m_program_headers)
{
ASSERT(program_header.p_type == PT_LOAD);
if (!(program_header.p_flags & LibELF::PF_W))
continue;
PageTable::flags_t flags = PageTable::Flags::UserSupervisor | PageTable::Flags::Present;
if (program_header.p_flags & LibELF::PF_W)
flags |= PageTable::Flags::ReadWrite;
if (program_header.p_flags & LibELF::PF_X)
flags |= PageTable::Flags::Execute;
vaddr_t start = program_header.p_vaddr & PAGE_ADDR_MASK;
size_t pages = range_page_count(program_header.p_vaddr, program_header.p_memsz);
for (size_t i = 0; i < pages; i++)
{
if (m_page_table.physical_address_of(start + i * PAGE_SIZE) == 0)
continue;
paddr_t paddr = Heap::get().take_free_page();
if (paddr == 0)
return BAN::Error::from_errno(ENOMEM);
PageTable::with_fast_page(paddr, [&] {
memcpy(PageTable::fast_page_as_ptr(), (void*)(start + i * PAGE_SIZE), PAGE_SIZE);
});
new_page_table.map_page_at(paddr, start + i * PAGE_SIZE, flags);
elf->m_physical_page_count++;
}
}
TRY(map_loadable_file(elf->m_executable.program_headers.span()));
TRY(map_loadable_file(elf->m_interpreter.program_headers.span()));
return elf;
}

View File

@@ -25,10 +25,10 @@ namespace LibELF
static BAN::ErrorOr<BAN::UniqPtr<LoadableELF>> load_from_inode(Kernel::PageTable&, const Kernel::Credentials&, BAN::RefPtr<Kernel::Inode>);
~LoadableELF();
Kernel::vaddr_t entry_point() const { return m_file_header.e_entry; }
Kernel::vaddr_t entry_point() const;
bool has_interpreter() const { return !!m_interpreter; }
BAN::RefPtr<Kernel::Inode> executable() { return m_executable; }
bool has_interpreter() const { return !!m_interpreter.inode; }
BAN::RefPtr<Kernel::Inode> inode() { return m_executable.inode; }
bool contains(Kernel::vaddr_t address) const;
bool is_address_space_free() const;
@@ -44,25 +44,30 @@ namespace LibELF
size_t physical_page_count() const { return m_physical_page_count; }
private:
struct LoadResult
struct LoadableElfFile
{
BAN::RefPtr<Kernel::Inode> inode;
BAN::RefPtr<Kernel::Inode> interp;
ElfNativeFileHeader file_header;
BAN::Vector<ElfNativeProgramHeader> program_headers;
Kernel::vaddr_t dynamic_base;
};
struct LoadResult
{
LoadableElfFile elf_file;
BAN::RefPtr<Kernel::Inode> interp;
};
private:
LoadableELF(Kernel::PageTable&);
BAN::ErrorOr<void> initialize(const Kernel::Credentials&, BAN::RefPtr<Kernel::Inode>);
bool does_executable_and_interpreter_overlap() const;
BAN::ErrorOr<LoadResult> load_elf_file(const Kernel::Credentials&, BAN::RefPtr<Kernel::Inode>) const;
private:
BAN::RefPtr<Kernel::Inode> m_executable;
BAN::RefPtr<Kernel::Inode> m_interpreter;
ElfNativeFileHeader m_file_header;
BAN::Vector<ElfNativeProgramHeader> m_program_headers;
LoadableElfFile m_executable;
LoadableElfFile m_interpreter;
Kernel::PageTable& m_page_table;
size_t m_virtual_page_count { 0 };
size_t m_physical_page_count { 0 };

View File

@@ -137,41 +137,4 @@ namespace LibELF
PF_MASKPROC = 0xFF000000,
};
enum ELF_DT
{
DT_NULL = 0,
DT_NEEDED = 1,
DT_PLTRELSZ = 2,
DT_PLTGOT = 3,
DT_HASH = 4,
DT_STRTAB = 5,
DT_SYMTAB = 6,
DT_RELA = 7,
DT_RELASZ = 8,
DT_RELAENT = 9,
DT_STRSZ = 10,
DT_SYMENT = 11,
DT_INIT = 12,
DT_FINI = 13,
DT_SONAME = 14,
DT_RPATH = 15,
DT_SYMBOLIC = 16,
DT_REL = 17,
DT_RELSZ = 18,
DT_RELENT = 19,
DT_PLTREL = 20,
DT_DEBUG = 21,
DT_TEXTREL = 22,
DT_JMPREL = 23,
DT_BIND_NOW = 24,
DT_INIT_ARRAY = 25,
DT_FINI_ARRAY = 26,
DT_INIT_ARRAYSZ = 27,
DT_FINI_ARRAYSZ = 28,
DT_LOOS = 0x60000000,
DT_HIOS = 0x6FFFFFFF,
DT_LOPROC = 0x70000000,
DT_HIPROC = 0x7FFFFFFF,
};
}