Compare commits

...

16 Commits

Author SHA1 Message Date
Bananymous 8b2bb95b81 Kernel: VirtualRange doesn't store physical addresses of pages
This was an unnecessary allocation, since the page table already
contains virtual address -> physical address mappings.
2023-09-24 01:29:34 +03:00
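For illustration, the replacement pattern (taken from the VirtualRange changes further down in this compare): physical addresses are recovered from the page table on demand instead of being kept in a BAN::Vector<paddr_t>, for example when releasing the range.

	for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
		Heap::get().release_page(m_page_table.physical_address_of(vaddr() + offset));
	m_page_table.unmap_range(vaddr(), size());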
Bananymous 2ef496a24a Kernel: all mapped ranges are now stored in one container
We now just have a flag indicating whether a mapping can be unmapped.
2023-09-23 23:45:26 +03:00
Bananymous c0a89e8951 Kernel: Fully remove sys_alloc and sys_free
I could delete the whole FixedWidthAllocator, as it was now obsolete.
GeneralAllocator is kept around for kmalloc, but kmalloc cannot actually
use it yet, since GeneralAllocator depends on SpinLock and kmalloc runs
without interrupts.
2023-09-23 03:53:30 +03:00
Bananymous fc953df281 Kernel/LibC: remove PATH resolution from kernel
I have no idea why I had made PATH environment variable parsing
part of the kernel. Now the shell does the parsing, and the
environment syscall is no longer needed.
2023-09-23 03:08:14 +03:00
Bananymous fe2dca16f0 Kernel/LibC: add flag to enable/disable sse support
SSE support is very experimental and causes general protection faults.
I decided to disable SSE by default until I get around to fixing it :)
2023-09-23 02:28:25 +03:00
Bananymous f662aa6da2 Kernel/LibC: userspace malloc now uses mmap to get memory
This lets us remove the syscalls for allocating more memory. That was
not something the kernel should have been doing.
2023-09-23 02:26:23 +03:00
Bananymous fee3677fb9 Kernel/LibC: add mmap for private anonymous mappings
This will be used by userspace to get more memory. Currently the
kernel handles all allocations, which is not preferable.
2023-09-22 23:01:14 +03:00
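A hedged sketch of the userspace side these two commits enable: the libc allocator asks the kernel for backing memory with a private anonymous mmap instead of dedicated allocation syscalls. The helper name grab_pages and the PROT flags are assumptions for illustration; only the mapping type comes from the commit.

	#include <stddef.h>
	#include <sys/mman.h>

	// Hypothetical helper: fetch `bytes` of zero-filled memory from the kernel.
	static void* grab_pages(size_t bytes)
	{
		void* addr = mmap(nullptr, bytes, PROT_READ | PROT_WRITE,
		                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (addr == MAP_FAILED)
			return nullptr;
		return addr; // malloc carves individual allocations out of this block
	}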
Bananymous 4818c6e3dd BuildSystem: Add cmake target for debugging qemu 2023-09-22 17:20:35 +03:00
Bananymous 971eb737c1 BAN: Fix LinkedList::pop_back() 2023-09-22 17:20:35 +03:00
Bananymous 9a3286ad57 Kernel: Add constexpr conditional debug prints 2023-09-22 17:20:35 +03:00
Bananymous c9e09b840e Kernel: Add LAI as a dependency
I did not feel like implementing an AML interpreter right now, but I
wanted everything AML has to offer. I will write my own AML interpreter
at some point.
2023-09-22 17:20:35 +03:00
Bananymous 8136248a67 Kernel: Fix timer includes 2023-09-22 17:20:35 +03:00
Bananymous 0d67e46041 Kernel: Add config read/write api to PCI 2023-09-22 17:20:35 +03:00
Bananymous bc1087f5a7 Kernel: Add pointer validation API to page table 2023-09-22 17:20:35 +03:00
Bananymous 3a9c6fc51a General: remove linecount.sh 2023-09-22 17:20:35 +03:00
Bananymous 7774f56ab6 Kernel: PCI devices can now create region for BAR
This creates either MEM or IO region for read/write access to PCI
device.
2023-09-22 17:20:35 +03:00
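A usage sketch based on the E1000 changes later in this compare: a driver allocates a BarRegion for BAR 0 and then accesses registers through it without caring whether the BAR is memory- or I/O-mapped.

	m_bar_region = TRY(pci_device.allocate_bar_region(0));
	// read32/write32 (and the 8/16-bit variants) dispatch to MMIO or port IO
	// depending on the BAR type, so the driver code stays the same either way.
	uint32_t status = m_bar_region->read32(E1000_REG_STATUS);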
41 changed files with 1182 additions and 868 deletions

4
.gitmodules vendored Normal file
View File

@ -0,0 +1,4 @@
[submodule "kernel/lai"]
path = kernel/lai
url = https://github.com/managarm/lai.git
ignore = untracked

View File

@ -195,7 +195,7 @@ namespace BAN
template<typename T>
void LinkedList<T>::pop_back()
{
return remove(m_last);
remove(iterator(m_last, false));
}
template<typename T>

View File

@ -13,10 +13,16 @@ set(CMAKE_CXX_STANDARD_REQUIRED True)
set(CMAKE_CXX_COMPILER ${TOOLCHAIN_PREFIX}/bin/${BANAN_ARCH}-banan_os-g++)
set(CMAKE_CXX_COMPILER_WORKS True)
set(CMAKE_C_COMPILER ${TOOLCHAIN_PREFIX}/bin/${BANAN_ARCH}-banan_os-gcc)
set(CMAKE_C_COMPILER_WORKS True)
if(NOT EXISTS ${CMAKE_CXX_COMPILER})
set(CMAKE_CXX_COMPILER g++)
endif()
add_compile_options(-mno-sse -mno-sse2)
add_compile_definitions(__enable_sse=0)
project(banan-os CXX)
set(BANAN_BASE_SYSROOT ${CMAKE_SOURCE_DIR}/base-sysroot.tar.gz)
@ -84,7 +90,13 @@ add_custom_target(check-fs
)
add_custom_target(qemu
COMMAND ${CMAKE_COMMAND} -E env BANAN_ARCH="${BANAN_ARCH}" DISK_IMAGE_PATH="${DISK_IMAGE_PATH}" ${CMAKE_SOURCE_DIR}/qemu.sh
COMMAND ${CMAKE_COMMAND} -E env BANAN_ARCH="${BANAN_ARCH}" DISK_IMAGE_PATH="${DISK_IMAGE_PATH}" ${CMAKE_SOURCE_DIR}/qemu.sh -accel kvm
DEPENDS image
USES_TERMINAL
)
add_custom_target(qemu-debug
COMMAND ${CMAKE_COMMAND} -E env BANAN_ARCH="${BANAN_ARCH}" DISK_IMAGE_PATH="${DISK_IMAGE_PATH}" ${CMAKE_SOURCE_DIR}/qemu.sh -d int -no-reboot
DEPENDS image
USES_TERMINAL
)

View File

@ -1,6 +1,6 @@
cmake_minimum_required(VERSION 3.26)
project(kernel CXX ASM)
project(kernel CXX C ASM)
if("${BANAN_ARCH}" STREQUAL "x86_64")
set(ELF_FORMAT elf64-x86-64)
@ -32,7 +32,6 @@ set(KERNEL_SOURCES
kernel/Input/PS2Keymap.cpp
kernel/InterruptController.cpp
kernel/kernel.cpp
kernel/Memory/FixedWidthAllocator.cpp
kernel/Memory/GeneralAllocator.cpp
kernel/Memory/Heap.cpp
kernel/Memory/kmalloc.cpp
@ -98,6 +97,14 @@ else()
message(FATAL_ERROR "unsupported architecure ${BANAN_ARCH}")
endif()
file(GLOB_RECURSE LAI_SOURCES
lai/*.c
)
set(LAI_SOURCES
${LAI_SOURCES}
kernel/lai_host.cpp
)
set(BAN_SOURCES
../BAN/BAN/New.cpp
../BAN/BAN/String.cpp
@ -116,6 +123,7 @@ set(LIBELF_SOURCES
set(KERNEL_SOURCES
${KERNEL_SOURCES}
${LAI_SOURCES}
${BAN_SOURCES}
${LIBC_SOURCES}
${LIBELF_SOURCES}
@ -128,10 +136,9 @@ target_compile_definitions(kernel PUBLIC __is_kernel)
target_compile_definitions(kernel PUBLIC __arch=${BANAN_ARCH})
target_compile_options(kernel PUBLIC -O2 -g)
target_compile_options(kernel PUBLIC -Wno-literal-suffix)
target_compile_options(kernel PUBLIC -fno-rtti -fno-exceptions)
target_compile_options(kernel PUBLIC $<$<COMPILE_LANGUAGE:CXX>:-Wno-literal-suffix -fno-rtti -fno-exceptions>)
target_compile_options(kernel PUBLIC -fmacro-prefix-map=${CMAKE_CURRENT_SOURCE_DIR}=.)
target_compile_options(kernel PUBLIC -fstack-protector -ffreestanding -Wall -Wextra -Werror=return-type -Wstack-usage=1024 -fno-omit-frame-pointer -mgeneral-regs-only)
target_compile_options(kernel PUBLIC -fstack-protector -ffreestanding -Wall -Werror=return-type -Wstack-usage=1024 -fno-omit-frame-pointer -mgeneral-regs-only)
if(ENABLE_KERNEL_UBSAN)
target_compile_options(kernel PUBLIC -fsanitize=undefined)
@ -160,6 +167,7 @@ add_custom_command(
add_custom_target(kernel-headers
COMMAND sudo rsync -a ${CMAKE_CURRENT_SOURCE_DIR}/include/ ${BANAN_INCLUDE}/
COMMAND sudo rsync -a ${CMAKE_CURRENT_SOURCE_DIR}/lai/include/ ${BANAN_INCLUDE}/
DEPENDS sysroot
USES_TERMINAL
)

View File

@ -139,10 +139,11 @@ namespace IDT
extern "C" void cpp_isr_handler(uint64_t isr, uint64_t error, Kernel::InterruptStack& interrupt_stack, const Registers* regs)
{
#if __enable_sse
bool from_userspace = (interrupt_stack.cs & 0b11) == 0b11;
if (from_userspace)
Kernel::Thread::current().save_sse();
#endif
pid_t tid = Kernel::Scheduler::current_tid();
pid_t pid = tid ? Kernel::Process::current().pid() : 0;
@ -205,19 +206,22 @@ namespace IDT
ASSERT(Kernel::Thread::current().state() != Kernel::Thread::State::Terminated);
#if __enable_sse
if (from_userspace)
{
ASSERT(Kernel::Thread::current().state() == Kernel::Thread::State::Executing);
Kernel::Thread::current().load_sse();
}
#endif
}
extern "C" void cpp_irq_handler(uint64_t irq, Kernel::InterruptStack& interrupt_stack)
{
#if __enable_sse
bool from_userspace = (interrupt_stack.cs & 0b11) == 0b11;
if (from_userspace)
Kernel::Thread::current().save_sse();
#endif
if (Kernel::Scheduler::current_tid())
{
@ -240,11 +244,13 @@ namespace IDT
ASSERT(Kernel::Thread::current().state() != Kernel::Thread::State::Terminated);
#if __enable_sse
if (from_userspace)
{
ASSERT(Kernel::Thread::current().state() == Kernel::Thread::State::Executing);
Kernel::Thread::current().load_sse();
}
#endif
}
static void flush_idt()

View File

@ -110,6 +110,13 @@ namespace Kernel
return *s_current;
}
bool PageTable::is_valid_pointer(uintptr_t pointer)
{
if (!is_canonical(pointer))
return false;
return true;
}
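// Editorial sketch: is_canonical() is not part of this hunk. A plausible
// implementation for x86_64 with 4-level paging, where a canonical address
// has bits 63:47 either all zero or all one (an assumption; the real check
// may differ):
static constexpr bool is_canonical(uintptr_t addr)
{
	constexpr uintptr_t upper_bits = 0xFFFF800000000000;
	return (addr & upper_bits) == 0 || (addr & upper_bits) == upper_bits;
}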
static uint64_t* allocate_zeroed_page_aligned_page()
{
void* page = kmalloc(PAGE_SIZE, PAGE_SIZE, true);

View File

@ -114,20 +114,31 @@ namespace Kernel
ACPI() = default;
BAN::ErrorOr<void> initialize_impl();
const SDTHeader* get_header_from_index(size_t);
private:
paddr_t m_header_table_paddr = 0;
vaddr_t m_header_table_vaddr = 0;
uint32_t m_entry_size = 0;
uint32_t m_entry_count = 0;
struct MappedPage
{
Kernel::paddr_t paddr;
Kernel::vaddr_t vaddr;
SDTHeader* as_header() { return (SDTHeader*)vaddr; }
};
BAN::Vector<MappedPage> m_mapped_headers;
};
}
}
namespace BAN::Formatter
{
template<typename F>
void print_argument(F putc, const Kernel::ACPI::SDTHeader& header, const ValueFormat& format)
{
putc(header.signature[0]);
putc(header.signature[1]);
putc(header.signature[2]);
putc(header.signature[3]);
}
}

View File

@ -21,4 +21,8 @@
#include <stdint.h>
#ifdef __cplusplus
extern "C" uintptr_t read_rip();
#else
extern uintptr_t read_rip();
#endif

View File

@ -29,6 +29,24 @@
Debug::DebugLock::unlock(); \
} while(false)
#define dprintln_if(cond, ...) \
do { \
if constexpr(cond) \
dprintln(__VA_ARGS__); \
} while(false)
#define dwarnln_if(cond, ...) \
do { \
if constexpr(cond) \
dwarnln(__VA_ARGS__); \
} while(false)
#define derrorln_if(cond, ...) \
do { \
if constexpr(cond) \
derrorln(__VA_ARGS__); \
} while(false)
#define BOCHS_BREAK() asm volatile("xchgw %bx, %bx")
namespace Debug

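A usage sketch for the new conditional prints, assuming a compile-time flag in the style of the DEBUG_E1000 / DEBUG_PCI defines added elsewhere in this compare. Because the condition goes through if constexpr, the print generates no code when the flag is 0, while the arguments are still type-checked.

	#define DEBUG_MY_DRIVER 0 // hypothetical per-driver debug switch

	// inside driver code:
	dprintln_if(DEBUG_MY_DRIVER, "mapped BAR{} at {}", 0, (void*)0xC0000000);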
View File

@ -1,64 +0,0 @@
#pragma once
#include <BAN/Errors.h>
#include <BAN/UniqPtr.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/PageTable.h>
namespace Kernel
{
class FixedWidthAllocator
{
BAN_NON_COPYABLE(FixedWidthAllocator);
BAN_NON_MOVABLE(FixedWidthAllocator);
public:
static BAN::ErrorOr<BAN::UniqPtr<FixedWidthAllocator>> create(PageTable&, uint32_t);
~FixedWidthAllocator();
BAN::ErrorOr<BAN::UniqPtr<FixedWidthAllocator>> clone(PageTable&);
vaddr_t allocate();
bool deallocate(vaddr_t);
uint32_t allocation_size() const { return m_allocation_size; }
uint32_t allocations() const { return m_allocations; }
uint32_t max_allocations() const;
private:
FixedWidthAllocator(PageTable&, uint32_t);
BAN::ErrorOr<void> initialize();
bool allocate_page_if_needed(vaddr_t, uint8_t flags);
struct node
{
node* prev { nullptr };
node* next { nullptr };
bool allocated { false };
};
vaddr_t address_of_node(const node*) const;
node* node_from_address(vaddr_t) const;
void allocate_page_for_node_if_needed(const node*);
void allocate_node(node*);
void deallocate_node(node*);
private:
static constexpr uint32_t m_min_allocation_size = 16;
PageTable& m_page_table;
const uint32_t m_allocation_size;
vaddr_t m_nodes_page { 0 };
vaddr_t m_allocated_pages { 0 };
node* m_free_list { nullptr };
node* m_used_list { nullptr };
uint32_t m_allocations { 0 };
};
}

View File

@ -29,13 +29,15 @@ namespace Kernel
static PageTable& kernel();
static PageTable& current();
static bool is_valid_pointer(uintptr_t);
static BAN::ErrorOr<PageTable*> create_userspace();
~PageTable();
void unmap_page(vaddr_t);
void unmap_range(vaddr_t, size_t bytes);
void map_range_at(paddr_t, vaddr_t, size_t, flags_t);
void map_range_at(paddr_t, vaddr_t, size_t bytes, flags_t);
void map_page_at(paddr_t, vaddr_t, flags_t);
paddr_t physical_address_of(vaddr_t) const;

View File

@ -40,7 +40,6 @@ namespace Kernel
vaddr_t m_vaddr { 0 };
size_t m_size { 0 };
PageTable::flags_t m_flags { 0 };
BAN::Vector<paddr_t> m_physical_pages;
};
}

View File

@ -13,7 +13,7 @@ namespace Kernel
class E1000 final : public NetworkDriver
{
public:
static BAN::ErrorOr<BAN::UniqPtr<E1000>> create(const PCIDevice&);
static BAN::ErrorOr<BAN::UniqPtr<E1000>> create(PCI::Device&);
~E1000();
virtual uint8_t* get_mac_address() override { return m_mac_address; }
@ -24,12 +24,12 @@ namespace Kernel
private:
E1000() = default;
BAN::ErrorOr<void> initialize(const PCIDevice&);
BAN::ErrorOr<void> initialize(PCI::Device&);
static void interrupt_handler();
void write32(uint16_t reg, uint32_t value);
uint32_t read32(uint16_t reg);
void write32(uint16_t reg, uint32_t value);
void detect_eeprom();
uint32_t eeprom_read(uint8_t addr);
@ -44,8 +44,7 @@ namespace Kernel
void handle_receive();
private:
PCIDevice::BarType m_bar_type {};
uint64_t m_bar_addr {};
BAN::UniqPtr<PCI::BarRegion> m_bar_region;
bool m_has_eerprom { false };
uint8_t m_mac_address[6] {};
uint16_t m_rx_current {};

View File

@ -1,28 +1,65 @@
#pragma once
#include <BAN/UniqPtr.h>
#include <BAN/Vector.h>
#include <kernel/Memory/Types.h>
namespace Kernel
namespace Kernel::PCI
{
class PCIDevice
enum class BarType
{
public:
enum class BarType
{
INVAL,
MEM,
IO,
};
INVALID,
MEM,
IO,
};
class Device;
class BarRegion
{
BAN_NON_COPYABLE(BarRegion);
public:
PCIDevice(uint8_t, uint8_t, uint8_t);
static BAN::ErrorOr<BAN::UniqPtr<BarRegion>> create(PCI::Device&, uint8_t bar_num);
~BarRegion();
BarType type() const { return m_type; }
vaddr_t vaddr() const { return m_vaddr; }
paddr_t paddr() const { return m_paddr; }
size_t size() const { return m_size; }
void write8(off_t, uint8_t);
void write16(off_t, uint16_t);
void write32(off_t, uint32_t);
uint8_t read8(off_t);
uint16_t read16(off_t);
uint32_t read32(off_t);
private:
BarRegion(BarType, paddr_t, size_t);
BAN::ErrorOr<void> initialize();
private:
const BarType m_type {};
const paddr_t m_paddr {};
const size_t m_size {};
vaddr_t m_vaddr {};
};
class Device
{
public:
Device(uint8_t, uint8_t, uint8_t);
uint32_t read_dword(uint8_t) const;
uint16_t read_word(uint8_t) const;
uint8_t read_byte(uint8_t) const;
void write_dword(uint8_t, uint32_t) const;
void write_dword(uint8_t, uint32_t);
void write_word(uint8_t, uint16_t);
void write_byte(uint8_t, uint8_t);
uint8_t bus() const { return m_bus; }
uint8_t dev() const { return m_dev; }
@ -32,17 +69,24 @@ namespace Kernel
uint8_t subclass() const { return m_subclass; }
uint8_t prog_if() const { return m_prog_if; }
BarType read_bar_type(uint8_t) const;
uint64_t read_bar_address(uint8_t) const;
uint8_t header_type() const { return m_header_type; }
void enable_bus_mastering() const;
void disable_bus_mastering() const;
BAN::ErrorOr<BAN::UniqPtr<BarRegion>> allocate_bar_region(uint8_t bar_num);
void enable_memory_space() const;
void disable_memory_space() const;
void enable_bus_mastering();
void disable_bus_mastering();
void enable_pin_interrupts() const;
void disable_pin_interrupts() const;
void enable_memory_space();
void disable_memory_space();
void enable_io_space();
void disable_io_space();
void enable_pin_interrupts();
void disable_pin_interrupts();
private:
void enumerate_capabilites();
private:
uint8_t m_bus;
@ -56,19 +100,27 @@ namespace Kernel
uint8_t m_header_type;
};
class PCI
class PCIManager
{
BAN_NON_COPYABLE(PCI);
BAN_NON_MOVABLE(PCI);
BAN_NON_COPYABLE(PCIManager);
BAN_NON_MOVABLE(PCIManager);
public:
static void initialize();
static PCI& get();
static PCIManager& get();
const BAN::Vector<PCIDevice>& devices() const { return m_devices; }
const BAN::Vector<PCI::Device>& devices() const { return m_devices; }
static uint32_t read_config_dword(uint8_t bus, uint8_t dev, uint8_t func, uint8_t offset);
static uint16_t read_config_word(uint8_t bus, uint8_t dev, uint8_t func, uint8_t offset);
static uint8_t read_config_byte(uint8_t bus, uint8_t dev, uint8_t func, uint8_t offset);
static void write_config_dword(uint8_t bus, uint8_t dev, uint8_t func, uint8_t offset, uint32_t value);
static void write_config_word(uint8_t bus, uint8_t dev, uint8_t func, uint8_t offset, uint16_t value);
static void write_config_byte(uint8_t bus, uint8_t dev, uint8_t func, uint8_t offset, uint8_t value);
private:
PCI() = default;
PCIManager() = default;
void check_function(uint8_t bus, uint8_t dev, uint8_t func);
void check_device(uint8_t bus, uint8_t dev);
void check_bus(uint8_t bus);
@ -76,7 +128,7 @@ namespace Kernel
void initialize_devices();
private:
BAN::Vector<PCIDevice> m_devices;
BAN::Vector<PCI::Device> m_devices;
};
}

View File

@ -6,8 +6,6 @@
#include <BAN/Vector.h>
#include <kernel/Credentials.h>
#include <kernel/FS/Inode.h>
#include <kernel/Memory/FixedWidthAllocator.h>
#include <kernel/Memory/GeneralAllocator.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/VirtualRange.h>
#include <kernel/OpenFileDescriptorSet.h>
@ -15,6 +13,7 @@
#include <kernel/Terminal/TTY.h>
#include <kernel/Thread.h>
#include <sys/mman.h>
#include <termios.h>
namespace LibELF { class ELF; }
@ -70,8 +69,6 @@ namespace Kernel
BAN::ErrorOr<long> sys_sleep(int seconds);
BAN::ErrorOr<long> sys_nanosleep(const timespec* rqtp, timespec* rmtp);
BAN::ErrorOr<long> sys_setenvp(char** envp);
BAN::ErrorOr<long> sys_setpwd(const char* path);
BAN::ErrorOr<long> sys_getpwd(char* buffer, size_t size);
@ -115,8 +112,8 @@ namespace Kernel
BAN::ErrorOr<long> sys_read_dir_entries(int fd, DirectoryEntryList* buffer, size_t buffer_size);
BAN::ErrorOr<long> sys_alloc(size_t);
BAN::ErrorOr<long> sys_free(void*);
BAN::ErrorOr<long> sys_mmap(const sys_mmap_t&);
BAN::ErrorOr<long> sys_munmap(void* addr, size_t len);
BAN::ErrorOr<long> sys_signal(int, void (*)(int));
BAN::ErrorOr<long> sys_raise(int signal);
@ -143,7 +140,7 @@ namespace Kernel
static void register_process(Process*);
// Load an elf file to virtual address space of the current page table
static BAN::ErrorOr<BAN::UniqPtr<LibELF::ELF>> load_elf_for_exec(const Credentials&, BAN::StringView file_path, const BAN::String& cwd, const BAN::Vector<BAN::StringView>& path_env);
static BAN::ErrorOr<BAN::UniqPtr<LibELF::ELF>> load_elf_for_exec(const Credentials&, BAN::StringView file_path, const BAN::String& cwd);
// Copy an elf file from the current page table to the processes own
void load_elf_to_memory(LibELF::ELF&);
@ -161,11 +158,17 @@ namespace Kernel
int waiting { 0 };
};
struct MappedRange
{
bool can_be_unmapped;
BAN::UniqPtr<VirtualRange> range;
};
Credentials m_credentials;
OpenFileDescriptorSet m_open_file_descriptors;
BAN::Vector<BAN::UniqPtr<VirtualRange>> m_mapped_ranges;
BAN::Vector<MappedRange> m_mapped_ranges;
pid_t m_sid;
pid_t m_pgrp;
@ -177,9 +180,6 @@ namespace Kernel
BAN::String m_working_directory;
BAN::Vector<Thread*> m_threads;
BAN::Vector<BAN::UniqPtr<FixedWidthAllocator>> m_fixed_width_allocators;
BAN::UniqPtr<GeneralAllocator> m_general_allocator;
vaddr_t m_signal_handlers[_SIGMAX + 1] { };
uint64_t m_signal_pending_mask { 0 };

View File

@ -11,13 +11,13 @@ namespace Kernel
class ATAController final : public StorageController
{
public:
static BAN::ErrorOr<BAN::RefPtr<ATAController>> create(const PCIDevice&);
static BAN::ErrorOr<BAN::RefPtr<ATAController>> create(const PCI::Device&);
virtual BAN::Vector<BAN::RefPtr<StorageDevice>> devices() override;
private:
ATAController();
BAN::ErrorOr<void> initialize(const PCIDevice& device);
BAN::ErrorOr<void> initialize(const PCI::Device& device);
private:
ATABus* m_buses[2] { nullptr, nullptr };

View File

@ -82,8 +82,10 @@ namespace Kernel
bool is_userspace() const { return m_is_userspace; }
#if __enable_sse
void save_sse() { asm volatile("fxsave %0" :: "m"(m_sse_storage)); }
void load_sse() { asm volatile("fxrstor %0" :: "m"(m_sse_storage)); }
#endif
private:
Thread(pid_t tid, Process*);
@ -114,7 +116,9 @@ namespace Kernel
uint64_t m_terminate_blockers { 0 };
#if __enable_sse
alignas(16) uint8_t m_sse_storage[512] {};
#endif
friend class Scheduler;
};

View File

@ -4,6 +4,8 @@
#include <BAN/Vector.h>
#include <kernel/Timer/RTC.h>
#include <time.h>
namespace Kernel
{

View File

@ -3,6 +3,8 @@
#include <kernel/ACPI.h>
#include <kernel/Memory/PageTable.h>
#include <lai/core.h>
#define RSPD_SIZE 20
#define RSPDv2_SIZE 36
@ -43,6 +45,7 @@ namespace Kernel
if (s_instance == nullptr)
return BAN::Error::from_errno(ENOMEM);
TRY(s_instance->initialize_impl());
lai_create_namespace();
return {};
}
@ -101,6 +104,9 @@ namespace Kernel
const RSDP* rsdp = locate_rsdp();
if (rsdp == nullptr)
return BAN::Error::from_error_code(ErrorCode::ACPI_NoRootSDT);
lai_set_acpi_revision(rsdp->revision);
uint32_t root_entry_count = 0;
if (rsdp->revision >= 2)
{
@ -115,7 +121,7 @@ namespace Kernel
m_header_table_paddr = (paddr_t)xsdt->entries + (rsdp->rsdt_address & PAGE_ADDR_MASK);
m_entry_size = 8;
m_entry_count = (xsdt->length - sizeof(SDTHeader)) / 8;
root_entry_count = (xsdt->length - sizeof(SDTHeader)) / 8;
}
else
{
@ -130,10 +136,10 @@ namespace Kernel
m_header_table_paddr = (paddr_t)rsdt->entries + (rsdp->rsdt_address & PAGE_ADDR_MASK);
m_entry_size = 4;
m_entry_count = (rsdt->length - sizeof(SDTHeader)) / 4;
root_entry_count = (rsdt->length - sizeof(SDTHeader)) / 4;
}
size_t needed_pages = range_page_count(m_header_table_paddr, m_entry_count * m_entry_size);
size_t needed_pages = range_page_count(m_header_table_paddr, root_entry_count * m_entry_size);
m_header_table_vaddr = PageTable::kernel().reserve_free_contiguous_pages(needed_pages, KERNEL_OFFSET);
ASSERT(m_header_table_vaddr);
@ -146,61 +152,85 @@ namespace Kernel
PageTable::Flags::Present
);
for (uint32_t i = 0; i < m_entry_count; i++)
auto map_header =
[](paddr_t header_paddr) -> vaddr_t
{
PageTable::kernel().map_page_at(header_paddr & PAGE_ADDR_MASK, 0, PageTable::Flags::Present);
size_t header_length = ((SDTHeader*)(header_paddr % PAGE_SIZE))->length;
PageTable::kernel().unmap_page(0);
size_t needed_pages = range_page_count(header_paddr, header_length);
vaddr_t page_vaddr = PageTable::kernel().reserve_free_contiguous_pages(needed_pages, KERNEL_OFFSET);
ASSERT(page_vaddr);
PageTable::kernel().map_range_at(
header_paddr & PAGE_ADDR_MASK,
page_vaddr,
needed_pages * PAGE_SIZE,
PageTable::Flags::Present
);
auto* header = (SDTHeader*)(page_vaddr + (header_paddr % PAGE_SIZE));
if (!is_valid_std_header(header))
{
PageTable::kernel().unmap_range(page_vaddr, needed_pages * PAGE_SIZE);
return 0;
}
return page_vaddr + (header_paddr % PAGE_SIZE);
};
for (uint32_t i = 0; i < root_entry_count; i++)
{
paddr_t header_paddr = (m_entry_size == 4) ?
((uint32_t*)m_header_table_vaddr)[i] :
((uint64_t*)m_header_table_vaddr)[i];
PageTable::kernel().map_page_at(header_paddr & PAGE_ADDR_MASK, 0, PageTable::Flags::Present);
size_t header_length = ((SDTHeader*)(header_paddr % PAGE_SIZE))->length;
PageTable::kernel().unmap_page(0);
size_t needed_pages = range_page_count(header_paddr, header_length);
vaddr_t page_vaddr = PageTable::kernel().reserve_free_contiguous_pages(needed_pages, KERNEL_OFFSET);
ASSERT(page_vaddr);
PageTable::kernel().map_range_at(
header_paddr & PAGE_ADDR_MASK,
page_vaddr,
needed_pages * PAGE_SIZE,
PageTable::Flags::Present
);
vaddr_t header_vaddr = map_header(header_paddr);
if (header_vaddr == 0)
continue;
MUST(m_mapped_headers.push_back({
.paddr = header_paddr,
.vaddr = page_vaddr + (header_paddr % PAGE_SIZE)
.vaddr = header_vaddr
}));
}
for (size_t i = 0; i < m_mapped_headers.size(); i++)
{
auto* header = m_mapped_headers[i].as_header();
dprintln("found header {}", *header);
if (memcmp(header->signature, "FACP", 4) == 0)
{
auto* fadt = (FADT*)header;
paddr_t dsdt_paddr = fadt->x_dsdt;
if (dsdt_paddr == 0 || !PageTable::is_valid_pointer(dsdt_paddr))
dsdt_paddr = fadt->dsdt;
vaddr_t dsdt_vaddr = map_header(dsdt_paddr);
if (dsdt_vaddr == 0)
continue;
MUST(m_mapped_headers.push_back({
.paddr = dsdt_paddr,
.vaddr = dsdt_vaddr
}));
}
}
return {};
}
const ACPI::SDTHeader* ACPI::get_header(const char signature[4])
{
for (uint32_t i = 0; i < m_entry_count; i++)
for (auto& mapped_header : m_mapped_headers)
{
const SDTHeader* header = get_header_from_index(i);
if (is_valid_std_header(header) && memcmp(header->signature, signature, 4) == 0)
auto* header = mapped_header.as_header();
if (memcmp(header->signature, signature, 4) == 0)
return header;
}
return nullptr;
}
const ACPI::SDTHeader* ACPI::get_header_from_index(size_t index)
{
ASSERT(index < m_entry_count);
ASSERT(m_entry_size == 4 || m_entry_size == 8);
paddr_t header_paddr = (m_entry_size == 4) ?
((uint32_t*)m_header_table_vaddr)[index] :
((uint64_t*)m_header_table_vaddr)[index];
for (const auto& page : m_mapped_headers)
if (page.paddr == header_paddr)
return (SDTHeader*)page.vaddr;
ASSERT_NOT_REACHED();
}
}

View File

@ -1,288 +0,0 @@
#include <kernel/Memory/FixedWidthAllocator.h>
namespace Kernel
{
BAN::ErrorOr<BAN::UniqPtr<FixedWidthAllocator>> FixedWidthAllocator::create(PageTable& page_table, uint32_t allocation_size)
{
auto* allocator_ptr = new FixedWidthAllocator(page_table, allocation_size);
if (allocator_ptr == nullptr)
return BAN::Error::from_errno(ENOMEM);
auto allocator = BAN::UniqPtr<FixedWidthAllocator>::adopt(allocator_ptr);
TRY(allocator->initialize());
return allocator;
}
FixedWidthAllocator::FixedWidthAllocator(PageTable& page_table, uint32_t allocation_size)
: m_page_table(page_table)
, m_allocation_size(BAN::Math::max(allocation_size, m_min_allocation_size))
{
ASSERT(BAN::Math::is_power_of_two(allocation_size));
}
BAN::ErrorOr<void> FixedWidthAllocator::initialize()
{
m_nodes_page = (vaddr_t)kmalloc(PAGE_SIZE);
if (!m_nodes_page)
return BAN::Error::from_errno(ENOMEM);
m_allocated_pages = (vaddr_t)kmalloc(PAGE_SIZE);
if (!m_allocated_pages)
{
kfree((void*)m_nodes_page);
m_nodes_page = 0;
return BAN::Error::from_errno(ENOMEM);
}
memset((void*)m_nodes_page, 0, PAGE_SIZE);
memset((void*)m_allocated_pages, 0, PAGE_SIZE);
node* node_table = (node*)m_nodes_page;
for (uint32_t i = 0; i < PAGE_SIZE / sizeof(node); i++)
{
node_table[i].next = &node_table[i + 1];
node_table[i].prev = &node_table[i - 1];
}
node_table[0].prev = nullptr;
node_table[PAGE_SIZE / sizeof(node) - 1].next = nullptr;
m_free_list = node_table;
m_used_list = nullptr;
return {};
}
FixedWidthAllocator::~FixedWidthAllocator()
{
if (m_nodes_page && m_allocated_pages)
{
for (uint32_t page_index = 0; page_index < PAGE_SIZE / sizeof(vaddr_t); page_index++)
{
vaddr_t page_vaddr = ((vaddr_t*)m_allocated_pages)[page_index];
if (page_vaddr == 0)
continue;
ASSERT(!m_page_table.is_page_free(page_vaddr));
Heap::get().release_page(m_page_table.physical_address_of(page_vaddr));
m_page_table.unmap_page(page_vaddr);
}
}
if (m_nodes_page)
kfree((void*)m_nodes_page);
if (m_allocated_pages)
kfree((void*)m_allocated_pages);
}
paddr_t FixedWidthAllocator::allocate()
{
if (m_free_list == nullptr)
return 0;
node* node = m_free_list;
allocate_node(node);
allocate_page_for_node_if_needed(node);
return address_of_node(node);
}
bool FixedWidthAllocator::deallocate(vaddr_t address)
{
if (address % m_allocation_size)
return false;
if (m_allocations == 0)
return false;
node* node = node_from_address(address);
if (node == nullptr)
return false;
if (!node->allocated)
{
dwarnln("deallocate called on unallocated address");
return true;
}
deallocate_node(node);
return true;
}
void FixedWidthAllocator::allocate_node(node* node)
{
ASSERT(!node->allocated);
node->allocated = true;
if (node == m_free_list)
m_free_list = node->next;
if (node->prev)
node->prev->next = node->next;
if (node->next)
node->next->prev = node->prev;
node->next = m_used_list;
node->prev = nullptr;
if (m_used_list)
m_used_list->prev = node;
m_used_list = node;
m_allocations++;
}
void FixedWidthAllocator::deallocate_node(node* node)
{
ASSERT(node->allocated);
node->allocated = false;
if (node == m_used_list)
m_used_list = node->next;
if (node->prev)
node->prev->next = node->next;
if (node->next)
node->next->prev = node->prev;
node->next = m_free_list;
node->prev = nullptr;
if (m_free_list)
m_free_list->prev = node;
m_free_list = node;
m_allocations--;
}
uint32_t FixedWidthAllocator::max_allocations() const
{
return PAGE_SIZE / sizeof(node);
}
vaddr_t FixedWidthAllocator::address_of_node(const node* node) const
{
uint32_t index = node - (struct node*)m_nodes_page;
uint32_t page_index = index / (PAGE_SIZE / m_allocation_size);
ASSERT(page_index < PAGE_SIZE / sizeof(vaddr_t));
uint32_t offset = index % (PAGE_SIZE / m_allocation_size);
vaddr_t page_begin = ((vaddr_t*)m_allocated_pages)[page_index];
ASSERT(page_begin);
return page_begin + offset * m_allocation_size;
}
FixedWidthAllocator::node* FixedWidthAllocator::node_from_address(vaddr_t address) const
{
// TODO: This probably should be optimized from O(n) preferably to O(1) but I
// don't want to think about performance now.
ASSERT(address % m_allocation_size == 0);
vaddr_t page_begin = address / PAGE_SIZE * PAGE_SIZE;
for (uint32_t page_index = 0; page_index < PAGE_SIZE / sizeof(vaddr_t); page_index++)
{
vaddr_t vaddr = ((vaddr_t*)m_allocated_pages)[page_index];
if (vaddr != page_begin)
continue;
uint32_t offset = (address - page_begin) / m_allocation_size;
node* result = (node*)m_nodes_page;
result += page_index * PAGE_SIZE / m_allocation_size;
result += offset;
ASSERT(address_of_node(result) == address);
return result;
}
return nullptr;
}
void FixedWidthAllocator::allocate_page_for_node_if_needed(const node* node)
{
uint32_t index = node - (struct node*)m_nodes_page;
uint32_t page_index = index / (PAGE_SIZE / m_allocation_size);
ASSERT(page_index < PAGE_SIZE / sizeof(vaddr_t));
vaddr_t& page_vaddr = ((vaddr_t*)m_allocated_pages)[page_index];
if (page_vaddr)
return;
paddr_t page_paddr = Heap::get().take_free_page();
ASSERT(page_paddr);
page_vaddr = m_page_table.reserve_free_page(0x300000);
ASSERT(page_vaddr);
m_page_table.map_page_at(page_paddr, page_vaddr, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present);
}
bool FixedWidthAllocator::allocate_page_if_needed(vaddr_t vaddr, uint8_t flags)
{
ASSERT(vaddr % PAGE_SIZE == 0);
// Check if page is already allocated
for (uint32_t page_index = 0; page_index < PAGE_SIZE / sizeof(vaddr_t); page_index++)
{
vaddr_t page_begin = ((vaddr_t*)m_allocated_pages)[page_index];
if (vaddr == page_begin)
return false;
}
// Page is not allocated so the vaddr must not be in use
ASSERT(m_page_table.is_page_free(vaddr));
// Allocate the vaddr on empty page
for (uint32_t page_index = 0; page_index < PAGE_SIZE / sizeof(vaddr_t); page_index++)
{
vaddr_t& page_begin = ((vaddr_t*)m_allocated_pages)[page_index];
if (page_begin == 0)
{
paddr_t paddr = Heap::get().take_free_page();
ASSERT(paddr);
m_page_table.map_page_at(paddr, vaddr, flags);
page_begin = vaddr;
return true;
}
}
ASSERT_NOT_REACHED();
}
BAN::ErrorOr<BAN::UniqPtr<FixedWidthAllocator>> FixedWidthAllocator::clone(PageTable& new_page_table)
{
auto allocator = TRY(FixedWidthAllocator::create(new_page_table, allocation_size()));
m_page_table.lock();
ASSERT(m_page_table.is_page_free(0));
for (node* node = m_used_list; node; node = node->next)
{
ASSERT(node->allocated);
vaddr_t vaddr = address_of_node(node);
vaddr_t page_begin = vaddr & PAGE_ADDR_MASK;
PageTable::flags_t flags = m_page_table.get_page_flags(page_begin);
// Allocate and copy all data from this allocation to the new one
if (allocator->allocate_page_if_needed(page_begin, flags))
{
paddr_t paddr = new_page_table.physical_address_of(page_begin);
m_page_table.map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
memcpy((void*)0, (void*)page_begin, PAGE_SIZE);
}
// Now that we are sure the page is allocated, we can access the node
struct node* new_node = allocator->node_from_address(vaddr);
allocator->allocate_node(new_node);
}
m_page_table.unmap_page(0);
m_page_table.unlock();
return allocator;
}
}

View File

@ -1,4 +1,3 @@
#include <BAN/ScopeGuard.h>
#include <kernel/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/VirtualRange.h>
@ -21,27 +20,25 @@ namespace Kernel
result->m_vaddr = vaddr;
result->m_size = size;
result->m_flags = flags;
TRY(result->m_physical_pages.reserve(size / PAGE_SIZE));
ASSERT(page_table.reserve_range(vaddr, size));
BAN::ScopeGuard unmapper([vaddr, size, &page_table] { page_table.unmap_range(vaddr, size); });
TRY(result->m_physical_pages.reserve(size / PAGE_SIZE));
for (size_t offset = 0; offset < size; offset += PAGE_SIZE)
size_t needed_pages = size / PAGE_SIZE;
for (size_t i = 0; i < needed_pages; i++)
{
paddr_t paddr = Heap::get().take_free_page();
if (paddr == 0)
{
for (paddr_t release : result->m_physical_pages)
Heap::get().release_page(release);
for (size_t j = 0; j < i; j++)
Heap::get().release_page(page_table.physical_address_of(vaddr + j * PAGE_SIZE));
page_table.unmap_range(vaddr, size);
result->m_vaddr = 0;
return BAN::Error::from_errno(ENOMEM);
}
MUST(result->m_physical_pages.push_back(paddr));
page_table.map_page_at(paddr, vaddr + offset, flags);
page_table.map_page_at(paddr, vaddr + i * PAGE_SIZE, flags);
}
unmapper.disable();
return result;
}
@ -49,6 +46,7 @@ namespace Kernel
{
ASSERT(size % PAGE_SIZE == 0);
ASSERT(vaddr_start > 0);
ASSERT(vaddr_start + size <= vaddr_end);
// Align vaddr range to page boundaries
if (size_t rem = vaddr_start % PAGE_SIZE)
@ -64,37 +62,32 @@ namespace Kernel
auto result = BAN::UniqPtr<VirtualRange>::adopt(result_ptr);
result->m_kmalloc = false;
result->m_vaddr = 0;
result->m_size = size;
result->m_flags = flags;
TRY(result->m_physical_pages.reserve(size / PAGE_SIZE));
vaddr_t vaddr = page_table.reserve_free_contiguous_pages(size / PAGE_SIZE, vaddr_start, vaddr_end);
if (vaddr == 0)
return BAN::Error::from_errno(ENOMEM);
ASSERT(vaddr + size <= vaddr_end);
result->m_vaddr = vaddr;
BAN::ScopeGuard unmapper([vaddr, size, &page_table] { page_table.unmap_range(vaddr, size); });
if (vaddr + size > vaddr_end)
return BAN::Error::from_errno(ENOMEM);
size_t needed_pages = size / PAGE_SIZE;
result->m_vaddr = vaddr;
TRY(result->m_physical_pages.reserve(size / PAGE_SIZE));
for (size_t offset = 0; offset < size; offset += PAGE_SIZE)
for (size_t i = 0; i < needed_pages; i++)
{
paddr_t paddr = Heap::get().take_free_page();
if (paddr == 0)
{
for (paddr_t release : result->m_physical_pages)
Heap::get().release_page(release);
for (size_t j = 0; j < i; j++)
Heap::get().release_page(page_table.physical_address_of(vaddr + j * PAGE_SIZE));
page_table.unmap_range(vaddr, size);
result->m_vaddr = 0;
return BAN::Error::from_errno(ENOMEM);
}
MUST(result->m_physical_pages.push_back(paddr));
page_table.map_page_at(paddr, vaddr + offset, flags);
page_table.map_page_at(paddr, vaddr + i * PAGE_SIZE, flags);
}
unmapper.disable();
return result;
}
@ -122,33 +115,34 @@ namespace Kernel
VirtualRange::~VirtualRange()
{
if (m_kmalloc)
{
kfree((void*)m_vaddr);
if (m_vaddr == 0)
return;
}
m_page_table.unmap_range(vaddr(), size());
for (paddr_t page : m_physical_pages)
Heap::get().release_page(page);
if (m_kmalloc)
kfree((void*)m_vaddr);
else
{
for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
Heap::get().release_page(m_page_table.physical_address_of(vaddr() + offset));
m_page_table.unmap_range(vaddr(), size());
}
}
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::clone(PageTable& page_table)
{
ASSERT(&PageTable::current() == &m_page_table);
auto result = TRY(create_to_vaddr(page_table, vaddr(), size(), flags()));
m_page_table.lock();
LockGuard _(m_page_table);
ASSERT(m_page_table.is_page_free(0));
for (size_t i = 0; i < result->m_physical_pages.size(); i++)
for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
{
m_page_table.map_page_at(result->m_physical_pages[i], 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
memcpy((void*)0, (void*)(vaddr() + i * PAGE_SIZE), PAGE_SIZE);
m_page_table.map_page_at(result->m_page_table.physical_address_of(vaddr() + offset), 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
memcpy((void*)0, (void*)(vaddr() + offset), PAGE_SIZE);
}
m_page_table.unmap_page(0);
m_page_table.unlock();
return result;
}
@ -162,17 +156,14 @@ namespace Kernel
return;
}
page_table.lock();
LockGuard _(page_table);
ASSERT(page_table.is_page_free(0));
for (size_t i = 0; i < m_physical_pages.size(); i++)
for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
{
page_table.map_page_at(m_physical_pages[i], 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
page_table.map_page_at(m_page_table.physical_address_of(vaddr() + offset), 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
memset((void*)0, 0, PAGE_SIZE);
}
page_table.unmap_page(0);
page_table.unlock();
}
void VirtualRange::copy_from(size_t offset, const uint8_t* buffer, size_t bytes)
@ -193,14 +184,14 @@ namespace Kernel
return;
}
page_table.lock();
LockGuard _(page_table);
ASSERT(page_table.is_page_free(0));
size_t off = offset % PAGE_SIZE;
size_t i = offset / PAGE_SIZE;
// NOTE: we map the first page separately since it needs extra calculations
page_table.map_page_at(m_physical_pages[i], 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
page_table.map_page_at(m_page_table.physical_address_of(vaddr() + i * PAGE_SIZE), 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
memcpy((void*)off, buffer, PAGE_SIZE - off);
@ -212,7 +203,7 @@ namespace Kernel
{
size_t len = BAN::Math::min<size_t>(PAGE_SIZE, bytes);
page_table.map_page_at(m_physical_pages[i], 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
page_table.map_page_at(m_page_table.physical_address_of(vaddr() + i * PAGE_SIZE), 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
memcpy((void*)0, buffer, len);
@ -221,8 +212,6 @@ namespace Kernel
i++;
}
page_table.unmap_page(0);
page_table.unlock();
}
}

View File

@ -5,7 +5,7 @@
#include <kernel/MMIO.h>
#include <kernel/Networking/E1000.h>
#define E1000_GENERAL_MEM_SIZE (128 * 1024)
#define DEBUG_E1000 1
#define E1000_REG_CTRL 0x0000
#define E1000_REG_STATUS 0x0008
@ -112,7 +112,7 @@ namespace Kernel
volatile uint16_t special;
} __attribute__((packed));
BAN::ErrorOr<BAN::UniqPtr<E1000>> E1000::create(const PCIDevice& pci_device)
BAN::ErrorOr<BAN::UniqPtr<E1000>> E1000::create(PCI::Device& pci_device)
{
E1000* e1000 = new E1000();
ASSERT(e1000);
@ -126,42 +126,26 @@ namespace Kernel
E1000::~E1000()
{
if (m_bar_type == PCIDevice::BarType::MEM && m_bar_addr)
PageTable::kernel().unmap_range(m_bar_addr & PAGE_ADDR_MASK, E1000_GENERAL_MEM_SIZE);
}
BAN::ErrorOr<void> E1000::initialize(const PCIDevice& pci_device)
BAN::ErrorOr<void> E1000::initialize(PCI::Device& pci_device)
{
m_bar_type = pci_device.read_bar_type(0);
if (m_bar_type == PCIDevice::BarType::INVAL)
{
dwarnln("invalid bar0 type");
return BAN::Error::from_errno(EINVAL);
}
if (m_bar_type == PCIDevice::BarType::MEM)
{
uint64_t bar_addr = pci_device.read_bar_address(0);
vaddr_t page_vaddr = PageTable::kernel().reserve_free_contiguous_pages(E1000_GENERAL_MEM_SIZE / PAGE_SIZE, KERNEL_OFFSET);
paddr_t page_paddr = bar_addr & PAGE_ADDR_MASK;
PageTable::kernel().map_range_at(page_paddr, page_vaddr, E1000_GENERAL_MEM_SIZE, PageTable::Flags::CacheDisable | PageTable::Flags::ReadWrite | PageTable::Flags::Present);
m_bar_addr = page_vaddr + (bar_addr % PAGE_SIZE);
}
else if (m_bar_type == PCIDevice::BarType::IO)
{
m_bar_addr = pci_device.read_bar_address(0);
}
m_bar_region = TRY(pci_device.allocate_bar_region(0));
pci_device.enable_bus_mastering();
detect_eeprom();
TRY(read_mac_address());
dprintln("E1000 at PCI {}:{}.{}", pci_device.bus(), pci_device.dev(), pci_device.func());
initialize_rx();
initialize_tx();
enable_link();
enable_interrupts();
#if DEBUG_E1000
dprintln("E1000 at PCI {}:{}.{}", pci_device.bus(), pci_device.dev(), pci_device.func());
dprintln(" MAC: {2H}:{2H}:{2H}:{2H}:{2H}:{2H}",
m_mac_address[0],
m_mac_address[1],
@ -170,52 +154,22 @@ namespace Kernel
m_mac_address[4],
m_mac_address[5]
);
initialize_rx();
initialize_tx();
enable_link();
enable_interrupts();
dprintln(" link up: {}", link_up());
if (link_up())
dprintln(" link speed: {} Mbps", link_speed());
#endif
return {};
}
void E1000::write32(uint16_t reg, uint32_t value)
{
switch (m_bar_type)
{
case PCIDevice::BarType::MEM:
MMIO::write32(m_bar_addr + reg, value);
break;
case PCIDevice::BarType::IO:
IO::outl(m_bar_addr, reg);
IO::outl(m_bar_addr + 4, value);
break;
default:
ASSERT_NOT_REACHED();
}
m_bar_region->write32(reg, value);
}
uint32_t E1000::read32(uint16_t reg)
{
uint32_t result = 0;
switch (m_bar_type)
{
case PCIDevice::BarType::MEM:
result = MMIO::read32(m_bar_addr + reg);
break;
case PCIDevice::BarType::IO:
IO::outl(m_bar_addr, reg);
result = IO::inl(m_bar_addr + 4);
break;
default:
ASSERT_NOT_REACHED();
}
return result;
return m_bar_region->read32(reg);
}
void E1000::detect_eeprom()

View File

@ -1,61 +1,99 @@
#include <kernel/IO.h>
#include <kernel/Memory/PageTable.h>
#include <kernel/MMIO.h>
#include <kernel/Networking/E1000.h>
#include <kernel/PCI.h>
#include <kernel/Storage/ATAController.h>
#define INVALID 0xFFFF
#define INVALID_VENDOR 0xFFFF
#define MULTI_FUNCTION 0x80
#define CONFIG_ADDRESS 0xCF8
#define CONFIG_DATA 0xCFC
namespace Kernel
#define DEBUG_PCI 1
namespace Kernel::PCI
{
static PCI* s_instance = nullptr;
static PCIManager* s_instance = nullptr;
void PCI::initialize()
{
ASSERT(s_instance == nullptr);
s_instance = new PCI();
ASSERT(s_instance);
s_instance->check_all_buses();
s_instance->initialize_devices();
}
PCI& PCI::get()
{
ASSERT(s_instance);
return *s_instance;
}
static uint32_t read_config_dword(uint8_t bus, uint8_t dev, uint8_t func, uint8_t offset)
uint32_t PCIManager::read_config_dword(uint8_t bus, uint8_t dev, uint8_t func, uint8_t offset)
{
ASSERT(offset % 4 == 0);
uint32_t config_addr = 0x80000000 | ((uint32_t)bus << 16) | ((uint32_t)dev << 11) | ((uint32_t)func << 8) | offset;
IO::outl(CONFIG_ADDRESS, config_addr);
return IO::inl(CONFIG_DATA);
}
static void write_config_dword(uint8_t bus, uint8_t dev, uint8_t func, uint8_t offset, uint32_t value)
uint16_t PCIManager::read_config_word(uint8_t bus, uint8_t dev, uint8_t func, uint8_t offset)
{
ASSERT(offset % 2 == 0);
uint32_t dword = read_config_dword(bus, dev, func, offset & ~3);
return (dword >> ((offset & 3) * 8)) & 0xFFFF;
}
uint8_t PCIManager::read_config_byte(uint8_t bus, uint8_t dev, uint8_t func, uint8_t offset)
{
uint32_t dword = read_config_dword(bus, dev, func, offset & ~3);
return (dword >> ((offset & 3) * 8)) & 0xFF;
}
void PCIManager::write_config_dword(uint8_t bus, uint8_t dev, uint8_t func, uint8_t offset, uint32_t value)
{
ASSERT(offset % 4 == 0);
uint32_t config_addr = 0x80000000 | ((uint32_t)bus << 16) | ((uint32_t)dev << 11) | ((uint32_t)func << 8) | offset;
IO::outl(CONFIG_ADDRESS, config_addr);
IO::outl(CONFIG_DATA, value);
}
void PCIManager::write_config_word(uint8_t bus, uint8_t dev, uint8_t func, uint8_t offset, uint16_t value)
{
ASSERT(offset % 2 == 0);
uint32_t byte = (offset & 3) * 8;
uint32_t temp = read_config_dword(bus, dev, func, offset & ~3);
temp &= ~(0xFFFF << byte);
temp |= (uint32_t)value << byte;
write_config_dword(bus, dev, func, offset & ~3, temp);
}
void PCIManager::write_config_byte(uint8_t bus, uint8_t dev, uint8_t func, uint8_t offset, uint8_t value)
{
uint32_t byte = (offset & 3) * 8;
uint32_t temp = read_config_dword(bus, dev, func, offset & ~3);
temp &= ~(0xFF << byte);
temp |= (uint32_t)value << byte;
write_config_dword(bus, dev, func, offset, temp);
}
static uint16_t get_vendor_id(uint8_t bus, uint8_t dev, uint8_t func)
{
uint32_t dword = read_config_dword(bus, dev, func, 0x00);
uint32_t dword = PCIManager::read_config_dword(bus, dev, func, 0x00);
return dword & 0xFFFF;
}
static uint8_t get_header_type(uint8_t bus, uint8_t dev, uint8_t func)
{
uint32_t dword = read_config_dword(bus, dev, func, 0x0C);
uint32_t dword = PCIManager::read_config_dword(bus, dev, func, 0x0C);
return (dword >> 16) & 0xFF;
}
void PCI::check_function(uint8_t bus, uint8_t dev, uint8_t func)
void PCIManager::initialize()
{
ASSERT(s_instance == nullptr);
s_instance = new PCIManager();
ASSERT(s_instance);
s_instance->check_all_buses();
s_instance->initialize_devices();
}
PCIManager& PCIManager::get()
{
ASSERT(s_instance);
return *s_instance;
}
void PCIManager::check_function(uint8_t bus, uint8_t dev, uint8_t func)
{
MUST(m_devices.emplace_back(bus, dev, func));
auto& device = m_devices.back();
@ -63,29 +101,29 @@ namespace Kernel
check_bus(device.read_byte(0x19));
}
void PCI::check_device(uint8_t bus, uint8_t dev)
void PCIManager::check_device(uint8_t bus, uint8_t dev)
{
if (get_vendor_id(bus, dev, 0) == INVALID)
if (get_vendor_id(bus, dev, 0) == INVALID_VENDOR)
return;
check_function(bus, dev, 0);
if (get_header_type(bus, dev, 0) & MULTI_FUNCTION)
for (uint8_t func = 1; func < 8; func++)
if (get_vendor_id(bus, dev, func) != INVALID)
if (get_vendor_id(bus, dev, func) != INVALID_VENDOR)
check_function(bus, dev, func);
}
void PCI::check_bus(uint8_t bus)
void PCIManager::check_bus(uint8_t bus)
{
for (uint8_t dev = 0; dev < 32; dev++)
check_device(bus, dev);
}
void PCI::check_all_buses()
void PCIManager::check_all_buses()
{
if (get_header_type(0, 0, 0) & MULTI_FUNCTION)
{
for (int func = 0; func < 8 && get_vendor_id(0, 0, func) != INVALID; func++)
for (int func = 0; func < 8 && get_vendor_id(0, 0, func) != INVALID_VENDOR; func++)
check_bus(func);
}
else
@ -94,9 +132,9 @@ namespace Kernel
}
}
void PCI::initialize_devices()
void PCIManager::initialize_devices()
{
for (const auto& pci_device : PCI::get().devices())
for (auto& pci_device : m_devices)
{
switch (pci_device.class_code())
{
@ -134,7 +172,144 @@ namespace Kernel
}
}
PCIDevice::PCIDevice(uint8_t bus, uint8_t dev, uint8_t func)
BAN::ErrorOr<BAN::UniqPtr<BarRegion>> BarRegion::create(PCI::Device& device, uint8_t bar_num)
{
ASSERT(device.header_type() == 0x00);
uint32_t command_status = device.read_dword(0x04);
// disable io/mem space while reading bar
device.write_dword(0x04, command_status & ~3);
uint8_t offset = 0x10 + bar_num * 8;
uint64_t addr = device.read_dword(offset);
device.write_dword(offset, 0xFFFFFFFF);
uint32_t size = device.read_dword(0x10 + bar_num * 8);
size = ~size + 1;
device.write_dword(offset, addr);
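// Worked example of the sizing above (values assumed): for a 128 KiB memory
// BAR, writing 0xFFFFFFFF reads back as 0xFFFE0000 when the low flag bits
// are zero, and ~0xFFFE0000 + 1 = 0x00020000 = 128 * 1024 bytes.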
// determine bar type
BarType type = BarType::INVALID;
if (addr & 1)
{
type = BarType::IO;
addr &= 0xFFFFFFFC;
}
else if ((addr & 0b110) == 0b000)
{
type = BarType::MEM;
addr &= 0xFFFFFFF0;
}
else if ((addr & 0b110) == 0b100)
{
type = BarType::MEM;
addr &= 0xFFFFFFF0;
addr |= (uint64_t)device.read_dword(offset + 8) << 32;
}
if (type == BarType::INVALID)
{
dwarnln("invalid pci device bar");
return BAN::Error::from_errno(EINVAL);
}
auto* region_ptr = new BarRegion(type, addr, size);
ASSERT(region_ptr);
auto region = BAN::UniqPtr<BarRegion>::adopt(region_ptr);
TRY(region->initialize());
// restore old command register and enable correct IO/MEM
command_status |= (type == BarType::IO) ? 1 : 2;
device.write_dword(0x04, command_status);
#if DEBUG_PCI
dprintln("created BAR region for PCI {}:{}.{}",
device.bus(),
device.dev(),
device.func()
);
dprintln(" type: {}", region->type() == BarType::IO ? "IO" : "MEM");
dprintln(" paddr {}", (void*)region->paddr());
dprintln(" vaddr {}", (void*)region->vaddr());
dprintln(" size {}", region->size());
#endif
return region;
}
BarRegion::BarRegion(BarType type, paddr_t paddr, size_t size)
: m_type(type)
, m_paddr(paddr)
, m_size(size)
{ }
BarRegion::~BarRegion()
{
if (m_type == BarType::MEM && m_vaddr)
PageTable::kernel().unmap_range(m_vaddr, m_size);
m_vaddr = 0;
}
BAN::ErrorOr<void> BarRegion::initialize()
{
if (m_type == BarType::IO)
return {};
size_t needed_pages = BAN::Math::div_round_up<size_t>(m_size, PAGE_SIZE);
m_vaddr = PageTable::kernel().reserve_free_contiguous_pages(needed_pages, KERNEL_OFFSET);
if (m_vaddr == 0)
return BAN::Error::from_errno(ENOMEM);
PageTable::kernel().map_range_at(m_paddr, m_vaddr, m_size, PageTable::Flags::CacheDisable | PageTable::Flags::ReadWrite | PageTable::Flags::Present);
return {};
}
void BarRegion::write8(off_t reg, uint8_t val)
{
if (m_type == BarType::IO)
return IO::outb(m_vaddr + reg, val);
MMIO::write8(m_vaddr + reg, val);
}
void BarRegion::write16(off_t reg, uint16_t val)
{
if (m_type == BarType::IO)
return IO::outw(m_vaddr + reg, val);
MMIO::write16(m_vaddr + reg, val);
}
void BarRegion::write32(off_t reg, uint32_t val)
{
if (m_type == BarType::IO)
return IO::outl(m_vaddr + reg, val);
MMIO::write32(m_vaddr + reg, val);
}
uint8_t BarRegion::read8(off_t reg)
{
if (m_type == BarType::IO)
return IO::inb(m_vaddr + reg);
return MMIO::read8(m_vaddr + reg);
}
uint16_t BarRegion::read16(off_t reg)
{
if (m_type == BarType::IO)
return IO::inw(m_vaddr + reg);
return MMIO::read16(m_vaddr + reg);
}
uint32_t BarRegion::read32(off_t reg)
{
if (m_type == BarType::IO)
return IO::inl(m_vaddr + reg);
return MMIO::read32(m_vaddr + reg);
}
PCI::Device::Device(uint8_t bus, uint8_t dev, uint8_t func)
: m_bus(bus), m_dev(dev), m_func(func)
{
uint32_t type = read_word(0x0A);
@ -142,87 +317,101 @@ namespace Kernel
m_subclass = (uint8_t)(type);
m_prog_if = read_byte(0x09);
m_header_type = read_byte(0x0E);
enumerate_capabilites();
}
uint32_t PCIDevice::read_dword(uint8_t offset) const
uint32_t PCI::Device::read_dword(uint8_t offset) const
{
ASSERT((offset & 0x03) == 0);
return read_config_dword(m_bus, m_dev, m_func, offset);
ASSERT(offset % 4 == 0);
return PCIManager::read_config_dword(m_bus, m_dev, m_func, offset);
}
uint16_t PCIDevice::read_word(uint8_t offset) const
uint16_t PCI::Device::read_word(uint8_t offset) const
{
ASSERT((offset & 0x01) == 0);
uint32_t dword = read_config_dword(m_bus, m_dev, m_func, offset & 0xFC);
return (uint16_t)(dword >> (8 * (offset & 0x03)));
ASSERT(offset % 2 == 0);
return PCIManager::read_config_word(m_bus, m_dev, m_func, offset);
}
uint8_t PCIDevice::read_byte(uint8_t offset) const
uint8_t PCI::Device::read_byte(uint8_t offset) const
{
uint32_t dword = read_config_dword(m_bus, m_dev, m_func, offset & 0xFC);
return (uint8_t)(dword >> (8 * (offset & 0x03)));
return PCIManager::read_config_byte(m_bus, m_dev, m_func, offset);
}
void PCIDevice::write_dword(uint8_t offset, uint32_t value) const
void PCI::Device::write_dword(uint8_t offset, uint32_t value)
{
ASSERT((offset & 0x03) == 0);
write_config_dword(m_bus, m_dev, m_func, offset, value);
ASSERT(offset % 4 == 0);
PCIManager::write_config_dword(m_bus, m_dev, m_func, offset, value);
}
PCIDevice::BarType PCIDevice::read_bar_type(uint8_t bar) const
void PCI::Device::write_word(uint8_t offset, uint16_t value)
{
ASSERT(m_header_type == 0x00);
ASSERT(bar <= 5);
uint32_t type = read_dword(0x10 + bar * 4) & 0b111;
if (type & 1)
return BarType::IO;
type >>= 1;
if (type == 0x0 || type == 0x2)
return BarType::MEM;
return BarType::INVAL;
ASSERT(offset % 2 == 0);
PCIManager::write_config_word(m_bus, m_dev, m_func, offset, value);
}
uint64_t PCIDevice::read_bar_address(uint8_t bar) const
void PCI::Device::write_byte(uint8_t offset, uint8_t value)
{
ASSERT(m_header_type == 0x00);
ASSERT(bar <= 5);
uint64_t address = read_dword(0x10 + bar * 4);
if (address & 1)
return address & 0xFFFFFFFC;
if ((address & 0b110) == 0b100)
address |= (uint64_t)read_dword(0x10 + bar * 4 + 4) << 32;
return address & 0xFFFFFFFFFFFFFFF0;
PCIManager::write_config_byte(m_bus, m_dev, m_func, offset, value);
}
void PCIDevice::enable_bus_mastering() const
BAN::ErrorOr<BAN::UniqPtr<BarRegion>> PCI::Device::allocate_bar_region(uint8_t bar_num)
{
return BarRegion::create(*this, bar_num);
}
void PCI::Device::enumerate_capabilites()
{
uint16_t status = read_word(0x06);
if (!(status & (1 << 4)))
return;
uint8_t capabilities = read_byte(0x34) & 0xFC;
while (capabilities)
{
uint16_t next = read_word(capabilities);
dprintln(" cap {2H}", next & 0xFF);
capabilities = (next >> 8) & 0xFC;
}
}
void PCI::Device::enable_bus_mastering()
{
write_dword(0x04, read_dword(0x04) | 1u << 2);
}
void PCIDevice::disable_bus_mastering() const
void PCI::Device::disable_bus_mastering()
{
write_dword(0x04, read_dword(0x04) & ~(1u << 2));
}
void PCIDevice::enable_memory_space() const
void PCI::Device::enable_memory_space()
{
write_dword(0x04, read_dword(0x04) | 1u << 1);
}
void PCIDevice::disable_memory_space() const
void PCI::Device::disable_memory_space()
{
write_dword(0x04, read_dword(0x04) & ~(1u << 1));
}
void PCIDevice::enable_pin_interrupts() const
void PCI::Device::enable_io_space()
{
write_dword(0x04, read_dword(0x04) | 1u << 0);
}
void PCI::Device::disable_io_space()
{
write_dword(0x04, read_dword(0x04) & ~(1u << 0));
}
void PCI::Device::enable_pin_interrupts()
{
write_dword(0x04, read_dword(0x04) | 1u << 10);
}
void PCIDevice::disable_pin_interrupts() const
void PCI::Device::disable_pin_interrupts()
{
write_dword(0x04, read_dword(0x04) & ~(1u << 10));
}

View File

@ -99,7 +99,7 @@ namespace Kernel
BAN::ErrorOr<Process*> Process::create_userspace(const Credentials& credentials, BAN::StringView path)
{
auto elf = TRY(load_elf_for_exec(credentials, path, "/"sv, {}));
auto elf = TRY(load_elf_for_exec(credentials, path, "/"sv));
auto* process = create_process(credentials, 0);
MUST(process->m_working_directory.push_back('/'));
@ -114,27 +114,35 @@ namespace Kernel
elf.clear();
char** argv = nullptr;
char** envp = nullptr;
{
PageTableScope _(process->page_table());
argv = (char**)MUST(process->sys_alloc(sizeof(char**) * 2));
argv[0] = (char*)MUST(process->sys_alloc(path.size() + 1));
memcpy(argv[0], path.data(), path.size());
argv[0][path.size()] = '\0';
argv[1] = nullptr;
size_t needed_bytes = sizeof(char*) * 2 + path.size() + 1;
if (auto rem = needed_bytes % PAGE_SIZE)
needed_bytes += PAGE_SIZE - rem;
BAN::StringView env1 = "PATH=/bin:/usr/bin"sv;
envp = (char**)MUST(process->sys_alloc(sizeof(char**) * 2));
envp[0] = (char*)MUST(process->sys_alloc(env1.size() + 1));
memcpy(envp[0], env1.data(), env1.size());
envp[0][env1.size()] = '\0';
envp[1] = nullptr;
auto argv_range = MUST(VirtualRange::create_to_vaddr_range(
process->page_table(),
0x400000, KERNEL_OFFSET,
needed_bytes,
PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present
));
argv_range->set_zero();
uintptr_t temp = argv_range->vaddr() + sizeof(char*) * 2;
argv_range->copy_from(0, (uint8_t*)&temp, sizeof(char*));
temp = 0;
argv_range->copy_from(sizeof(char*), (uint8_t*)&temp, sizeof(char*));
argv_range->copy_from(sizeof(char*) * 2, (const uint8_t*)path.data(), path.size());
MUST(process->m_mapped_ranges.emplace_back(false, BAN::move(argv_range)));
}
process->m_userspace_info.argc = 1;
process->m_userspace_info.argv = argv;
process->m_userspace_info.envp = envp;
process->m_userspace_info.envp = nullptr;
auto* thread = MUST(Thread::create_userspace(process));
process->add_thread(thread);
@ -157,8 +165,6 @@ namespace Kernel
Process::~Process()
{
ASSERT(m_threads.empty());
ASSERT(m_fixed_width_allocators.empty());
ASSERT(!m_general_allocator);
ASSERT(m_mapped_ranges.empty());
ASSERT(m_exit_status.waiting == 0);
ASSERT(&PageTable::current() != m_page_table.ptr());
@ -193,10 +199,6 @@ namespace Kernel
// NOTE: We must unmap ranges while the page table is still alive
m_mapped_ranges.clear();
// NOTE: We must clear allocators while the page table is still alive
m_fixed_width_allocators.clear();
m_general_allocator.clear();
}
void Process::on_thread_exit(Thread& thread)
@ -273,7 +275,7 @@ namespace Kernel
return 0;
}
BAN::ErrorOr<BAN::UniqPtr<LibELF::ELF>> Process::load_elf_for_exec(const Credentials& credentials, BAN::StringView file_path, const BAN::String& cwd, const BAN::Vector<BAN::StringView>& path_env)
BAN::ErrorOr<BAN::UniqPtr<LibELF::ELF>> Process::load_elf_for_exec(const Credentials& credentials, BAN::StringView file_path, const BAN::String& cwd)
{
if (file_path.empty())
return BAN::Error::from_errno(ENOENT);
@ -281,44 +283,13 @@ namespace Kernel
BAN::String absolute_path;
if (file_path.front() == '/')
{
// We have an absolute path
TRY(absolute_path.append(file_path));
}
else if (file_path.front() == '.' || file_path.contains('/'))
else
{
// We have a relative path
TRY(absolute_path.append(cwd));
TRY(absolute_path.push_back('/'));
TRY(absolute_path.append(file_path));
}
else
{
// We have neither relative or absolute path,
// search from PATH environment
for (auto path_part : path_env)
{
if (path_part.empty())
continue;
if (path_part.front() != '/')
{
TRY(absolute_path.append(cwd));
TRY(absolute_path.push_back('/'));
}
TRY(absolute_path.append(path_part));
TRY(absolute_path.push_back('/'));
TRY(absolute_path.append(file_path));
if (!VirtualFileSystem::get().file_from_absolute_path(credentials, absolute_path, O_EXEC).is_error())
break;
absolute_path.clear();
}
if (absolute_path.empty())
return BAN::Error::from_errno(ENOENT);
}
auto file = TRY(VirtualFileSystem::get().file_from_absolute_path(credentials, absolute_path, O_EXEC));
@ -358,20 +329,10 @@ namespace Kernel
OpenFileDescriptorSet open_file_descriptors(m_credentials);
TRY(open_file_descriptors.clone_from(m_open_file_descriptors));
BAN::Vector<BAN::UniqPtr<VirtualRange>> mapped_ranges;
BAN::Vector<MappedRange> mapped_ranges;
TRY(mapped_ranges.reserve(m_mapped_ranges.size()));
for (auto& mapped_range : m_mapped_ranges)
MUST(mapped_ranges.push_back(TRY(mapped_range->clone(*page_table))));
BAN::Vector<BAN::UniqPtr<FixedWidthAllocator>> fixed_width_allocators;
TRY(fixed_width_allocators.reserve(m_fixed_width_allocators.size()));
for (auto& allocator : m_fixed_width_allocators)
if (allocator->allocations() > 0)
MUST(fixed_width_allocators.push_back(TRY(allocator->clone(*page_table))));
BAN::UniqPtr<GeneralAllocator> general_allocator;
if (m_general_allocator)
general_allocator = TRY(m_general_allocator->clone(*page_table));
MUST(mapped_ranges.emplace_back(mapped_range.can_be_unmapped, TRY(mapped_range.range->clone(*page_table))));
Process* forked = create_process(m_credentials, m_pid, m_sid, m_pgrp);
forked->m_controlling_terminal = m_controlling_terminal;
@ -379,8 +340,6 @@ namespace Kernel
forked->m_page_table = BAN::move(page_table);
forked->m_open_file_descriptors = BAN::move(open_file_descriptors);
forked->m_mapped_ranges = BAN::move(mapped_ranges);
forked->m_fixed_width_allocators = BAN::move(fixed_width_allocators);
forked->m_general_allocator = BAN::move(general_allocator);
forked->m_is_userspace = m_is_userspace;
forked->m_userspace_info = m_userspace_info;
forked->m_has_called_exec = false;
@ -404,14 +363,9 @@ namespace Kernel
for (int i = 0; argv && argv[i]; i++)
TRY(str_argv.emplace_back(argv[i]));
BAN::Vector<BAN::StringView> path_env;
BAN::Vector<BAN::String> str_envp;
for (int i = 0; envp && envp[i]; i++)
{
TRY(str_envp.emplace_back(envp[i]));
if (strncmp(envp[i], "PATH=", 5) == 0)
path_env = TRY(BAN::StringView(envp[i]).substring(5).split(':'));
}
BAN::String working_directory;
@ -420,14 +374,12 @@ namespace Kernel
TRY(working_directory.append(m_working_directory));
}
auto elf = TRY(load_elf_for_exec(m_credentials, path, working_directory, path_env));
auto elf = TRY(load_elf_for_exec(m_credentials, path, working_directory));
LockGuard lock_guard(m_lock);
m_open_file_descriptors.close_cloexec();
m_fixed_width_allocators.clear();
m_general_allocator.clear();
m_mapped_ranges.clear();
load_elf_to_memory(*elf);
@ -444,27 +396,46 @@ namespace Kernel
ASSERT(&Process::current() == this);
// allocate memory on the new process for arguments and environment
{
LockGuard _(page_table());
m_userspace_info.argv = (char**)MUST(sys_alloc(sizeof(char**) * (str_argv.size() + 1)));
for (size_t i = 0; i < str_argv.size(); i++)
auto create_range =
[&](const auto& container) -> BAN::UniqPtr<VirtualRange>
{
m_userspace_info.argv[i] = (char*)MUST(sys_alloc(str_argv[i].size() + 1));
memcpy(m_userspace_info.argv[i], str_argv[i].data(), str_argv[i].size());
m_userspace_info.argv[i][str_argv[i].size()] = '\0';
}
m_userspace_info.argv[str_argv.size()] = nullptr;
size_t bytes = sizeof(char*);
for (auto& elem : container)
bytes += sizeof(char*) + elem.size() + 1;
m_userspace_info.envp = (char**)MUST(sys_alloc(sizeof(char**) * (str_envp.size() + 1)));
for (size_t i = 0; i < str_envp.size(); i++)
{
m_userspace_info.envp[i] = (char*)MUST(sys_alloc(str_envp[i].size() + 1));
memcpy(m_userspace_info.envp[i], str_envp[i].data(), str_envp[i].size());
m_userspace_info.envp[i][str_envp[i].size()] = '\0';
}
m_userspace_info.envp[str_envp.size()] = nullptr;
}
if (auto rem = bytes % PAGE_SIZE)
bytes += PAGE_SIZE - rem;
auto range = MUST(VirtualRange::create_to_vaddr_range(
page_table(),
0x400000, KERNEL_OFFSET,
bytes,
PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present
));
range->set_zero();
size_t data_offset = sizeof(char*) * (container.size() + 1);
for (size_t i = 0; i < container.size(); i++)
{
uintptr_t ptr_addr = range->vaddr() + data_offset;
range->copy_from(sizeof(char*) * i, (const uint8_t*)&ptr_addr, sizeof(char*));
range->copy_from(data_offset, (const uint8_t*)container[i].data(), container[i].size());
data_offset += container[i].size() + 1;
}
uintptr_t null = 0;
range->copy_from(sizeof(char*) * container.size(), (const uint8_t*)&null, sizeof(char*));
return BAN::move(range);
};
auto argv_range = create_range(str_argv);
m_userspace_info.argv = (char**)argv_range->vaddr();
MUST(m_mapped_ranges.emplace_back(false, BAN::move(argv_range)));
auto envp_range = create_range(str_envp);
m_userspace_info.envp = (char**)envp_range->vaddr();
MUST(m_mapped_ranges.emplace_back(false, BAN::move(envp_range)));
m_userspace_info.argc = str_argv.size();
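For reference, the block built by create_range above packs the pointer table and the string bytes back to back. A sketch of the resulting layout for argv = { "sh", "-c" } on a 64-bit build where sizeof(char*) == 8 (addresses are illustrative, not taken from the diff):
// vaddr + 0x00: char* -> vaddr + 0x18      argv[0]
// vaddr + 0x08: char* -> vaddr + 0x1b      argv[1]
// vaddr + 0x10: nullptr                    argv terminator
// vaddr + 0x18: 's' 'h' '\0'
// vaddr + 0x1b: '-' 'c' '\0'
// everything up to the next page boundary stays zero thanks to set_zero()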
@ -538,13 +509,6 @@ namespace Kernel
return 0;
}
BAN::ErrorOr<long> Process::sys_setenvp(char** envp)
{
LockGuard _(m_lock);
m_userspace_info.envp = envp;
return 0;
}
void Process::load_elf_to_memory(LibELF::ELF& elf)
{
ASSERT(elf.is_native());
@ -583,9 +547,11 @@ namespace Kernel
{
LockGuard _(m_lock);
MUST(m_mapped_ranges.push_back(MUST(VirtualRange::create_to_vaddr(page_table(), page_start * PAGE_SIZE, page_count * PAGE_SIZE, flags))));
m_mapped_ranges.back()->set_zero();
m_mapped_ranges.back()->copy_from(elf_program_header.p_vaddr % PAGE_SIZE, elf.data() + elf_program_header.p_offset, elf_program_header.p_filesz);
auto range = MUST(VirtualRange::create_to_vaddr(page_table(), page_start * PAGE_SIZE, page_count * PAGE_SIZE, flags));
range->set_zero();
range->copy_from(elf_program_header.p_vaddr % PAGE_SIZE, elf.data() + elf_program_header.p_offset, elf_program_header.p_filesz);
MUST(m_mapped_ranges.emplace_back(false, BAN::move(range)));
}
page_table().unlock();
@ -811,90 +777,66 @@ namespace Kernel
return (long)buffer;
}
static constexpr size_t allocator_size_for_allocation(size_t value)
BAN::ErrorOr<long> Process::sys_mmap(const sys_mmap_t& args)
{
if (value <= 256) {
if (value <= 64)
return 64;
else
return 256;
} else {
if (value <= 1024)
return 1024;
else
return 4096;
}
}
if (args.prot != PROT_NONE && args.prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
return BAN::Error::from_errno(EINVAL);
BAN::ErrorOr<long> Process::sys_alloc(size_t bytes)
{
vaddr_t address = 0;
PageTable::flags_t flags = PageTable::Flags::UserSupervisor;
if (args.prot & PROT_READ)
flags |= PageTable::Flags::Present;
if (args.prot & PROT_WRITE)
flags |= PageTable::Flags::ReadWrite | PageTable::Flags::Present;
if (args.prot & PROT_EXEC)
flags |= PageTable::Flags::Execute | PageTable::Flags::Present;
if (bytes <= PAGE_SIZE)
if (args.flags == (MAP_ANONYMOUS | MAP_PRIVATE))
{
// Do fixed width allocation
size_t allocation_size = allocator_size_for_allocation(bytes);
ASSERT(bytes <= allocation_size);
ASSERT(allocation_size <= PAGE_SIZE);
if (args.addr != nullptr)
return BAN::Error::from_errno(ENOTSUP);
if (args.off != 0)
return BAN::Error::from_errno(EINVAL);
if (args.len % PAGE_SIZE != 0)
return BAN::Error::from_errno(EINVAL);
auto range = TRY(VirtualRange::create_to_vaddr_range(
page_table(),
0x400000, KERNEL_OFFSET,
args.len,
PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present
));
range->set_zero();
LockGuard _(m_lock);
bool needs_new_allocator { true };
for (auto& allocator : m_fixed_width_allocators)
{
if (allocator->allocation_size() == allocation_size && allocator->allocations() < allocator->max_allocations())
{
address = allocator->allocate();
needs_new_allocator = false;
break;
}
}
if (needs_new_allocator)
{
auto allocator = TRY(FixedWidthAllocator::create(page_table(), allocation_size));
TRY(m_fixed_width_allocators.push_back(BAN::move(allocator)));
address = m_fixed_width_allocators.back()->allocate();
}
}
else
{
LockGuard _(m_lock);
if (!m_general_allocator)
m_general_allocator = TRY(GeneralAllocator::create(page_table(), 0x400000));
address = m_general_allocator->allocate(bytes);
TRY(m_mapped_ranges.emplace_back(true, BAN::move(range)));
return m_mapped_ranges.back().range->vaddr();
}
if (address == 0)
return BAN::Error::from_errno(ENOMEM);
return address;
return BAN::Error::from_errno(ENOTSUP);
}
BAN::ErrorOr<long> Process::sys_free(void* ptr)
BAN::ErrorOr<long> Process::sys_munmap(void* addr, size_t len)
{
if (len == 0)
return BAN::Error::from_errno(EINVAL);
vaddr_t vaddr = (vaddr_t)addr;
if (vaddr % PAGE_SIZE != 0)
return BAN::Error::from_errno(EINVAL);
LockGuard _(m_lock);
for (size_t i = 0; i < m_fixed_width_allocators.size(); i++)
for (size_t i = 0; i < m_mapped_ranges.size(); i++)
{
auto& allocator = m_fixed_width_allocators[i];
if (allocator->deallocate((vaddr_t)ptr))
{
// TODO: This might be too much. Maybe we should only
// remove allocators when we have low memory... ?
if (allocator->allocations() == 0)
m_fixed_width_allocators.remove(i);
return 0;
}
if (!m_mapped_ranges[i].can_be_unmapped)
continue;
auto& range = m_mapped_ranges[i].range;
if (vaddr + len < range->vaddr() || vaddr >= range->vaddr() + range->size())
continue;
m_mapped_ranges.remove(i);
}
if (m_general_allocator && m_general_allocator->deallocate((vaddr_t)ptr))
return 0;
dwarnln("free called on pointer that was not allocated");
return BAN::Error::from_errno(EINVAL);
return 0;
}
BAN::ErrorOr<long> Process::sys_termid(char* buffer) const
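Taken together, sys_mmap above only accepts private anonymous mappings with no address hint, a zero offset and a page-aligned length. Through the libc wrapper added later in this diff that boils down to roughly the following sketch (example() is an illustrative name; this libc reports mmap failure with a nullptr return):
#include <sys/mman.h>

int example()
{
    // ask the kernel for one zeroed, readable and writable page (4096 bytes here)
    void* page = mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
                      MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    if (page == nullptr)
        return 1;
    // ... use the memory ...
    // sys_munmap drops every mapped range the given region overlaps
    return munmap(page, 4096);
}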

View File

@ -11,7 +11,7 @@
namespace Kernel
{
BAN::ErrorOr<BAN::RefPtr<ATAController>> ATAController::create(const PCIDevice& device)
BAN::ErrorOr<BAN::RefPtr<ATAController>> ATAController::create(const PCI::Device& device)
{
ATAController* controller = new ATAController();
if (controller == nullptr)
@ -50,7 +50,7 @@ namespace Kernel
: m_rdev(makedev(DevFileSystem::get().get_next_dev(), 0))
{ }
BAN::ErrorOr<void> ATAController::initialize(const PCIDevice& pci_device)
BAN::ErrorOr<void> ATAController::initialize(const PCI::Device& pci_device)
{
struct Bus
{

View File

@ -32,7 +32,9 @@ namespace Kernel
return 0;
}
#if __enable_sse
Thread::current().save_sse();
#endif
asm volatile("sti");
@ -68,12 +70,6 @@ namespace Kernel
case SYS_OPENAT:
ret = Process::current().sys_openat((int)arg1, (const char*)arg2, (int)arg3, (mode_t)arg4);
break;
case SYS_ALLOC:
ret = Process::current().sys_alloc((size_t)arg1);
break;
case SYS_FREE:
ret = Process::current().sys_free((void*)arg1);
break;
case SYS_SEEK:
ret = Process::current().sys_seek((int)arg1, (long)arg2, (int)arg3);
break;
@ -101,9 +97,6 @@ namespace Kernel
case SYS_FSTAT:
ret = Process::current().sys_fstat((int)arg1, (struct stat*)arg2);
break;
case SYS_SETENVP:
ret = Process::current().sys_setenvp((char**)arg1);
break;
case SYS_READ_DIR_ENTRIES:
ret = Process::current().sys_read_dir_entries((int)arg1, (API::DirectoryEntryList*)arg2, (size_t)arg3);
break;
@ -194,6 +187,12 @@ namespace Kernel
case SYS_SYNC:
ret = Process::current().sys_sync();
break;
case SYS_MMAP:
ret = Process::current().sys_mmap(*(const sys_mmap_t*)arg1);
break;
case SYS_MUNMAP:
ret = Process::current().sys_munmap((void*)arg1, (size_t)arg2);
break;
default:
dwarnln("Unknown syscall {}", syscall);
break;
@ -205,7 +204,10 @@ namespace Kernel
Kernel::panic("Kernel error while returning to userspace {}", ret.error());
ASSERT(Kernel::Thread::current().state() == Kernel::Thread::State::Executing);
#if __enable_sse
Thread::current().load_sse();
#endif
if (ret.is_error())
return -ret.error().get_error_code();

View File

@ -167,7 +167,7 @@ static void init2(void*)
DevFileSystem::get().initialize_device_updater();
PCI::initialize();
PCI::PCIManager::initialize();
dprintln("PCI initialized");
VirtualFileSystem::initialize(cmdline.root);

150
kernel/kernel/lai_host.cpp Normal file
View File

@ -0,0 +1,150 @@
#include <kernel/ACPI.h>
#include <kernel/IO.h>
#include <kernel/Memory/kmalloc.h>
#include <kernel/Memory/PageTable.h>
#include <kernel/PCI.h>
#include <kernel/Timer/Timer.h>
#include <lai/host.h>
using namespace Kernel;
void* laihost_malloc(size_t size)
{
return kmalloc(size);
}
void* laihost_realloc(void* ptr, size_t newsize, size_t oldsize)
{
if (ptr == nullptr)
return laihost_malloc(newsize);
void* new_ptr = laihost_malloc(newsize);
if (new_ptr == nullptr)
return nullptr;
memcpy(new_ptr, ptr, BAN::Math::min(newsize, oldsize));
kfree(ptr);
return new_ptr;
}
void laihost_free(void* ptr, size_t)
{
kfree(ptr);
}
void laihost_log(int level, const char* msg)
{
if (level == LAI_DEBUG_LOG)
dprintln(msg);
else if (level == LAI_WARN_LOG)
dwarnln(msg);
else
ASSERT_NOT_REACHED();
}
void laihost_panic(const char* msg)
{
Kernel::panic(msg);
}
void* laihost_scan(const char* sig, size_t index)
{
ASSERT(index == 0);
return (void*)ACPI::get().get_header(sig);
}
void* laihost_map(size_t address, size_t count)
{
size_t needed_pages = range_page_count(address, count);
vaddr_t vaddr = PageTable::kernel().reserve_free_contiguous_pages(needed_pages, KERNEL_OFFSET);
ASSERT(vaddr);
PageTable::kernel().map_range_at(address & PAGE_ADDR_MASK, vaddr, needed_pages * PAGE_SIZE, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
return (void*)(vaddr + (address % PAGE_SIZE));
}
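A quick worked example of the page math above, assuming range_page_count counts the physical pages the byte range touches (values are illustrative):
// address = 0x1FFF8, count = 16, PAGE_SIZE = 0x1000
//   pages touched:    0x1F000 and 0x20000  -> needed_pages == 2
//   returned pointer: vaddr + (0x1FFF8 % 0x1000) == vaddr + 0xFF8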
void laihost_unmap(void* ptr, size_t count)
{
PageTable::kernel().unmap_range((vaddr_t)ptr, count);
}
void laihost_outb(uint16_t port, uint8_t val)
{
IO::outb(port, val);
}
void laihost_outw(uint16_t port, uint16_t val)
{
IO::outw(port, val);
}
void laihost_outd(uint16_t port, uint32_t val)
{
IO::outl(port, val);
}
uint8_t laihost_inb(uint16_t port)
{
return IO::inb(port);
}
uint16_t laihost_inw(uint16_t port)
{
return IO::inw(port);
}
uint32_t laihost_ind(uint16_t port)
{
return IO::inl(port);
}
void laihost_pci_writeb(uint16_t seg, uint8_t bus, uint8_t slot, uint8_t fun, uint16_t offset, uint8_t val)
{
ASSERT(seg == 0);
PCI::PCIManager::write_config_byte(bus, slot, fun, offset, val);
}
void laihost_pci_writew(uint16_t seg, uint8_t bus, uint8_t slot, uint8_t fun, uint16_t offset, uint16_t val)
{
ASSERT(seg == 0);
PCI::PCIManager::write_config_word(bus, slot, fun, offset, val);
}
void laihost_pci_writed(uint16_t seg, uint8_t bus, uint8_t slot, uint8_t fun, uint16_t offset, uint32_t val)
{
ASSERT(seg == 0);
PCI::PCIManager::write_config_dword(bus, slot, fun, offset, val);
}
uint8_t laihost_pci_readb(uint16_t seg, uint8_t bus, uint8_t slot, uint8_t fun, uint16_t offset)
{
ASSERT(seg == 0);
return PCI::PCIManager::read_config_byte(bus, slot, fun, offset);
}
uint16_t laihost_pci_readw(uint16_t seg, uint8_t bus, uint8_t slot, uint8_t fun, uint16_t offset)
{
ASSERT(seg == 0);
return PCI::PCIManager::read_config_word(bus, slot, fun, offset);
}
uint32_t laihost_pci_readd(uint16_t seg, uint8_t bus, uint8_t slot, uint8_t fun, uint16_t offset)
{
ASSERT(seg == 0);
return PCI::PCIManager::read_config_dword(bus, slot, fun, offset);
}
void laihost_sleep(uint64_t ms)
{
SystemTimer::get().sleep(ms);
}
uint64_t laihost_timer(void)
{
auto time = SystemTimer::get().time_since_boot();
return (1'000'000'000ull * time.tv_sec + time.tv_nsec) / 100;
}
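LAI's host API expects laihost_timer to tick in 100 ns units, which is what the division by 100 above produces from the nanosecond-resolution timespec. A small sanity check of the arithmetic:
// 1 second = 1'000'000'000 ns = 10'000'000 ticks of 100 ns
static_assert((1'000'000'000ull * 1 + 0) / 100 == 10'000'000ull);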

1
kernel/lai Submodule

@ -0,0 +1 @@
Subproject commit a228465314ee3a542f62d4bdefeb8fbe2b48da41

View File

@ -7,12 +7,14 @@ set(LIBC_SOURCES
ctype.cpp
dirent.cpp
fcntl.cpp
malloc.cpp
printf_impl.cpp
pwd.cpp
signal.cpp
stdio.cpp
stdlib.cpp
string.cpp
sys/mman.cpp
sys/stat.cpp
sys/wait.cpp
termios.cpp

View File

@ -46,6 +46,16 @@ struct posix_typed_mem_info
size_t posix_tmi_length; /* Maximum length which may be allocated from a typed memory object. */
};
struct sys_mmap_t
{
void* addr;
size_t len;
int prot;
int flags;
int fildes;
off_t off;
};
int mlock(const void* addr, size_t len);
int mlockall(int flags);
void* mmap(void* addr, size_t len, int prot, int flags, int fildes, off_t off);

View File

@ -12,9 +12,6 @@ __BEGIN_DECLS
#define SYS_CLOSE 5
#define SYS_OPEN 6
#define SYS_OPENAT 7
#define SYS_ALLOC 8
#define SYS_REALLOC 9
#define SYS_FREE 10
#define SYS_SEEK 11
#define SYS_TELL 12
#define SYS_GET_TERMIOS 13
@ -24,7 +21,6 @@ __BEGIN_DECLS
#define SYS_SLEEP 17
#define SYS_WAIT 18
#define SYS_FSTAT 19
#define SYS_SETENVP 20
#define SYS_READ_DIR_ENTRIES 21
#define SYS_SET_UID 22
#define SYS_SET_GID 23
@ -55,6 +51,8 @@ __BEGIN_DECLS
#define SYS_FSTATAT 48
#define SYS_STAT 49 // stat/lstat
#define SYS_SYNC 50
#define SYS_MMAP 51
#define SYS_MUNMAP 52
__END_DECLS

222
libc/malloc.cpp Normal file
View File

@ -0,0 +1,222 @@
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
static consteval size_t log_size_t(size_t value, size_t base)
{
size_t result = 0;
while (value /= base)
result++;
return result;
}
static constexpr size_t s_malloc_pool_size_initial = 4096;
static constexpr size_t s_malloc_pool_size_multiplier = 2;
static constexpr size_t s_malloc_pool_count = sizeof(size_t) * 8 - log_size_t(s_malloc_pool_size_initial, s_malloc_pool_size_multiplier);
static constexpr size_t s_malloc_default_align = 16;
struct malloc_node_t
{
bool allocated;
size_t size;
uint8_t data[0];
size_t data_size() const { return size - sizeof(malloc_node_t); }
malloc_node_t* next() { return (malloc_node_t*)(data + data_size()); }
};
struct malloc_pool_t
{
uint8_t* start;
size_t size;
};
static malloc_pool_t s_malloc_pools[s_malloc_pool_count];
void init_malloc()
{
size_t pool_size = s_malloc_pool_size_initial;
for (size_t i = 0; i < s_malloc_pool_count; i++)
{
s_malloc_pools[i].start = nullptr;
s_malloc_pools[i].size = pool_size;
pool_size *= s_malloc_pool_size_multiplier;
}
}
static bool allocate_pool(size_t pool_index)
{
auto& pool = s_malloc_pools[pool_index];
assert(pool.start == nullptr);
// allocate memory for pool
pool.start = (uint8_t*)mmap(nullptr, pool.size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (pool.start == nullptr)
return false;
// initialize pool to single unallocated node
auto* node = (malloc_node_t*)pool.start;
node->allocated = false;
node->size = pool.size;
return true;
}
static void* allocate_from_pool(size_t pool_index, size_t size)
{
assert(size % s_malloc_default_align == 0);
auto& pool = s_malloc_pools[pool_index];
assert(pool.start != nullptr);
uint8_t* pool_end = pool.start + pool.size;
for (auto* node = (malloc_node_t*)pool.start; (uint8_t*)node < pool_end; node = node->next())
{
if (node->allocated)
continue;
{
// merge two unallocated nodes next to each other
auto* next = node->next();
if ((uint8_t*)next < pool_end && !next->allocated)
node->size += next->size;
}
if (node->data_size() < size)
continue;
node->allocated = true;
// shrink node if needed
if (node->data_size() - size > sizeof(malloc_node_t))
{
uint8_t* node_end = (uint8_t*)node->next();
node->size = sizeof(malloc_node_t) + size;
auto* next = node->next();
next->allocated = false;
next->size = node_end - (uint8_t*)next;
}
return node->data;
}
return nullptr;
}
static malloc_node_t* node_from_data_pointer(void* data_pointer)
{
return (malloc_node_t*)((uint8_t*)data_pointer - sizeof(malloc_node_t));
}
void* malloc(size_t size)
{
// align size to s_malloc_default_align boundary
if (size_t ret = size % s_malloc_default_align)
size += s_malloc_default_align - ret;
// find the first pool whose size is at least the requested size
size_t first_usable_pool = 0;
while (s_malloc_pools[first_usable_pool].size < size)
first_usable_pool++;
// first_usable_pool == ceil(log(size / s_malloc_pool_size_initial, s_malloc_pool_size_multiplier))
// try to allocate from an already existing pool
for (size_t i = first_usable_pool; i < s_malloc_pool_count; i++)
if (s_malloc_pools[i].start != nullptr)
if (void* ret = allocate_from_pool(i, size))
return ret;
// allocate new pool
for (size_t i = first_usable_pool; i < s_malloc_pool_count; i++)
{
if (s_malloc_pools[i].start != nullptr)
continue;
if (!allocate_pool(i))
break;
return allocate_from_pool(i, size);
}
errno = ENOMEM;
return nullptr;
}
void* realloc(void* ptr, size_t size)
{
if (ptr == nullptr)
return malloc(size);
// align size to s_malloc_default_align boundary
if (size_t ret = size % s_malloc_default_align)
size += s_malloc_default_align - ret;
auto* node = node_from_data_pointer(ptr);
size_t oldsize = node->data_size();
if (oldsize == size)
return ptr;
// shrink allocation if needed
if (oldsize > size)
{
if (node->data_size() - size > sizeof(malloc_node_t))
{
uint8_t* node_end = (uint8_t*)node->next();
node->size = sizeof(malloc_node_t) + size;
auto* next = node->next();
next->allocated = false;
next->size = node_end - (uint8_t*)next;
}
return ptr;
}
// FIXME: try to expand allocation
// allocate new pointer
void* new_ptr = malloc(size);
if (new_ptr == nullptr)
return nullptr;
// move data to the new pointer
size_t bytes_to_copy = oldsize < size ? oldsize : size;
memcpy(new_ptr, ptr, bytes_to_copy);
free(ptr);
return new_ptr;
}
void free(void* ptr)
{
if (ptr == nullptr)
return;
auto* node = node_from_data_pointer(ptr);
// mark node as unallocated and try to merge with the next node
node->allocated = false;
if (!node->next()->allocated)
node->size += node->next()->size;
}
void* calloc(size_t nmemb, size_t size)
{
size_t total = nmemb * size;
if (size != 0 && total / size != nmemb)
{
errno = ENOMEM;
return nullptr;
}
void* ptr = malloc(total);
if (ptr == nullptr)
return nullptr;
memset(ptr, 0, total);
return ptr;
}
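A short illustration of how requests map onto the pools defined above, assuming a fresh process where no pool has been mmap'ed yet (pool i holds s_malloc_pool_size_initial * 2^i bytes; malloc_example is an illustrative name):
void malloc_example()
{
    char* a = (char*)malloc(24);   // rounded up to 32 bytes (16-byte alignment); pool 0 (4096 B) gets created
    char* b = (char*)malloc(5000); // pool 0 is too small, so pool 1 (8192 B) is mmap'ed and used
    free(a);                       // marks the node free and merges forward if the next node is free
    free(b);                       // backward merges happen lazily inside allocate_from_pool
}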

View File

@ -104,6 +104,7 @@ static void integer_to_string(char* buffer, T value, int base, bool upper, forma
buffer[offset++] = '\0';
}
#if __enable_sse
template<BAN::floating_point T>
static void floating_point_to_string(char* buffer, T value, bool upper, const format_options_t options)
{
@ -227,6 +228,7 @@ static void floating_point_to_exponent_string(char* buffer, T value, bool upper,
exponent_options.width = 3;
integer_to_string<int>(buffer + offset, exponent, 10, upper, exponent_options);
}
#endif
extern "C" int printf_impl(const char* format, va_list arguments, int (*putc_fun)(int, void*), void* data)
{
@ -349,6 +351,7 @@ extern "C" int printf_impl(const char* format, va_list arguments, int (*putc_fun
format++;
break;
}
#if __enable_sse
case 'e':
case 'E':
{
@ -367,6 +370,7 @@ extern "C" int printf_impl(const char* format, va_list arguments, int (*putc_fun
format++;
break;
}
#endif
case 'g':
case 'G':
// TODO

View File

@ -124,6 +124,16 @@ int putenv(char* string)
return -1;
}
if (!environ)
{
environ = (char**)malloc(sizeof(char*) * 2);
if (!environ)
return -1;
environ[0] = string;
environ[1] = nullptr;
return 0;
}
int cnt = 0;
for (int i = 0; string[i]; i++)
if (string[i] == '=')
@ -151,10 +161,7 @@ int putenv(char* string)
char** new_envp = (char**)malloc(sizeof(char*) * (env_count + 2));
if (new_envp == nullptr)
{
errno = ENOMEM;
return -1;
}
for (int i = 0; i < env_count; i++)
new_envp[i] = environ[i];
@ -164,47 +171,9 @@ int putenv(char* string)
free(environ);
environ = new_envp;
if (syscall(SYS_SETENVP, environ) == -1)
return -1;
return 0;
}
void* malloc(size_t bytes)
{
long res = syscall(SYS_ALLOC, bytes);
if (res < 0)
return nullptr;
return (void*)res;
}
void* calloc(size_t nmemb, size_t size)
{
if (nmemb * size < nmemb)
return nullptr;
void* ptr = malloc(nmemb * size);
if (ptr == nullptr)
return nullptr;
memset(ptr, 0, nmemb * size);
return ptr;
}
void* realloc(void* ptr, size_t size)
{
if (ptr == nullptr)
return malloc(size);
long ret = syscall(SYS_REALLOC, ptr, size);
if (ret == -1)
return nullptr;
return (void*)ret;
}
void free(void* ptr)
{
if (ptr == nullptr)
return;
syscall(SYS_FREE, ptr);
}
// Constants and algorithm from https://en.wikipedia.org/wiki/Permuted_congruential_generator
static uint64_t s_rand_state = 0x4d595df4d0f33173;
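With the early-out added above, putenv also works before any environment exists. A minimal sketch (putenv_example and ANSWER are illustrative names):
void putenv_example()
{
    static char entry[] = "ANSWER=42"; // putenv keeps the pointer itself, so the storage must stay alive
    putenv(entry);                     // if environ was nullptr, it now points to { entry, nullptr }
}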

23
libc/sys/mman.cpp Normal file
View File

@ -0,0 +1,23 @@
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
void* mmap(void* addr, size_t len, int prot, int flags, int fildes, off_t off)
{
sys_mmap_t args {
.addr = addr,
.len = len,
.prot = prot,
.flags = flags,
.off = off
};
long ret = syscall(SYS_MMAP, &args);
if (ret == -1)
return nullptr;
return (void*)ret;
}
int munmap(void* addr, size_t len)
{
return syscall(SYS_MUNMAP, addr, len);
}

View File

@ -11,9 +11,26 @@
char** environ;
extern void init_malloc();
extern "C" void _init_libc(char** _environ)
{
environ = _environ;
init_malloc();
if (!_environ)
return;
size_t env_count = 0;
while (_environ[env_count])
env_count++;
environ = (char**)malloc(sizeof(char*) * (env_count + 1));
for (size_t i = 0; i < env_count; i++)
{
size_t bytes = strlen(_environ[i]) + 1;
environ[i] = (char*)malloc(bytes);
memcpy(environ[i], _environ[i], bytes);
}
environ[env_count] = nullptr;
}
void _exit(int status)

View File

@ -1,3 +0,0 @@
#!/bin/bash
find . | grep -vE '(build|toolchain)' | grep -E '\.(cpp|h|S)$' | xargs wc -l | sort -n

View File

@ -6,4 +6,4 @@ qemu-system-$BANAN_ARCH \
-smp 2 \
-drive format=raw,media=disk,file=${DISK_IMAGE_PATH} \
-serial stdio \
-accel kvm \
$@ \

View File

@ -437,6 +437,40 @@ pid_t execute_command_no_wait(BAN::Vector<BAN::String>& args, int fd_in, int fd_
MUST(cmd_args.push_back((char*)arg.data()));
MUST(cmd_args.push_back(nullptr));
// do PATH resolution
BAN::String executable_file;
if (!args.front().empty() && args.front().front() != '.' && args.front().front() != '/')
{
char* path_env_cstr = getenv("PATH");
if (path_env_cstr)
{
auto path_env_list = MUST(BAN::StringView(path_env_cstr).split(':'));
for (auto path_env : path_env_list)
{
BAN::String test_file = path_env;
MUST(test_file.push_back('/'));
MUST(test_file.append(args.front()));
struct stat st;
if (stat(test_file.data(), &st) == 0)
{
executable_file = BAN::move(test_file);
break;
}
}
if (executable_file.empty())
{
fprintf(stderr, "command not found: %s\n", args.front().data());
return -1;
}
}
}
else
{
executable_file = args.front();
}
pid_t pid = fork();
if (pid == 0)
{
@ -477,11 +511,14 @@ pid_t execute_command_no_wait(BAN::Vector<BAN::String>& args, int fd_in, int fd_
setpgid(0, pgrp);
}
execv(cmd_args.front(), cmd_args.data());
execv(executable_file.data(), cmd_args.data());
perror("execv");
exit(1);
}
if (pid == -1)
ERROR_RETURN("fork", -1);
return pid;
}
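The lookup order implemented above can be expressed in isolation roughly as follows (an editorial sketch using std::string instead of BAN::String; resolve_in_path is an illustrative name, not part of the shell):
#include <string>
#include <sys/stat.h>

// Return the first "<dir>/<cmd>" from a colon-separated PATH that stat()s
// successfully, or an empty string for "command not found".
static std::string resolve_in_path(const std::string& cmd, const std::string& path_env)
{
    size_t start = 0;
    while (start <= path_env.size())
    {
        size_t end = path_env.find(':', start);
        if (end == std::string::npos)
            end = path_env.size();
        std::string candidate = path_env.substr(start, end - start) + "/" + cmd;
        struct stat st;
        if (stat(candidate.c_str(), &st) == 0)
            return candidate; // first hit wins, matching the shell's loop
        start = end + 1;
    }
    return {};
}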
@ -489,7 +526,7 @@ int execute_command(BAN::Vector<BAN::String>& args, int fd_in, int fd_out)
{
pid_t pid = execute_command_no_wait(args, fd_in, fd_out, 0);
if (pid == -1)
ERROR_RETURN("fork", 1);
return 1;
int status;
if (waitpid(pid, &status, 0) == -1)

View File

@ -74,6 +74,8 @@ int main()
if (setuid(pwd->pw_uid) == -1)
perror("setuid");
setenv("PATH", "/bin:/usr/bin", 0);
setenv("HOME", pwd->pw_dir, 1);
chdir(pwd->pw_dir);