Compare commits

...

29 Commits

Author SHA1 Message Date
Bananymous 995dfa1455 Kernel: Fix AML PCIConfig OpRegion accesses
Apparently I'm not supposed to calculate device/function from the
offset, but parse them from the ACPI namespace :)

This allows PCI PIN interrupt routing to actually work
2025-07-04 13:21:02 +03:00
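A minimal sketch of the decoding this relies on (the struct and helper below are illustrative, not the kernel's code; _SEG, _BBN and _ADR are the ACPI objects the change consults): the device and function for a PCIConfig OpRegion come from the enclosing device's _ADR value, with _SEG and _BBN defaulting to 0 when absent.

#include <cstdint>

// Per ACPI, a PCI _ADR encodes the device number in its high 16 bits and the
// function number in its low 16 bits.
struct PciLocation { uint16_t seg; uint8_t bus; uint8_t dev; uint8_t func; };

static PciLocation pci_location_from_acpi(uint64_t adr, uint64_t seg = 0, uint64_t bbn = 0)
{
    PciLocation loc {};
    loc.seg  = static_cast<uint16_t>(seg);                 // from _SEG, default 0
    loc.bus  = static_cast<uint8_t>(bbn);                  // from _BBN, default 0
    loc.dev  = static_cast<uint8_t>((adr >> 16) & 0xFFFF); // high word of _ADR
    loc.func = static_cast<uint8_t>(adr & 0xFFFF);         // low word of _ADR
    return loc;
}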
Bananymous 544c8dbc13 Kernel: Optimize AML interpreter stack usage
There is a very hacky no-inline hack that I am not proud of, but it drops
the stack usage of a few functions A LOT.

Previously VirtualBox could not boot with our 8-page stack, but these
changes allow it to boot on 5!
2025-07-03 00:49:23 +03:00
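The hack in question brackets the error-propagation header with GCC's optimize pragma, the same pattern the AML diffs further down show; in isolation it looks like this (BAN/Errors.h is the project's TRY/MUST header):

// Keeping the TRY/MUST helpers out of line means a parser function with
// dozens of TRY expansions no longer accumulates every helper's locals in
// one giant stack frame.
#pragma GCC push_options
#pragma GCC optimize "no-inline"
#include <BAN/Errors.h>
#pragma GCC pop_options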
Bananymous 8da4f80453 Kernel: Add custom stack to double fault handler
This prevents triple faults!
2025-07-02 23:14:52 +03:00
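A rough sketch of the mechanism on x86-64 (the ist parameter appears in the IDT diff below; the stack size and constants here are illustrative assumptions): vector 8 is routed through an IST slot whose stack pointer lives in the TSS, so a double fault always runs on a known-good stack even when the kernel stack that caused it is unusable.

#include <cstdint>

constexpr uint8_t DOUBLE_FAULT_VECTOR = 8;   // #DF
constexpr uint8_t DOUBLE_FAULT_IST    = 1;   // IST slots are numbered 1..7

// Dedicated, always-mapped stack for the double fault handler (size illustrative).
alignas(16) static uint8_t s_double_fault_stack[4 * 4096];

// Stacks grow down, so the TSS IST entry should point at the top of the buffer.
static uint64_t double_fault_stack_top()
{
    return reinterpret_cast<uint64_t>(s_double_fault_stack + sizeof(s_double_fault_stack));
}

// Setup (conceptual): write double_fault_stack_top() into TSS.IST1 and select
// IST 1 in the IDT entry for vector 8, e.g. with the
// register_interrupt_handler(index, handler, ist) overload added in the diff.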
Bananymous 6084aae603 Kernel: Add guard pages to kernel and userspace stacks 2025-07-02 23:12:36 +03:00
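The effect of the guard pages is visible in the VirtualRange diff below: the reserved range is two pages larger than the usable stack and its first and last page are never mapped, so an overflow faults immediately instead of corrupting a neighbouring allocation. A self-contained sketch of the address math (PAGE_SIZE and vaddr_t stand in for the kernel's own definitions):

#include <cstddef>
#include <cstdint>

using vaddr_t = uintptr_t;
constexpr size_t PAGE_SIZE = 4096;

// With guard pages the usable region sits one page inside the reserved span.
static vaddr_t usable_vaddr(vaddr_t reserved_vaddr, bool has_guard_pages)
{
    return reserved_vaddr + (has_guard_pages ? PAGE_SIZE : 0);
}

static size_t usable_size(size_t reserved_size, bool has_guard_pages)
{
    return reserved_size - (has_guard_pages ? 2 * PAGE_SIZE : 0);
}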
Bananymous e1319a06f2 Kernel: Remove accidentally committed debug prints 2025-07-02 23:09:57 +03:00
Bananymous 51fd7a607d Kernel: Fix IDE controller waiting
We did not correctly wait until the controller is ready to receive data on
a write command. Also remove a possible kernel panic if the controller sends
unexpected interrupts.
2025-07-02 22:04:41 +03:00
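For context, a PIO write must poll the status register until BSY clears and DRQ is set before each data block is sent; a hedged sketch of that wait (the status bits are the standard ATA ones, read_status is a hypothetical port-read helper, and the retry bound is illustrative):

#include <cstdint>

constexpr uint8_t ATA_STATUS_ERR = 0x01;
constexpr uint8_t ATA_STATUS_DRQ = 0x08;
constexpr uint8_t ATA_STATUS_BSY = 0x80;

static bool wait_until_ready_for_data(uint8_t (*read_status)())
{
    for (int i = 0; i < 100000; i++)                          // bounded poll, never spin forever
    {
        const uint8_t status = read_status();
        if (status & ATA_STATUS_ERR)
            return false;                                     // command failed
        if (!(status & ATA_STATUS_BSY) && (status & ATA_STATUS_DRQ))
            return true;                                      // controller is ready for the data block
    }
    return false;                                             // timed out
}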
Bananymous 8aff315c7b LibC: fflush file in setvbuf before updating the buffer 2025-07-02 01:54:03 +03:00
Bananymous 8e0d79f301 ports: Fix `make` compilation with new toolchain 2025-07-02 01:54:03 +03:00
Bananymous 8a0269d29e Kernel: Remove kernel processes
The kernel can just use raw threads; pretty much the only thing a
process provides is syscalls, which kernel threads of course don't
need.

Also this makes init process have pid 1 :D
2025-07-02 01:54:03 +03:00
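The replacement pattern shows up at several call sites in the diffs below: instead of wrapping a worker in Process::create_kernel, the code now creates a raw kernel thread and hands it to the scheduler, roughly:

// Pattern as used in the diffs (Thread::create_kernel, MUST and
// Processor::scheduler().add_thread are the kernel's own APIs here).
static void spawn_background_worker()
{
    auto* worker = MUST(Thread::create_kernel(
        [](void*)
        {
            // ... background work loop ...
        }, nullptr
    ));
    MUST(Processor::scheduler().add_thread(worker));
}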
Bananymous 892e16dfb1 Kernel: Increase PS2 timeout to 300 ms, load PS2 in separate thread
PS/2 devices sometimes hit the command timeout on slow emulation, so
increase the timeouts.

Also move PS/2 device initialization to a different thread because
device identification waits for timeouts.
2025-07-02 00:17:42 +03:00
Bananymous 92e4078287 Kernel: Rewrite ThreadBlocker
This gets rid of a very old bug where the kernel panics when a thread is
being woken up and unblocked at the same time on different cores. This
required adding a new lock to SchedulerQueue::Node and a cap on how many
threads a ThreadBlocker can simultaneously block. I don't think I ever
block more than five threads on the same ThreadBlocker, so this should
be fine.
2025-07-02 00:17:42 +03:00
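The shape of the fix is visible in the scheduler headers below: each queue node now carries its own blocker_lock, and the blocker keeps the blocked nodes in a small fixed-size array instead of an intrusive chain. A sketch mirroring those headers (Thread, SpinLock and the field names are the kernel's own):

struct SchedulerQueueNode
{
    Thread* const thread;
    SpinLock blocker_lock;                     // taken by both the wake-up and the unblock path
    ThreadBlocker* blocker { nullptr };
    // ... scheduling bookkeeping ...
};

class ThreadBlocker
{
    SpinLock m_lock;
    SchedulerQueueNode* m_block_chain[32] {};  // hard cap on simultaneously blocked threads
    size_t m_block_chain_length { 0 };
};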
Bananymous 41e1819072 Kernel: Align argv and envp to pointer boundary
This bug was found with ubsan
2025-07-02 00:17:42 +03:00
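The bug class here is writing an array of char* pointers at an address that is not a multiple of sizeof(void*); the fix rounds the destination down to a pointer boundary first, along the lines of (illustrative helper, not the kernel's exact code):

#include <cstdint>

static uintptr_t align_down_to_pointer(uintptr_t vaddr)
{
    return vaddr & ~(static_cast<uintptr_t>(sizeof(void*)) - 1);
}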
Bananymous fb7e9719a1 Kernel: Add fast fill method to framebuffer device
This makes `clear` much faster when running without kvm!
2025-07-02 00:17:42 +03:00
Bananymous c2d09b64ca Kernel: Drop 24 bpp support from double buffer
I don't even know why this was supported; I am not planning on making
the fb anything other than 32 bpp.
2025-07-02 00:17:42 +03:00
Bananymous 1b2aa6c2da Kernel: Fix unaligned access in AML
Found with ubsan
2025-07-02 00:17:42 +03:00
Bananymous a5b4cee298 Kernel: Load USTAR from a boot module if loading root partition fails
This allows banan-os to boot on hardware where we don't have a working
storage driver or the storage driver fails (pretty common with my USB
mass storage drivers...)
2025-07-02 00:17:42 +03:00
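The new USTARModule helper (its header is added in the diffs below) needs to decide whether a boot module is a tar archive before unpacking it into the root filesystem; the usual check is the "ustar" magic at offset 257 of the first 512-byte header block. A hedged sketch, not the kernel's exact implementation:

#include <cstddef>
#include <cstdint>
#include <cstring>

static bool looks_like_ustar(const uint8_t* module_data, size_t module_size)
{
    constexpr size_t ustar_magic_offset = 257;
    if (module_size < 512)                     // smaller than one tar header block
        return false;
    return memcmp(module_data + ustar_magic_offset, "ustar", 5) == 0;
}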
Bananymous 17f1ac10e3 Kernel: Don't ignore modules passed with multiboot2 2025-07-02 00:17:42 +03:00
Bananymous c67198032f Kernel: Rewrite TmpFS block and inode allocation
This makes creating files and appending to them A LOT faster. Some code
I tested took 40 seconds with the previous implementation and less than a
second with the new one!

This code is really sketchy, I hope I'll never have to touch it again :)
2025-07-02 00:17:42 +03:00
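The rewrite walks a two-level tree of inode pages directly instead of going through the old generic for_each_indirect_paddr_allocating helper; the index arithmetic it uses (lifted from the find_inode rewrite below, with the per-page counts passed in as parameters so the sketch stands alone) is:

#include <cstddef>

struct InodeIndices { size_t layer0, layer1, layer2; };

static InodeIndices split_inode_number(size_t ino, size_t first_inode,
                                       size_t inode_infos_per_page, size_t page_infos_per_page)
{
    const size_t n = ino - first_inode;
    return {
        .layer0 = n / inode_infos_per_page / page_infos_per_page,  // slot in the root page
        .layer1 = n / inode_infos_per_page % page_infos_per_page,  // slot in the mid-level page
        .layer2 = n % inode_infos_per_page,                        // slot in the leaf inode page
    };
}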
Bananymous 107b092982 Kernel: Allow arbitrary sized tmpfs files
The 2 block limit started to get annoying :D
2025-07-02 00:17:42 +03:00
Bananymous bac06e45a4 Kernel: Fix TmpSymlinkInode target getting/setting 2025-07-02 00:17:42 +03:00
Bananymous 0e8a68831c Kernel: Fix TmpInode chmod not preserving file type 2025-07-02 00:17:42 +03:00
Bananymous 5912abd541 Kernel: Add error reporting for readonly mounted filesystems 2025-07-02 00:17:42 +03:00
Bananymous 13d33995cb Kernel: Make sure PageTable::fast_page gets passed page aligned addr 2025-06-30 01:48:47 +03:00
Bananymous c8c05f62b4 Kernel: Remove unused FileSystem::dev 2025-06-29 00:29:03 +03:00
Bananymous 944b045885 Toolchain: Set CMAKE_STAGING_PREFIX in toolchain file 2025-06-28 23:34:11 +03:00
Bananymous 19897ffa26 userspace: Compile test utility :D 2025-06-28 22:40:00 +03:00
Bananymous 42a10b21c7 userspace: Implement true and false utilities 2025-06-28 22:39:19 +03:00
Bananymous 5df0e25c1f userspace: Implement chown utility 2025-06-28 21:32:59 +03:00
Bananymous ebf2b16d09 Kernel: Implement chown to ext2 and tmpfs 2025-06-28 21:28:54 +03:00
80 changed files with 1499 additions and 680 deletions

View File

@ -31,6 +31,7 @@ set(KERNEL_SOURCES
kernel/FS/ProcFS/Inode.cpp
kernel/FS/TmpFS/FileSystem.cpp
kernel/FS/TmpFS/Inode.cpp
kernel/FS/USTARModule.cpp
kernel/FS/VirtualFileSystem.cpp
kernel/GDT.cpp
kernel/IDT.cpp

View File

@ -206,6 +206,7 @@ namespace Kernel
{
ASSERT(s_kernel);
ASSERT(paddr);
ASSERT(paddr % PAGE_SIZE == 0);
ASSERT(s_fast_page_lock.current_processor_has_lock());

View File

@ -36,6 +36,12 @@ multiboot2_start:
.long 12
.long V2P(_start)
# page align modules
.align 8
.short 6
.short 0
.long 8
.align 8
.short 0
.short 0
@ -53,10 +59,10 @@ bananboot_start:
bananboot_end:
.section .bss, "aw", @nobits
.align 4096
boot_stack_bottom:
.global g_boot_stack_top
g_boot_stack_bottom:
.skip 4096 * 4
boot_stack_top:
g_boot_stack_top:
.global g_kernel_cmdline
g_kernel_cmdline:
@ -188,7 +194,7 @@ _start:
movl %ebx, V2P(bootloader_info)
# load boot stack
movl $V2P(boot_stack_top), %esp
movl $V2P(g_boot_stack_top), %esp
# load boot GDT
lgdt V2P(boot_gdtr)
@ -206,7 +212,7 @@ gdt_flush:
call initialize_paging
# load higher half stack pointer
movl $boot_stack_top, %esp
movl $g_boot_stack_top, %esp
# jump to higher half
leal higher_half, %ecx

View File

@ -503,6 +503,7 @@ namespace Kernel
{
ASSERT(s_kernel);
ASSERT(paddr);
ASSERT(paddr % PAGE_SIZE == 0);
ASSERT(s_fast_page_lock.current_processor_has_lock());

View File

@ -36,6 +36,12 @@ multiboot2_start:
.long 12
.long V2P(_start)
# page align modules
.align 8
.short 6
.short 0
.long 8
.align 8
.short 0
.short 0
@ -53,9 +59,10 @@ bananboot_start:
bananboot_end:
.section .bss, "aw", @nobits
boot_stack_bottom:
.skip 4096 * 64
boot_stack_top:
.global g_boot_stack_top
g_boot_stack_bottom:
.skip 4096 * 4
g_boot_stack_top:
.global g_kernel_cmdline
g_kernel_cmdline:
@ -187,7 +194,7 @@ _start:
movl %eax, V2P(bootloader_magic)
movl %ebx, V2P(bootloader_info)
movl $V2P(boot_stack_top), %esp
movl $V2P(g_boot_stack_top), %esp
call check_requirements
call enable_sse

View File

@ -87,6 +87,12 @@ namespace Kernel::ACPI::AML
struct OpRegion
{
GAS::AddressSpaceID address_space;
uint16_t seg;
uint8_t bus;
uint8_t dev;
uint8_t func;
uint64_t offset;
uint64_t length;
};

View File

@ -41,6 +41,12 @@ namespace Kernel
Type type;
};
struct BootModule
{
paddr_t start;
size_t size;
};
struct BootInfo
{
BAN::String command_line;
@ -48,6 +54,7 @@ namespace Kernel
RSDP rsdp {};
paddr_t kernel_paddr {};
BAN::Vector<BootModule> modules;
BAN::Vector<MemoryMapEntry> memory_map_entries;
};

View File

@ -17,6 +17,7 @@ namespace Kernel
uint32_t get_pixel(uint32_t x, uint32_t y) const;
void set_pixel(uint32_t x, uint32_t y, uint32_t rgb);
void fill(uint32_t rgb);
// positive rows -> empty pixels on bottom
// negative rows -> empty pixels on top

View File

@ -62,8 +62,6 @@ namespace Kernel
virtual BAN::RefPtr<Inode> root_inode() override { return m_root_inode; }
virtual dev_t dev() const override { return m_block_device->rdev(); };
private:
Ext2FS(BAN::RefPtr<BlockDevice> block_device)
: m_block_device(block_device)

View File

@ -46,6 +46,7 @@ namespace Kernel
virtual BAN::ErrorOr<size_t> write_impl(off_t, BAN::ConstByteSpan) override;
virtual BAN::ErrorOr<void> truncate_impl(size_t) override;
virtual BAN::ErrorOr<void> chmod_impl(mode_t) override;
virtual BAN::ErrorOr<void> chown_impl(uid_t, gid_t) override;
virtual BAN::ErrorOr<void> utimens_impl(const timespec[2]) override;
virtual BAN::ErrorOr<void> fsync_impl() override;

View File

@ -37,8 +37,6 @@ namespace Kernel
virtual BAN::RefPtr<Inode> root_inode() override { return m_root_inode; }
virtual dev_t dev() const override { return m_block_device->rdev(); };
BAN::ErrorOr<BAN::RefPtr<FATInode>> open_inode(BAN::RefPtr<FATInode> parent, const FAT::DirectoryEntry& entry, uint32_t cluster_index, uint32_t entry_index);
BAN::ErrorOr<void> inode_read_cluster(BAN::RefPtr<FATInode>, size_t index, BAN::ByteSpan buffer);
blksize_t inode_block_size(BAN::RefPtr<const FATInode>) const;

View File

@ -26,8 +26,6 @@ namespace Kernel
static BAN::ErrorOr<BAN::RefPtr<FileSystem>> from_block_device(BAN::RefPtr<BlockDevice>);
virtual BAN::RefPtr<Inode> root_inode() = 0;
virtual dev_t dev() const = 0;
};
}

View File

@ -58,7 +58,7 @@ namespace Kernel
virtual BAN::RefPtr<Inode> root_inode() override { return m_root_inode; }
virtual dev_t dev() const override { return m_rdev; }
dev_t rdev() const { return m_rdev; }
BAN::ErrorOr<BAN::RefPtr<TmpInode>> open_inode(ino_t ino);
@ -118,16 +118,8 @@ namespace Kernel
private:
InodeLocation find_inode(ino_t ino);
paddr_t find_block(size_t index);
template<TmpFuncs::for_each_indirect_paddr_allocating_callback F>
BAN::ErrorOr<void> for_each_indirect_paddr_allocating(PageInfo page_info, F callback, size_t depth);
template<TmpFuncs::for_each_indirect_paddr_allocating_callback F>
BAN::ErrorOr<BAN::Iteration> for_each_indirect_paddr_allocating_internal(PageInfo page_info, F callback, size_t depth);
paddr_t find_indirect(PageInfo root, size_t index, size_t depth);
private:
const dev_t m_rdev;
@ -146,14 +138,14 @@ namespace Kernel
static constexpr size_t max_data_pages =
(PAGE_SIZE / sizeof(PageInfo)) *
(PAGE_SIZE / sizeof(PageInfo)) *
(PAGE_SIZE / sizeof(PageInfo));
(PAGE_SIZE / sizeof(PageInfo) - 1);
// We store inodes in pages with double indirection.
// With 64-bit pointers we can store 512^2 pages of inodes
// which should be enough for now.
// In future this should be dynamically calculated based on maximum
// number of pages for this file system.
PageInfo m_inode_pages;
PageInfo m_inode_pages {};
static constexpr size_t first_inode = 1;
static constexpr size_t max_inodes =
(PAGE_SIZE / sizeof(PageInfo)) *

View File

@ -48,15 +48,21 @@ namespace Kernel
TmpInode(TmpFileSystem&, ino_t, const TmpInodeInfo&);
virtual BAN::ErrorOr<void> chmod_impl(mode_t) override;
virtual BAN::ErrorOr<void> chown_impl(uid_t, gid_t) override;
virtual BAN::ErrorOr<void> utimens_impl(const timespec[2]) override;
virtual BAN::ErrorOr<void> fsync_impl() override { return {}; }
void sync();
void free_all_blocks();
virtual BAN::ErrorOr<void> prepare_unlink() { return {}; };
void free_all_blocks();
void free_indirect_blocks(size_t block, uint32_t depth);
BAN::Optional<size_t> block_index(size_t data_block_index);
BAN::Optional<size_t> block_index_from_indirect(size_t block, size_t index, uint32_t depth);
BAN::ErrorOr<size_t> block_index_with_allocation(size_t data_block_index);
BAN::ErrorOr<size_t> block_index_from_indirect_with_allocation(size_t& block, size_t index, uint32_t depth);
protected:
TmpFileSystem& m_fs;
@ -117,10 +123,9 @@ namespace Kernel
static BAN::ErrorOr<BAN::RefPtr<TmpSymlinkInode>> create_new(TmpFileSystem&, mode_t, uid_t, gid_t, BAN::StringView target);
~TmpSymlinkInode();
BAN::ErrorOr<void> set_link_target(BAN::StringView);
protected:
virtual BAN::ErrorOr<BAN::String> link_target_impl() override;
BAN::ErrorOr<BAN::String> link_target_impl() override;
BAN::ErrorOr<void> set_link_target_impl(BAN::StringView) override;
virtual bool can_read_impl() const override { return false; }
virtual bool can_write_impl() const override { return false; }

View File

@ -0,0 +1,12 @@
#pragma once
#include <kernel/BootInfo.h>
#include <kernel/FS/FileSystem.h>
namespace Kernel
{
bool is_ustar_boot_module(const BootModule&);
BAN::ErrorOr<void> unpack_boot_module_into_filesystem(BAN::RefPtr<FileSystem>, const BootModule&);
}

View File

@ -29,9 +29,6 @@ namespace Kernel
virtual BAN::RefPtr<Inode> root_inode() override { return m_root_fs->root_inode(); }
// FIXME:
virtual dev_t dev() const override { return 0; }
BAN::ErrorOr<void> mount(const Credentials&, BAN::StringView, BAN::StringView);
BAN::ErrorOr<void> mount(const Credentials&, BAN::RefPtr<FileSystem>, BAN::StringView);

View File

@ -75,7 +75,7 @@ namespace Kernel
private:
IDT() = default;
void register_interrupt_handler(uint8_t index, void (*handler)());
void register_interrupt_handler(uint8_t index, void (*handler)(), uint8_t ist = 0);
void register_syscall_handler(uint8_t index, void (*handler)());
private:

View File

@ -27,7 +27,9 @@ namespace Kernel::Input
private:
PS2Controller() = default;
BAN::ErrorOr<void> initialize_impl(uint8_t scancode_set);
BAN::ErrorOr<void> initialize_device(uint8_t, uint8_t scancode_set);
BAN::ErrorOr<void> identify_device(uint8_t, uint8_t scancode_set);
void device_initialize_task(void*);
BAN::ErrorOr<uint8_t> read_byte();
BAN::ErrorOr<void> send_byte(uint16_t port, uint8_t byte);

View File

@ -22,6 +22,7 @@ namespace Kernel::Input
protected:
PS2Device(PS2Controller&, InputDevice::Type type);
virtual ~PS2Device();
protected:
PS2Controller& m_controller;

View File

@ -15,21 +15,21 @@ namespace Kernel
public:
// Create virtual range to fixed virtual address
static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr(PageTable&, vaddr_t, size_t, PageTable::flags_t flags, bool preallocate_pages);
static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr(PageTable&, vaddr_t, size_t, PageTable::flags_t flags, bool preallocate_pages, bool add_guard_pages);
// Create virtual range to virtual address range
static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr_range(PageTable&, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t, PageTable::flags_t flags, bool preallocate_pages);
static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr_range(PageTable&, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t, PageTable::flags_t flags, bool preallocate_pages, bool add_guard_pages);
~VirtualRange();
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> clone(PageTable&);
vaddr_t vaddr() const { return m_vaddr; }
size_t size() const { return m_size; }
vaddr_t vaddr() const { return m_vaddr + (m_has_guard_pages ? PAGE_SIZE : 0); }
size_t size() const { return m_size - (m_has_guard_pages ? 2 * PAGE_SIZE : 0); }
PageTable::flags_t flags() const { return m_flags; }
paddr_t paddr_of(vaddr_t vaddr) const
{
ASSERT(vaddr % PAGE_SIZE == 0);
const size_t index = (vaddr - m_vaddr) / PAGE_SIZE;
const size_t index = (vaddr - this->vaddr()) / PAGE_SIZE;
ASSERT(index < m_paddrs.size());
const paddr_t paddr = m_paddrs[index];
ASSERT(paddr);
@ -41,12 +41,13 @@ namespace Kernel
BAN::ErrorOr<void> allocate_page_for_demand_paging(vaddr_t address);
private:
VirtualRange(PageTable&, bool preallocated, vaddr_t, size_t, PageTable::flags_t);
VirtualRange(PageTable&, bool preallocated, bool has_guard_pages, vaddr_t, size_t, PageTable::flags_t);
BAN::ErrorOr<void> initialize();
private:
PageTable& m_page_table;
const bool m_preallocated;
const bool m_has_guard_pages;
const vaddr_t m_vaddr;
const size_t m_size;
const PageTable::flags_t m_flags;

View File

@ -4,7 +4,7 @@
#include <BAN/HashMap.h>
#include <BAN/UniqPtr.h>
#include <kernel/Networking/NetworkInterface.h>
#include <kernel/Process.h>
#include <kernel/Thread.h>
#include <kernel/ThreadBlocker.h>
namespace Kernel
@ -56,9 +56,9 @@ namespace Kernel
BAN::HashMap<BAN::IPv4Address, BAN::MACAddress> m_arp_table;
Process* m_process = nullptr;
Thread* m_thread { nullptr };
BAN::CircularQueue<PendingArpPacket, 128> m_pending_packets;
ThreadBlocker m_pending_thread_blocker;
ThreadBlocker m_pending_thread_blocker;
friend class BAN::UniqPtr<ARPTable>;
};

View File

@ -11,7 +11,7 @@
#include <kernel/Networking/NetworkInterface.h>
#include <kernel/Networking/NetworkLayer.h>
#include <kernel/Networking/NetworkSocket.h>
#include <kernel/Process.h>
#include <kernel/Thread.h>
namespace Kernel
{
@ -72,7 +72,7 @@ namespace Kernel
RecursiveSpinLock m_bound_socket_lock;
BAN::UniqPtr<ARPTable> m_arp_table;
Process* m_process { nullptr };
Thread* m_thread { nullptr };
static constexpr size_t pending_packet_buffer_size = 128 * PAGE_SIZE;
BAN::UniqPtr<VirtualRange> m_pending_packet_buffer;

View File

@ -6,7 +6,7 @@
#include <kernel/Memory/VirtualRange.h>
#include <kernel/Networking/NetworkInterface.h>
#include <kernel/Networking/NetworkSocket.h>
#include <kernel/Process.h>
#include <kernel/Thread.h>
#include <kernel/ThreadBlocker.h>
namespace Kernel
@ -162,7 +162,7 @@ namespace Kernel
State m_next_state { State::Closed };
uint8_t m_next_flags { 0 };
Process* m_process { nullptr };
Thread* m_thread { nullptr };
uint64_t m_time_wait_start_ms { 0 };

View File

@ -39,8 +39,6 @@ namespace Kernel
using entry_t = Thread::entry_t;
public:
static Process* create_kernel();
static Process* create_kernel(entry_t, void*);
static BAN::ErrorOr<Process*> create_userspace(const Credentials&, BAN::StringView path, BAN::Span<BAN::StringView> arguments);
~Process();
void cleanup_function(Thread*);
@ -217,8 +215,6 @@ namespace Kernel
size_t proc_cmdline(off_t offset, BAN::ByteSpan) const;
size_t proc_environ(off_t offset, BAN::ByteSpan) const;
bool is_userspace() const { return m_is_userspace; }
// Returns error if page could not be allocated
// Returns true if the page was allocated successfully
// Return false if access was page violation (segfault)
@ -331,8 +327,6 @@ namespace Kernel
BAN::Vector<BAN::String> m_cmdline;
BAN::Vector<BAN::String> m_environ;
bool m_is_userspace { false };
BAN::Vector<ChildExitStatus> m_child_exit_statuses;
ThreadBlocker m_child_exit_blocker;

View File

@ -14,33 +14,12 @@ namespace Kernel
class BaseMutex;
class Thread;
class ThreadBlocker;
struct SchedulerQueueNode;
class SchedulerQueue
{
public:
struct Node
{
Node(Thread* thread)
: thread(thread)
{}
Thread* const thread;
Node* next { nullptr };
Node* prev { nullptr };
uint64_t wake_time_ns { static_cast<uint64_t>(-1) };
ThreadBlocker* blocker { nullptr };
Node* block_chain_next { nullptr };
Node* block_chain_prev { nullptr };
ProcessorID processor_id { PROCESSOR_NONE };
bool blocked { false };
uint64_t last_start_ns { 0 };
uint64_t time_used_ns { 0 };
};
using Node = SchedulerQueueNode;
public:
void add_thread_to_back(Node*);

View File

@ -0,0 +1,35 @@
#pragma once
#include <kernel/ProcessorID.h>
#include <kernel/Lock/SpinLock.h>
namespace Kernel
{
class Thread;
class ThreadBlocker;
struct SchedulerQueueNode
{
SchedulerQueueNode(Thread* thread)
: thread(thread)
{}
Thread* const thread;
SchedulerQueueNode* next { nullptr };
SchedulerQueueNode* prev { nullptr };
uint64_t wake_time_ns { static_cast<uint64_t>(-1) };
SpinLock blocker_lock;
ThreadBlocker* blocker { nullptr };
ProcessorID processor_id { PROCESSOR_NONE };
bool blocked { false };
uint64_t last_start_ns { 0 };
uint64_t time_used_ns { 0 };
};
}

View File

@ -40,8 +40,6 @@ namespace Kernel
void select_device(bool is_secondary);
BAN::ErrorOr<DeviceType> identify(bool is_secondary, BAN::Span<uint16_t> buffer);
BAN::ErrorOr<void> block_until_irq();
uint8_t io_read(uint16_t);
void io_write(uint16_t, uint8_t);
void read_buffer(uint16_t, uint16_t*, size_t);
@ -54,7 +52,7 @@ namespace Kernel
const uint16_t m_ctrl;
Mutex m_mutex;
BAN::Atomic<bool> m_has_got_irq { false };
ThreadBlocker m_thread_blocker;
// Non-owning pointers
BAN::Vector<ATADevice*> m_devices;

View File

@ -36,7 +36,7 @@ namespace Kernel
static constexpr size_t userspace_stack_size { PAGE_SIZE * 128 };
public:
static BAN::ErrorOr<Thread*> create_kernel(entry_t, void*, Process*);
static BAN::ErrorOr<Thread*> create_kernel(entry_t, void*);
static BAN::ErrorOr<Thread*> create_userspace(Process*, PageTable&);
~Thread();

View File

@ -33,7 +33,9 @@ namespace Kernel
private:
SpinLock m_lock;
SchedulerQueue::Node* m_block_chain { nullptr };
SchedulerQueue::Node* m_block_chain[32] {};
size_t m_block_chain_length { 0 };
friend class Scheduler;
};

View File

@ -34,7 +34,7 @@ namespace Kernel
BAN::Atomic<uint32_t> m_changed_ports { 0 };
ThreadBlocker m_changed_port_blocker;
BAN::Atomic<Process*> m_port_updater { nullptr };
BAN::Atomic<Thread*> m_port_updater { nullptr };
struct PortInfo
{

View File

@ -73,7 +73,7 @@ namespace Kernel
Mutex m_mutex;
BAN::Atomic<Process*> m_port_updater { nullptr };
BAN::Atomic<Thread*> m_port_updater { nullptr };
ThreadBlocker m_port_thread_blocker;
BAN::Atomic<bool> m_port_changed { false };

View File

@ -6,6 +6,7 @@
#define MULTIBOOT2_TAG_END 0
#define MULTIBOOT2_TAG_CMDLINE 1
#define MULTIBOOT2_TAG_MODULES 3
#define MULTIBOOT2_TAG_MMAP 6
#define MULTIBOOT2_TAG_FRAMEBUFFER 8
#define MULTIBOOT2_TAG_OLD_RSDP 14
@ -33,6 +34,13 @@ struct multiboot2_cmdline_tag_t : public multiboot2_tag_t
char cmdline[];
} __attribute__((packed));
struct multiboot2_modules_tag_t : public multiboot2_tag_t
{
uint32_t mod_start;
uint32_t mod_end;
uint8_t string[];
} __attribute__((packed));
struct multiboot2_mmap_entry_t
{
uint64_t base_addr;

View File

@ -885,7 +885,10 @@ acpi_release_global_lock:
set_irq(irq);
InterruptController::get().enable_irq(irq);
Process::create_kernel([](void*) { get().acpi_event_task(); }, nullptr);
if (auto thread_or_error = Thread::create_kernel([](void*) { get().acpi_event_task(); }, nullptr); thread_or_error.is_error())
dwarnln("Failed to create ACPI thread, power button will not work: {}", thread_or_error.error());
else if (auto ret = Processor::scheduler().add_thread(thread_or_error.value()); ret.is_error())
dwarnln("Failed to create ACPI thread, power button will not work: {}", ret.error());
}
dprintln("Initialized ACPI interrupts");

View File

@ -1,3 +1,10 @@
// FIXME: Rewrite aml interpreter to not be recursive.
// Not inlining TRYs drops our stack usage a ton...
#pragma GCC push_options
#pragma GCC optimize "no-inline"
#include <BAN/Errors.h>
#pragma GCC pop_options
#include <BAN/Assert.h>
#include <BAN/String.h>
@ -75,7 +82,12 @@ namespace Kernel::ACPI::AML
);
return BAN::Error::from_errno(EINVAL);
}
name.parts[i] = aml_data.as<const uint32_t>();
name.parts[i] =
static_cast<uint32_t>(aml_data[0] << 0) |
static_cast<uint32_t>(aml_data[1] << 8) |
static_cast<uint32_t>(aml_data[2] << 16) |
static_cast<uint32_t>(aml_data[3] << 24);
aml_data = aml_data.slice(4);
}
@ -377,9 +389,6 @@ namespace Kernel::ACPI::AML
return result;
}
// FIXME: WHY TF IS THIS USING OVER 1 KiB of stack
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstack-usage="
static BAN::ErrorOr<Node> parse_logical_op(ParseContext& context)
{
dprintln_if(AML_DUMP_FUNCTION_CALLS, "parse_logical_op");
@ -470,7 +479,6 @@ namespace Kernel::ACPI::AML
return result;
}
#pragma GCC diagnostic pop
static BAN::ErrorOr<Node> parse_index_op(ParseContext& context);
@ -746,8 +754,6 @@ namespace Kernel::ACPI::AML
return {};
}
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstack-usage="
static BAN::ErrorOr<void> perform_store(const Node& source, Reference* target, TargetType target_type)
{
dprintln_if(AML_DUMP_FUNCTION_CALLS, "perform_store");
@ -828,7 +834,6 @@ namespace Kernel::ACPI::AML
return {};
}
#pragma GCC diagnostic pop
static BAN::ErrorOr<void> store_into_target(ParseContext& context, const Node& node)
{
@ -1235,7 +1240,7 @@ namespace Kernel::ACPI::AML
}
ASSERT(object);
return TRY(sizeof_impl(object->node));
return sizeof_impl(object->node);
}
static BAN::ErrorOr<Node> derefof_impl(const Node& source)
@ -1243,7 +1248,7 @@ namespace Kernel::ACPI::AML
switch (source.type)
{
case Node::Type::Reference:
return TRY(source.as.reference->node.copy());
return source.as.reference->node.copy();
case Node::Type::Index:
{
switch (source.as.index.type)
@ -1262,7 +1267,7 @@ namespace Kernel::ACPI::AML
{
ASSERT(source.as.index.index < source.as.index.as.package->num_elements);
TRY(resolve_package_element(source.as.index.as.package->elements[source.as.index.index], true));
return TRY(source.as.index.as.package->elements[source.as.index.index].value.node->copy());
return source.as.index.as.package->elements[source.as.index.index].value.node->copy();
}
default: ASSERT_NOT_REACHED();
}
@ -1544,9 +1549,6 @@ namespace Kernel::ACPI::AML
return result;
}
// FIXME: WHY TF IS THIS USING OVER 1 KiB of stack
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstack-usage="
static BAN::ErrorOr<Node> parse_explicit_conversion(ParseContext& context)
{
dprintln_if(AML_DUMP_FUNCTION_CALLS, "parse_explicit_conversion");
@ -1699,7 +1701,6 @@ namespace Kernel::ACPI::AML
return result;
}
#pragma GCC diagnostic pop
static BAN::ErrorOr<Node> parse_to_string_op(ParseContext& context)
{
@ -2001,7 +2002,6 @@ namespace Kernel::ACPI::AML
return {};
}
static BAN::ErrorOr<Node> parse_wait_op(ParseContext& context)
{
dprintln_if(AML_DUMP_FUNCTION_CALLS, "parse_wait_op");
@ -2617,7 +2617,7 @@ namespace Kernel::ACPI::AML
case Node::Type::Buffer:
case Node::Type::Index:
case Node::Type::Reference:
return TRY(node.copy());
return node.copy();
case Node::Type::BufferField:
dwarnln("TODO: evaluate BufferField");
return BAN::Error::from_errno(ENOTSUP);
@ -2626,7 +2626,7 @@ namespace Kernel::ACPI::AML
case Node::Type::Method:
if (node.as.method.arg_count != 0)
return BAN::Error::from_errno(EFAULT);
return TRY(method_call(node_path, node, BAN::Array<Reference*, 7>{}));
return method_call(node_path, node, BAN::Array<Reference*, 7>{});
}
dwarnln("evaluate {}", node);
@ -2761,9 +2761,6 @@ namespace Kernel::ACPI::AML
return method_call(scope, method, BAN::move(args));
}
// FIXME: WHY TF IS THIS USING OVER 2 KiB of stack
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstack-usage="
BAN::ErrorOr<Node> parse_node(ParseContext& context, bool return_ref)
{
if (context.aml_data.empty())
@ -2778,15 +2775,15 @@ namespace Kernel::ACPI::AML
switch (static_cast<AML::ExtOp>(opcode))
{
case AML::ExtOp::CondRefOfOp:
return TRY(parse_condrefof_op(context));
return parse_condrefof_op(context);
case AML::ExtOp::AcquireOp:
return TRY(parse_acquire_op(context));
return parse_acquire_op(context);
case AML::ExtOp::LoadOp:
return TRY(parse_load_op(context));
return parse_load_op(context);
case AML::ExtOp::TimerOp:
return TRY(parse_timer_op(context));
return parse_timer_op(context);
case AML::ExtOp::WaitOp:
return TRY(parse_wait_op(context));
return parse_wait_op(context);
case AML::ExtOp::DebugOp:
{
context.aml_data = context.aml_data.slice(2);
@ -2812,46 +2809,46 @@ namespace Kernel::ACPI::AML
case AML::Byte::WordPrefix:
case AML::Byte::DWordPrefix:
case AML::Byte::QWordPrefix:
return TRY(parse_integer(context.aml_data));
return parse_integer(context.aml_data);
case AML::Byte::StringPrefix:
return TRY(parse_string(context.aml_data));
return parse_string(context.aml_data);
case AML::Byte::BufferOp:
return TRY(parse_buffer_op(context));
return parse_buffer_op(context);
case AML::Byte::PackageOp:
case AML::Byte::VarPackageOp:
return TRY(parse_package_op(context));
return parse_package_op(context);
case AML::Byte::SizeOfOp:
return TRY(parse_sizeof_op(context));
return parse_sizeof_op(context);
case AML::Byte::RefOfOp:
return TRY(parse_refof_op(context));
return parse_refof_op(context);
case AML::Byte::DerefOfOp:
return TRY(parse_derefof_op(context));
return parse_derefof_op(context);
case AML::Byte::StoreOp:
return TRY(parse_store_op(context));
return parse_store_op(context);
case AML::Byte::CopyObjectOp:
return TRY(parse_copy_object_op(context));
return parse_copy_object_op(context);
case AML::Byte::ConcatOp:
return TRY(parse_concat_op(context));
return parse_concat_op(context);
case AML::Byte::MidOp:
return TRY(parse_mid_op(context));
return parse_mid_op(context);
case AML::Byte::IndexOp:
return TRY(parse_index_op(context));
return parse_index_op(context);
case AML::Byte::ObjectTypeOp:
return TRY(parse_object_type_op(context));
return parse_object_type_op(context);
case AML::Byte::MatchOp:
return TRY(parse_match_op(context));
return parse_match_op(context);
case AML::Byte::ToBufferOp:
case AML::Byte::ToDecimalStringOp:
case AML::Byte::ToHexStringOp:
case AML::Byte::ToIntegerOp:
return TRY(parse_explicit_conversion(context));
return parse_explicit_conversion(context);
case AML::Byte::ToStringOp:
return TRY(parse_to_string_op(context));
return parse_to_string_op(context);
case AML::Byte::IncrementOp:
case AML::Byte::DecrementOp:
return TRY(parse_inc_dec_op(context));
return parse_inc_dec_op(context);
case AML::Byte::NotOp:
return TRY(parse_unary_integer_op(context));
return parse_unary_integer_op(context);
case AML::Byte::AddOp:
case AML::Byte::SubtractOp:
case AML::Byte::MultiplyOp:
@ -2864,17 +2861,17 @@ namespace Kernel::ACPI::AML
case AML::Byte::NorOp:
case AML::Byte::XorOp:
case AML::Byte::ModOp:
return TRY(parse_binary_integer_op(context));
return parse_binary_integer_op(context);
case AML::Byte::LAndOp:
case AML::Byte::LEqualOp:
case AML::Byte::LGreaterOp:
case AML::Byte::LLessOp:
case AML::Byte::LNotOp:
case AML::Byte::LOrOp:
return TRY(parse_logical_op(context));
return parse_logical_op(context);
case AML::Byte::FindSetLeftBitOp:
case AML::Byte::FindSetRightBitOp:
return TRY(parse_find_set_bit_op(context));
return parse_find_set_bit_op(context);
case AML::Byte::Local0:
case AML::Byte::Local1:
case AML::Byte::Local2:
@ -2892,7 +2889,7 @@ namespace Kernel::ACPI::AML
return BAN::Error::from_errno(EINVAL);
}
if (!return_ref)
return TRY(context.locals[local_index]->node.copy());
return context.locals[local_index]->node.copy();
Node reference;
reference.type = Node::Type::Reference;
reference.as.reference = context.locals[local_index];
@ -2915,7 +2912,7 @@ namespace Kernel::ACPI::AML
return BAN::Error::from_errno(EINVAL);
}
if (!return_ref)
return TRY(context.args[arg_index]->node.copy());
return context.args[arg_index]->node.copy();
Node reference;
reference.type = Node::Type::Reference;
reference.as.reference = context.args[arg_index];
@ -2962,11 +2959,11 @@ namespace Kernel::ACPI::AML
}
}
return TRY(method_call(BAN::move(object_scope), named_object->node, BAN::move(args), context.call_depth));
return method_call(BAN::move(object_scope), named_object->node, BAN::move(args), context.call_depth);
}
if (!return_ref)
return TRY(named_object->node.copy());
return named_object->node.copy();
Node reference;
reference.type = Node::Type::Reference;
@ -2974,20 +2971,13 @@ namespace Kernel::ACPI::AML
reference.as.reference->ref_count++;
return reference;
}
#pragma GCC diagnostic pop
// FIXME: WHY TF IS THIS USING ALMOST 2 KiB of stack
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstack-usage="
BAN::ErrorOr<ExecutionFlowResult> parse_node_or_execution_flow(ParseContext& context)
{
if (context.aml_data.empty())
return BAN::Error::from_errno(ENODATA);
auto dummy_return = ExecutionFlowResult {
.elem1 = ExecutionFlow::Normal,
.elem2 = BAN::Optional<Node>(),
};
BAN::ErrorOr<void> (*function)(ParseContext&) = nullptr;
if (context.aml_data[0] == static_cast<uint8_t>(AML::Byte::ExtOpPrefix))
{
@ -2996,116 +2986,130 @@ namespace Kernel::ACPI::AML
switch (static_cast<AML::ExtOp>(context.aml_data[1]))
{
case AML::ExtOp::MutexOp:
TRY(parse_mutex_op(context));
return dummy_return;
function = parse_mutex_op;
break;
case AML::ExtOp::FatalOp:
TRY(parse_fatal_op(context));
return dummy_return;
function = parse_fatal_op;
break;
case AML::ExtOp::EventOp:
TRY(parse_event_op(context));
return dummy_return;
function = parse_event_op;
break;
case AML::ExtOp::ResetOp:
case AML::ExtOp::SignalOp:
TRY(parse_reset_signal_op(context));
return dummy_return;
function = parse_reset_signal_op;
break;
case AML::ExtOp::CreateFieldOp:
TRY(parse_createfield_op(context));
return dummy_return;
function = parse_createfield_op;
break;
case AML::ExtOp::SleepOp:
TRY(parse_sleep_op(context));
return dummy_return;
function = parse_sleep_op;
break;
case AML::ExtOp::StallOp:
TRY(parse_stall_op(context));
return dummy_return;
function = parse_stall_op;
break;
case AML::ExtOp::ReleaseOp:
TRY(parse_release_op(context));
return dummy_return;
function = parse_release_op;
break;
case AML::ExtOp::OpRegionOp:
TRY(parse_opregion_op(context));
return dummy_return;
function = parse_opregion_op;
break;
case AML::ExtOp::FieldOp:
TRY(parse_field_op(context));
return dummy_return;
function = parse_field_op;
break;
case AML::ExtOp::IndexFieldOp:
TRY(parse_index_field_op(context));
return dummy_return;
function = parse_index_field_op;
break;
case AML::ExtOp::BankFieldOp:
TRY(parse_bank_field_op(context));
return dummy_return;
function = parse_bank_field_op;
break;
case AML::ExtOp::DeviceOp:
TRY(parse_device_op(context));
return dummy_return;
function = parse_device_op;
break;
case AML::ExtOp::ProcessorOp:
TRY(parse_processor_op(context));
return dummy_return;
function = parse_processor_op;
break;
case AML::ExtOp::PowerResOp:
TRY(parse_power_resource_op(context));
return dummy_return;
function = parse_power_resource_op;
break;
case AML::ExtOp::ThermalZoneOp:
TRY(parse_thermal_zone_op(context));
return dummy_return;
function = parse_thermal_zone_op;
break;
default:
break;
}
}
else
{
switch (static_cast<AML::Byte>(context.aml_data[0]))
{
case AML::Byte::AliasOp:
function = parse_alias_op;
break;
case AML::Byte::NameOp:
function = parse_name_op;
break;
case AML::Byte::MethodOp:
function = parse_method_op;
break;
case AML::Byte::ScopeOp:
function = parse_scope_op;
break;
case AML::Byte::NotifyOp:
function = parse_notify_op;
break;
case AML::Byte::CreateBitFieldOp:
case AML::Byte::CreateByteFieldOp:
case AML::Byte::CreateWordFieldOp:
case AML::Byte::CreateDWordFieldOp:
case AML::Byte::CreateQWordFieldOp:
function = parse_createfield_op;
break;
case AML::Byte::IfOp:
return parse_if_op(context);
case AML::Byte::WhileOp:
return parse_while_op(context);
case AML::Byte::NoopOp:
case AML::Byte::BreakPointOp:
context.aml_data = context.aml_data.slice(1);
return ExecutionFlowResult {
.elem1 = ExecutionFlow::Normal,
.elem2 = BAN::Optional<Node>(),
};;
case AML::Byte::BreakOp:
dprintln_if(AML_DUMP_FUNCTION_CALLS, "parse_break_op");
context.aml_data = context.aml_data.slice(1);
return ExecutionFlowResult {
.elem1 = ExecutionFlow::Break,
.elem2 = BAN::Optional<Node>(),
};
case AML::Byte::ContinueOp:
dprintln_if(AML_DUMP_FUNCTION_CALLS, "parse_continue_op");
context.aml_data = context.aml_data.slice(1);
return ExecutionFlowResult {
.elem1 = ExecutionFlow::Continue,
.elem2 = BAN::Optional<Node>(),
};
case AML::Byte::ReturnOp:
{
dprintln_if(AML_DUMP_FUNCTION_CALLS, "parse_return_op");
context.aml_data = context.aml_data.slice(1);
return ExecutionFlowResult {
.elem1 = ExecutionFlow::Return,
.elem2 = TRY(parse_node(context)),
};
}
default:
break;
}
}
switch (static_cast<AML::Byte>(context.aml_data[0]))
if (function)
{
case AML::Byte::AliasOp:
TRY(parse_alias_op(context));
return dummy_return;
case AML::Byte::NameOp:
TRY(parse_name_op(context));
return dummy_return;
case AML::Byte::MethodOp:
TRY(parse_method_op(context));
return dummy_return;
case AML::Byte::NoopOp:
case AML::Byte::BreakPointOp:
context.aml_data = context.aml_data.slice(1);
return dummy_return;
case AML::Byte::ScopeOp:
TRY(parse_scope_op(context));
return dummy_return;
case AML::Byte::NotifyOp:
TRY(parse_notify_op(context));
return dummy_return;
case AML::Byte::CreateBitFieldOp:
case AML::Byte::CreateByteFieldOp:
case AML::Byte::CreateWordFieldOp:
case AML::Byte::CreateDWordFieldOp:
case AML::Byte::CreateQWordFieldOp:
TRY(parse_createfield_op(context));
return dummy_return;
case AML::Byte::IfOp:
return parse_if_op(context);
case AML::Byte::WhileOp:
return parse_while_op(context);
case AML::Byte::BreakOp:
dprintln_if(AML_DUMP_FUNCTION_CALLS, "parse_break_op");
context.aml_data = context.aml_data.slice(1);
return ExecutionFlowResult {
.elem1 = ExecutionFlow::Break,
.elem2 = BAN::Optional<Node>(),
};
case AML::Byte::ContinueOp:
dprintln_if(AML_DUMP_FUNCTION_CALLS, "parse_continue_op");
context.aml_data = context.aml_data.slice(1);
return ExecutionFlowResult {
.elem1 = ExecutionFlow::Continue,
.elem2 = BAN::Optional<Node>(),
};
case AML::Byte::ReturnOp:
{
dprintln_if(AML_DUMP_FUNCTION_CALLS, "parse_return_op");
context.aml_data = context.aml_data.slice(1);
return ExecutionFlowResult {
.elem1 = ExecutionFlow::Return,
.elem2 = TRY(parse_node(context)),
};
}
default:
break;
TRY(function(context));
return ExecutionFlowResult {
.elem1 = ExecutionFlow::Normal,
.elem2 = BAN::Optional<Node>(),
};;
}
auto node = TRY(parse_node(context));
@ -3114,7 +3118,6 @@ namespace Kernel::ACPI::AML
.elem2 = BAN::move(node)
};
}
#pragma GCC diagnostic pop
BAN::ErrorOr<NameString> NameString::from_string(BAN::StringView name)
{

View File

@ -1,3 +1,9 @@
// FIXME: Find better ways to manage stack usage
#pragma GCC push_options
#pragma GCC optimize "no-inline"
#include <BAN/Errors.h>
#pragma GCC pop_options
#include <kernel/ACPI/AML/Bytes.h>
#include <kernel/ACPI/AML/Namespace.h>
#include <kernel/ACPI/AML/OpRegion.h>
@ -92,6 +98,39 @@ namespace Kernel::ACPI::AML
opregion.as.opregion.offset = region_offset.as.integer.value;
opregion.as.opregion.length = region_length.as.integer.value;
opregion.as.opregion.seg = 0;
opregion.as.opregion.bus = 0;
opregion.as.opregion.dev = 0;
opregion.as.opregion.func = 0;
if (opregion.as.opregion.address_space == GAS::AddressSpaceID::PCIConfig)
{
// FIXME: Am I actually allowed to read these here or should I determine
// them on every read/write access
if (auto seg_res = TRY(Namespace::root_namespace().find_named_object(context.scope, TRY(AML::NameString::from_string("_SEG"_sv)))); seg_res.node != nullptr)
{
auto seg_node = TRY(convert_node(TRY(evaluate_node(seg_res.path, seg_res.node->node)), ConvInteger, -1));
opregion.as.opregion.seg = seg_node.as.integer.value;
}
if (auto bbn_res = TRY(Namespace::root_namespace().find_named_object(context.scope, TRY(AML::NameString::from_string("_BBN"_sv)))); bbn_res.node != nullptr)
{
auto bbn_node = TRY(convert_node(TRY(evaluate_node(bbn_res.path, bbn_res.node->node)), ConvInteger, -1));
opregion.as.opregion.bus = bbn_node.as.integer.value;
}
auto adr_res = TRY(Namespace::root_namespace().find_named_object(context.scope, TRY(AML::NameString::from_string("_ADR"_sv))));
if (adr_res.node == nullptr)
{
dwarnln("No _ADR for PCIConfig OpRegion");
return BAN::Error::from_errno(EFAULT);
}
auto adr_node = TRY(convert_node(TRY(evaluate_node(adr_res.path, adr_res.node->node)), ConvInteger, -1));
opregion.as.opregion.dev = adr_node.as.integer.value >> 16;
opregion.as.opregion.func = adr_node.as.integer.value & 0xFF;
}
TRY(Namespace::root_namespace().add_named_object(context, region_name, BAN::move(opregion)));
return {};
@ -416,19 +455,19 @@ namespace Kernel::ACPI::AML
ASSERT_NOT_REACHED();
case GAS::AddressSpaceID::PCIConfig:
{
// https://uefi.org/htmlspecs/ACPI_Spec_6_4_html/05_ACPI_Software_Programming_Model/ACPI_Software_Programming_Model.html#address-space-format
// PCI configuration space is confined to segment 0, bus 0
if (opregion.seg != 0)
{
dwarnln("PCIConfig OpRegion with segment");
return BAN::Error::from_errno(ENOTSUP);
}
const uint16_t device = (byte_offset >> 32) & 0xFFFF;
const uint16_t function = (byte_offset >> 16) & 0xFFFF;
const uint16_t offset = byte_offset & 0xFFFF;
switch (access_size)
{
case 1: return PCI::PCIManager::get().read_config_byte (0, device, function, offset);
case 2: return PCI::PCIManager::get().read_config_word (0, device, function, offset);
case 4: return PCI::PCIManager::get().read_config_dword(0, device, function, offset);
case 1: return PCI::PCIManager::get().read_config_byte (opregion.bus, opregion.dev, opregion.func, byte_offset);
case 2: return PCI::PCIManager::get().read_config_word (opregion.bus, opregion.dev, opregion.func, byte_offset);
case 4: return PCI::PCIManager::get().read_config_dword(opregion.bus, opregion.dev, opregion.func, byte_offset);
default:
dwarnln("{} byte read from PCI {2H}:{2H}:{2H}", device, function, offset);
dwarnln("{} byte read from PCI {2H}:{2H}:{2H} offset {2H}", access_size, opregion.bus, opregion.dev, opregion.func, byte_offset);
return BAN::Error::from_errno(EINVAL);
}
ASSERT_NOT_REACHED();
@ -486,19 +525,19 @@ namespace Kernel::ACPI::AML
return {};
case GAS::AddressSpaceID::PCIConfig:
{
// https://uefi.org/htmlspecs/ACPI_Spec_6_4_html/05_ACPI_Software_Programming_Model/ACPI_Software_Programming_Model.html#address-space-format
// PCI configuration space is confined to segment 0, bus 0
if (opregion.seg != 0)
{
dwarnln("PCIConfig OpRegion with segment");
return BAN::Error::from_errno(ENOTSUP);
}
const uint16_t device = (byte_offset >> 32) & 0xFFFF;
const uint16_t function = (byte_offset >> 16) & 0xFFFF;
const uint16_t offset = byte_offset & 0xFFFF;
switch (access_size)
{
case 1: PCI::PCIManager::get().write_config_byte (0, device, function, offset, value); break;
case 2: PCI::PCIManager::get().write_config_word (0, device, function, offset, value); break;
case 4: PCI::PCIManager::get().write_config_dword(0, device, function, offset, value); break;
case 1: PCI::PCIManager::get().write_config_byte (opregion.bus, opregion.dev, opregion.func, byte_offset, value); break;
case 2: PCI::PCIManager::get().write_config_word (opregion.bus, opregion.dev, opregion.func, byte_offset, value); break;
case 4: PCI::PCIManager::get().write_config_dword(opregion.bus, opregion.dev, opregion.func, byte_offset, value); break;
default:
dwarnln("{} byte write to PCI {2H}:{2H}:{2H}", device, function, offset);
dwarnln("{} byte write to PCI {2H}:{2H}:{2H} offset {2H}", access_size, opregion.bus, opregion.dev, opregion.func, byte_offset);
return BAN::Error::from_errno(EINVAL);
}
return {};

View File

@ -540,8 +540,6 @@ namespace Kernel
// this is a hack to allow direct GSI reservation
BAN::ErrorOr<uint8_t> APIC::reserve_gsi(uint32_t gsi)
{
dwarnln("TRYING TO RESERVE GSI {}", gsi);
size_t irq = 0;
for (; irq < 0x100; irq++)
if (m_irq_overrides[irq] == gsi)
@ -553,8 +551,6 @@ namespace Kernel
return BAN::Error::from_errno(ENOTSUP);
}
dwarnln(" matches IRQ {}", irq);
TRY(reserve_irq(irq));
return irq;

View File

@ -26,59 +26,73 @@ namespace Kernel
for (const auto* tag = multiboot2_info.tags; tag->type != MULTIBOOT2_TAG_END; tag = tag->next())
{
if (tag->type == MULTIBOOT2_TAG_CMDLINE)
switch (tag->type)
{
const auto& command_line_tag = *static_cast<const multiboot2_cmdline_tag_t*>(tag);
MUST(g_boot_info.command_line.append(command_line_tag.cmdline));
}
else if (tag->type == MULTIBOOT2_TAG_FRAMEBUFFER)
{
const auto& framebuffer_tag = *static_cast<const multiboot2_framebuffer_tag_t*>(tag);
g_boot_info.framebuffer.address = framebuffer_tag.framebuffer_addr;
g_boot_info.framebuffer.pitch = framebuffer_tag.framebuffer_pitch;
g_boot_info.framebuffer.width = framebuffer_tag.framebuffer_width;
g_boot_info.framebuffer.height = framebuffer_tag.framebuffer_height;
g_boot_info.framebuffer.bpp = framebuffer_tag.framebuffer_bpp;
if (framebuffer_tag.framebuffer_type == MULTIBOOT2_FRAMEBUFFER_TYPE_RGB)
g_boot_info.framebuffer.type = FramebufferInfo::Type::RGB;
else if (framebuffer_tag.framebuffer_type == MULTIBOOT2_FRAMEBUFFER_TYPE_TEXT)
g_boot_info.framebuffer.type = FramebufferInfo::Type::Text;
else
g_boot_info.framebuffer.type = FramebufferInfo::Type::Unknown;
}
else if (tag->type == MULTIBOOT2_TAG_MMAP)
{
const auto& mmap_tag = *static_cast<const multiboot2_mmap_tag_t*>(tag);
const size_t entry_count = (mmap_tag.size - sizeof(multiboot2_mmap_tag_t)) / mmap_tag.entry_size;
MUST(g_boot_info.memory_map_entries.resize(entry_count));
for (size_t i = 0; i < entry_count; i++)
case MULTIBOOT2_TAG_CMDLINE:
{
const auto& mmap_entry = *reinterpret_cast<const multiboot2_mmap_entry_t*>(reinterpret_cast<uintptr_t>(tag) + sizeof(multiboot2_mmap_tag_t) + i * mmap_tag.entry_size);
dprintln("entry {16H} {16H} {8H}",
(uint64_t)mmap_entry.base_addr,
(uint64_t)mmap_entry.length,
(uint64_t)mmap_entry.type
);
g_boot_info.memory_map_entries[i].address = mmap_entry.base_addr;
g_boot_info.memory_map_entries[i].length = mmap_entry.length;
g_boot_info.memory_map_entries[i].type = bios_number_to_memory_type(mmap_entry.type);
const auto& command_line_tag = *static_cast<const multiboot2_cmdline_tag_t*>(tag);
MUST(g_boot_info.command_line.append(command_line_tag.cmdline));
break;
}
}
else if (tag->type == MULTIBOOT2_TAG_OLD_RSDP)
{
if (g_boot_info.rsdp.length == 0)
case MULTIBOOT2_TAG_MODULES:
{
memcpy(&g_boot_info.rsdp, static_cast<const multiboot2_rsdp_tag_t*>(tag)->data, 20);
g_boot_info.rsdp.length = 20;
const auto& modules_tag = *static_cast<const multiboot2_modules_tag_t*>(tag);
MUST(g_boot_info.modules.emplace_back(modules_tag.mod_start, modules_tag.mod_end - modules_tag.mod_start));
break;
}
case MULTIBOOT2_TAG_FRAMEBUFFER:
{
const auto& framebuffer_tag = *static_cast<const multiboot2_framebuffer_tag_t*>(tag);
g_boot_info.framebuffer.address = framebuffer_tag.framebuffer_addr;
g_boot_info.framebuffer.pitch = framebuffer_tag.framebuffer_pitch;
g_boot_info.framebuffer.width = framebuffer_tag.framebuffer_width;
g_boot_info.framebuffer.height = framebuffer_tag.framebuffer_height;
g_boot_info.framebuffer.bpp = framebuffer_tag.framebuffer_bpp;
if (framebuffer_tag.framebuffer_type == MULTIBOOT2_FRAMEBUFFER_TYPE_RGB)
g_boot_info.framebuffer.type = FramebufferInfo::Type::RGB;
else if (framebuffer_tag.framebuffer_type == MULTIBOOT2_FRAMEBUFFER_TYPE_TEXT)
g_boot_info.framebuffer.type = FramebufferInfo::Type::Text;
else
g_boot_info.framebuffer.type = FramebufferInfo::Type::Unknown;
break;
}
case MULTIBOOT2_TAG_MMAP:
{
const auto& mmap_tag = *static_cast<const multiboot2_mmap_tag_t*>(tag);
const size_t entry_count = (mmap_tag.size - sizeof(multiboot2_mmap_tag_t)) / mmap_tag.entry_size;
MUST(g_boot_info.memory_map_entries.resize(entry_count));
for (size_t i = 0; i < entry_count; i++)
{
const auto& mmap_entry = *reinterpret_cast<const multiboot2_mmap_entry_t*>(reinterpret_cast<uintptr_t>(tag) + sizeof(multiboot2_mmap_tag_t) + i * mmap_tag.entry_size);
dprintln("entry {16H} {16H} {8H}",
(uint64_t)mmap_entry.base_addr,
(uint64_t)mmap_entry.length,
(uint64_t)mmap_entry.type
);
g_boot_info.memory_map_entries[i].address = mmap_entry.base_addr;
g_boot_info.memory_map_entries[i].length = mmap_entry.length;
g_boot_info.memory_map_entries[i].type = bios_number_to_memory_type(mmap_entry.type);
}
break;
}
case MULTIBOOT2_TAG_OLD_RSDP:
{
if (g_boot_info.rsdp.length == 0)
{
memcpy(&g_boot_info.rsdp, static_cast<const multiboot2_rsdp_tag_t*>(tag)->data, 20);
g_boot_info.rsdp.length = 20;
}
break;
}
case MULTIBOOT2_TAG_NEW_RSDP:
{
const auto& rsdp = *reinterpret_cast<const RSDP*>(static_cast<const multiboot2_rsdp_tag_t*>(tag)->data);
memcpy(&g_boot_info.rsdp, &rsdp, BAN::Math::min<uint32_t>(rsdp.length, sizeof(g_boot_info.rsdp)));
break;
}
}
else if (tag->type == MULTIBOOT2_TAG_NEW_RSDP)
{
const auto& rsdp = *reinterpret_cast<const RSDP*>(static_cast<const multiboot2_rsdp_tag_t*>(tag)->data);
memcpy(&g_boot_info.rsdp, &rsdp, BAN::Math::min<uint32_t>(rsdp.length, sizeof(g_boot_info.rsdp)));
}
}

View File

@ -78,7 +78,7 @@ namespace Kernel
KERNEL_OFFSET, UINTPTR_MAX,
BAN::Math::div_round_up<size_t>(m_width * m_height * (BANAN_FB_BPP / 8), PAGE_SIZE) * PAGE_SIZE,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true
true, false
));
return {};
@ -128,22 +128,24 @@ namespace Kernel
uint32_t FramebufferDevice::get_pixel(uint32_t x, uint32_t y) const
{
ASSERT(x < m_width && y < m_height);
const auto* video_buffer_u8 = reinterpret_cast<const uint8_t*>(m_video_buffer->vaddr());
return (video_buffer_u8[(y * m_width + x) * (BANAN_FB_BPP / 8) + 0] << 0)
| (video_buffer_u8[(y * m_width + x) * (BANAN_FB_BPP / 8) + 1] << 8)
| (video_buffer_u8[(y * m_width + x) * (BANAN_FB_BPP / 8) + 2] << 16);
static_assert(BANAN_FB_BPP == 32);
return reinterpret_cast<uint32_t*>(m_video_buffer->vaddr())[y * m_width + x];
}
void FramebufferDevice::set_pixel(uint32_t x, uint32_t y, uint32_t rgb)
{
if (x >= m_width || y >= m_height)
return;
auto* video_buffer_u8 = reinterpret_cast<uint8_t*>(m_video_buffer->vaddr());
video_buffer_u8[(y * m_width + x) * (BANAN_FB_BPP / 8) + 0] = rgb >> 0;
video_buffer_u8[(y * m_width + x) * (BANAN_FB_BPP / 8) + 1] = rgb >> 8;
video_buffer_u8[(y * m_width + x) * (BANAN_FB_BPP / 8) + 2] = rgb >> 16;
if constexpr(BANAN_FB_BPP == 32)
video_buffer_u8[(y * m_width + x) * (BANAN_FB_BPP / 8) + 3] = rgb >> 24;
static_assert(BANAN_FB_BPP == 32);
reinterpret_cast<uint32_t*>(m_video_buffer->vaddr())[y * m_width + x] = rgb;
}
void FramebufferDevice::fill(uint32_t rgb)
{
static_assert(BANAN_FB_BPP == 32);
auto* video_buffer_u32 = reinterpret_cast<uint32_t*>(m_video_buffer->vaddr());
for (uint32_t i = 0; i < m_width * m_height; i++)
video_buffer_u32[i] = rgb;
}
void FramebufferDevice::scroll(int32_t rows, uint32_t rgb)

View File

@ -45,7 +45,7 @@ namespace Kernel
void DevFileSystem::initialize_device_updater()
{
Process::create_kernel(
auto* updater_thread = MUST(Thread::create_kernel(
[](void* _devfs)
{
auto* devfs = static_cast<DevFileSystem*>(_devfs);
@ -59,44 +59,39 @@ namespace Kernel
SystemTimer::get().sleep_ms(10);
}
}, s_instance
);
));
MUST(Processor::scheduler().add_thread(updater_thread));
auto* sync_process = Process::create_kernel();
sync_process->add_thread(MUST(Thread::create_kernel(
auto* disk_sync_thread = MUST(Thread::create_kernel(
[](void* _devfs)
{
auto* devfs = static_cast<DevFileSystem*>(_devfs);
constexpr uint64_t sync_interval_ms = 10'000;
uint64_t next_sync_ms { sync_interval_ms };
while (true)
{
LockGuard _(devfs->m_device_lock);
while (!devfs->m_should_sync)
devfs->m_sync_thread_blocker.block_indefinite(&devfs->m_device_lock);
{
const uint64_t current_ms = SystemTimer::get().ms_since_boot();
if (devfs->m_should_sync || current_ms >= next_sync_ms)
break;
devfs->m_sync_thread_blocker.block_with_timeout_ms(next_sync_ms - current_ms, &devfs->m_device_lock);
}
for (auto& device : devfs->m_devices)
if (device->is_storage_device())
if (auto ret = static_cast<StorageDevice*>(device.ptr())->sync_disk_cache(); ret.is_error())
dwarnln("disk sync: {}", ret.error());
next_sync_ms = SystemTimer::get().ms_since_boot() + sync_interval_ms;
devfs->m_should_sync = false;
devfs->m_sync_done.unblock();
}
}, s_instance, sync_process
)));
sync_process->add_thread(MUST(Kernel::Thread::create_kernel(
[](void* _devfs)
{
auto* devfs = static_cast<DevFileSystem*>(_devfs);
while (true)
{
SystemTimer::get().sleep_ms(10'000);
devfs->initiate_sync(false);
}
}, s_instance, sync_process
)));
sync_process->register_to_scheduler();
}, s_instance
));
MUST(Processor::scheduler().add_thread(disk_sync_thread));
}
void DevFileSystem::initiate_sync(bool should_block)

View File

@ -289,6 +289,26 @@ namespace Kernel
return {};
}
BAN::ErrorOr<void> Ext2Inode::chown_impl(uid_t uid, gid_t gid)
{
if (m_inode.uid == uid && m_inode.gid == gid)
return {};
const auto old_uid = m_inode.uid;
const auto old_gid = m_inode.gid;
m_inode.uid = uid;
m_inode.gid = gid;
if (auto ret = sync(); ret.is_error())
{
m_inode.uid = old_uid;
m_inode.gid = old_gid;
return ret.release_error();
}
return {};
}
BAN::ErrorOr<void> Ext2Inode::utimens_impl(const timespec times[2])
{
const uint32_t old_times[2] {

View File

@ -2,6 +2,7 @@
#include <kernel/Lock/LockGuard.h>
#include <ctype.h>
#include <sys/statvfs.h>
namespace Kernel
{
@ -73,7 +74,7 @@ namespace Kernel
fsfilcnt_t FATFS::ffree() const { return 0; } // FIXME
fsfilcnt_t FATFS::favail() const { return 0; } // FIXME
unsigned long FATFS::fsid() const { return m_type == Type::FAT32 ? m_bpb.ext_32.volume_id : m_bpb.ext_12_16.volume_id; }
unsigned long FATFS::flag() const { return 0; }
unsigned long FATFS::flag() const { return ST_RDONLY; }
unsigned long FATFS::namemax() const { return 255; }
BAN::ErrorOr<BAN::RefPtr<FATFS>> FATFS::create(BAN::RefPtr<BlockDevice> block_device)

View File

@ -1,9 +1,11 @@
#include <kernel/Epoll.h>
#include <kernel/FS/FileSystem.h>
#include <kernel/FS/Inode.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/FileBackedRegion.h>
#include <fcntl.h>
#include <sys/statvfs.h>
namespace Kernel
{
@ -81,6 +83,8 @@ namespace Kernel
return BAN::Error::from_errno(ENOTDIR);
if (Mode(mode).ifdir())
return BAN::Error::from_errno(EINVAL);
if (auto* fs = filesystem(); fs && (fs->flag() & ST_RDONLY))
return BAN::Error::from_errno(EROFS);
return create_file_impl(name, mode, uid, gid);
}
@ -91,6 +95,8 @@ namespace Kernel
return BAN::Error::from_errno(ENOTDIR);
if (!Mode(mode).ifdir())
return BAN::Error::from_errno(EINVAL);
if (auto* fs = filesystem(); fs && (fs->flag() & ST_RDONLY))
return BAN::Error::from_errno(EROFS);
return create_directory_impl(name, mode, uid, gid);
}
@ -101,6 +107,8 @@ namespace Kernel
return BAN::Error::from_errno(ENOTDIR);
if (inode->mode().ifdir())
return BAN::Error::from_errno(EINVAL);
if (auto* fs = filesystem(); fs && (fs->flag() & ST_RDONLY))
return BAN::Error::from_errno(EROFS);
return link_inode_impl(name, inode);
}
@ -111,6 +119,8 @@ namespace Kernel
return BAN::Error::from_errno(ENOTDIR);
if (name == "."_sv || name == ".."_sv)
return BAN::Error::from_errno(EINVAL);
if (auto* fs = filesystem(); fs && (fs->flag() & ST_RDONLY))
return BAN::Error::from_errno(EROFS);
return unlink_impl(name);
}
@ -127,6 +137,8 @@ namespace Kernel
LockGuard _(m_mutex);
if (!mode().iflnk())
return BAN::Error::from_errno(EINVAL);
if (auto* fs = filesystem(); fs && (fs->flag() & ST_RDONLY))
return BAN::Error::from_errno(EROFS);
return set_link_target_impl(target);
}
@ -207,6 +219,8 @@ namespace Kernel
LockGuard _(m_mutex);
if (mode().ifdir())
return BAN::Error::from_errno(EISDIR);
if (auto* fs = filesystem(); fs && (fs->flag() & ST_RDONLY))
return BAN::Error::from_errno(EROFS);
return write_impl(offset, buffer);
}
@ -215,6 +229,8 @@ namespace Kernel
LockGuard _(m_mutex);
if (mode().ifdir())
return BAN::Error::from_errno(EISDIR);
if (auto* fs = filesystem(); fs && (fs->flag() & ST_RDONLY))
return BAN::Error::from_errno(EROFS);
return truncate_impl(size);
}
@ -222,18 +238,24 @@ namespace Kernel
{
ASSERT((mode & Inode::Mode::TYPE_MASK) == 0);
LockGuard _(m_mutex);
if (auto* fs = filesystem(); fs && (fs->flag() & ST_RDONLY))
return BAN::Error::from_errno(EROFS);
return chmod_impl(mode);
}
BAN::ErrorOr<void> Inode::chown(uid_t uid, gid_t gid)
{
LockGuard _(m_mutex);
if (auto* fs = filesystem(); fs && (fs->flag() & ST_RDONLY))
return BAN::Error::from_errno(EROFS);
return chown_impl(uid, gid);
}
BAN::ErrorOr<void> Inode::utimens(const timespec times[2])
{
LockGuard _(m_mutex);
if (auto* fs = filesystem(); fs && (fs->flag() & ST_RDONLY))
return BAN::Error::from_errno(EROFS);
return utimens_impl(times);
}

View File

@ -1,3 +1,4 @@
#include <BAN/ScopeGuard.h>
#include <kernel/Device/DeviceNumbers.h>
#include <kernel/FS/TmpFS/FileSystem.h>
#include <kernel/Memory/Heap.h>
@ -105,7 +106,7 @@ namespace Kernel
{
LockGuard _(m_mutex);
auto inode_location = find_inode(ino);
const auto inode_location = find_inode(ino);
PageTable::with_fast_page(inode_location.paddr, [&] {
out = PageTable::fast_page_as_sized<TmpInodeInfo>(inode_location.index);
});
@ -115,7 +116,7 @@ namespace Kernel
{
LockGuard _(m_mutex);
auto inode_location = find_inode(ino);
const auto inode_location = find_inode(ino);
PageTable::with_fast_page(inode_location.paddr, [&] {
auto& inode_info = PageTable::fast_page_as_sized<TmpInodeInfo>(inode_location.index);
inode_info = info;
@ -126,7 +127,7 @@ namespace Kernel
{
LockGuard _(m_mutex);
auto inode_location = find_inode(ino);
const auto inode_location = find_inode(ino);
PageTable::with_fast_page(inode_location.paddr, [&] {
auto& inode_info = PageTable::fast_page_as_sized<TmpInodeInfo>(inode_location.index);
ASSERT(inode_info.nlink == 0);
@ -134,6 +135,7 @@ namespace Kernel
ASSERT(paddr == 0);
inode_info = {};
});
ASSERT(!m_inode_cache.contains(ino));
}
@ -141,26 +143,86 @@ namespace Kernel
{
LockGuard _(m_mutex);
constexpr size_t inodes_per_page = PAGE_SIZE / sizeof(TmpInodeInfo);
constexpr size_t inode_infos_per_page = PAGE_SIZE / sizeof(TmpInodeInfo);
constexpr size_t page_infos_per_page = PAGE_SIZE / sizeof(PageInfo);
ino_t ino = first_inode;
TRY(for_each_indirect_paddr_allocating(m_inode_pages, [&](paddr_t paddr, bool) {
BAN::Iteration result = BAN::Iteration::Continue;
PageTable::with_fast_page(paddr, [&] {
for (size_t i = 0; i < inodes_per_page; i++, ino++)
{
auto& inode_info = PageTable::fast_page_as_sized<TmpInodeInfo>(i);
if (inode_info.mode != 0)
continue;
inode_info = info;
result = BAN::Iteration::Break;
return;
}
for (size_t layer0_index = 0; layer0_index < page_infos_per_page; layer0_index++)
{
PageInfo layer0_page;
PageTable::with_fast_page(m_inode_pages.paddr(), [&] {
layer0_page = PageTable::fast_page_as_sized<PageInfo>(layer0_index);
});
return result;
}, 2));
return ino;
if (!(layer0_page.flags() & PageInfo::Flags::Present))
{
if (m_used_pages >= m_max_pages)
return BAN::Error::from_errno(ENOSPC);
const paddr_t paddr = Heap::get().take_free_page();
if (paddr == 0)
return BAN::Error::from_errno(ENOMEM);
PageTable::with_fast_page(paddr, [&] {
memset(PageTable::fast_page_as_ptr(), 0, PAGE_SIZE);
});
PageTable::with_fast_page(m_inode_pages.paddr(), [&] {
auto& page_info = PageTable::fast_page_as_sized<PageInfo>(layer0_index);
page_info.set_paddr(paddr);
page_info.set_flags(PageInfo::Flags::Present);
layer0_page = page_info;
});
m_used_pages++;
}
for (size_t layer1_index = 0; layer1_index < page_infos_per_page; layer1_index++)
{
PageInfo layer1_page;
PageTable::with_fast_page(layer0_page.paddr(), [&] {
layer1_page = PageTable::fast_page_as_sized<PageInfo>(layer1_index);
});
if (!(layer1_page.flags() & PageInfo::Flags::Present))
{
if (m_used_pages >= m_max_pages)
return BAN::Error::from_errno(ENOSPC);
const paddr_t paddr = Heap::get().take_free_page();
if (paddr == 0)
return BAN::Error::from_errno(ENOMEM);
PageTable::with_fast_page(paddr, [&] {
memset(PageTable::fast_page_as_ptr(), 0, PAGE_SIZE);
});
PageTable::with_fast_page(layer0_page.paddr(), [&] {
auto& page_info = PageTable::fast_page_as_sized<PageInfo>(layer1_index);
page_info.set_paddr(paddr);
page_info.set_flags(PageInfo::Flags::Present);
layer1_page = page_info;
});
m_used_pages++;
}
size_t layer2_index = SIZE_MAX;
PageTable::with_fast_page(layer1_page.paddr(), [&] {
for (size_t i = 0; i < PAGE_SIZE / sizeof(TmpInodeInfo); i++)
{
auto& inode_info = PageTable::fast_page_as_sized<TmpInodeInfo>(i);
if (inode_info.mode != 0)
continue;
inode_info = info;
layer2_index = i;
return;
}
});
if (layer2_index != SIZE_MAX)
{
const size_t layer0_offset = layer0_index * inode_infos_per_page * page_infos_per_page;
const size_t layer1_offset = layer1_index * inode_infos_per_page;
const size_t layer2_offset = layer2_index;
return layer0_offset + layer1_offset + layer2_offset + first_inode;
}
}
}
ASSERT_NOT_REACHED();
}
TmpFileSystem::InodeLocation TmpFileSystem::find_inode(ino_t ino)
@ -168,16 +230,30 @@ namespace Kernel
LockGuard _(m_mutex);
ASSERT(ino >= first_inode);
ASSERT(ino < max_inodes);
ASSERT(ino - first_inode < max_inodes);
constexpr size_t inodes_per_page = PAGE_SIZE / sizeof(TmpInodeInfo);
constexpr size_t inode_infos_per_page = PAGE_SIZE / sizeof(TmpInodeInfo);
constexpr size_t page_infos_per_page = PAGE_SIZE / sizeof(PageInfo);
const size_t layer0_index = (ino - first_inode) / inode_infos_per_page / page_infos_per_page;
const size_t layer1_index = (ino - first_inode) / inode_infos_per_page % page_infos_per_page;
const size_t layer2_index = (ino - first_inode) % inode_infos_per_page;
ASSERT(layer0_index < page_infos_per_page);
size_t index_of_page = (ino - first_inode) / inodes_per_page;
size_t index_in_page = (ino - first_inode) % inodes_per_page;
PageInfo layer0_page;
PageTable::with_fast_page(m_inode_pages.paddr(), [&] {
layer0_page = PageTable::fast_page_as_sized<PageInfo>(layer0_index);
});
ASSERT(layer0_page.flags() & PageInfo::Flags::Present);
PageInfo layer1_page;
PageTable::with_fast_page(layer0_page.paddr(), [&] {
layer1_page = PageTable::fast_page_as_sized<PageInfo>(layer1_index);
});
ASSERT(layer1_page.flags() & PageInfo::Flags::Present);
return {
.paddr = find_indirect(m_inode_pages, index_of_page, 2),
.index = index_in_page
.paddr = layer1_page.paddr(),
.index = layer2_index,
};
}
@ -185,145 +261,185 @@ namespace Kernel
{
LockGuard _(m_mutex);
constexpr size_t addresses_per_page = PAGE_SIZE / sizeof(PageInfo);
ASSERT(index >= first_data_page);
ASSERT(index - first_data_page < max_data_pages);
const size_t index_of_page = (index - first_data_page) / addresses_per_page;
const size_t index_in_page = (index - first_data_page) % addresses_per_page;
constexpr size_t page_infos_per_page = PAGE_SIZE / sizeof(PageInfo);
const size_t layer0_index = (index - first_data_page) / (page_infos_per_page - 1) / page_infos_per_page;
const size_t layer1_index = (index - first_data_page) / (page_infos_per_page - 1) % page_infos_per_page;
const size_t layer2_index = (index - first_data_page) % (page_infos_per_page - 1);
ASSERT(layer0_index < page_infos_per_page);
paddr_t page_containing = find_indirect(m_data_pages, index_of_page, 2);
PageInfo layer0_page;
PageTable::with_fast_page(m_data_pages.paddr(), [&] {
layer0_page = PageTable::fast_page_as_sized<PageInfo>(layer0_index);
});
ASSERT(layer0_page.flags() & PageInfo::Flags::Present);
paddr_t paddr_to_free = 0;
PageTable::with_fast_page(page_containing, [&] {
auto& page_info = PageTable::fast_page_as_sized<PageInfo>(index_in_page);
PageInfo layer1_page;
PageTable::with_fast_page(layer0_page.paddr(), [&] {
layer1_page = PageTable::fast_page_as_sized<PageInfo>(layer1_index);
});
ASSERT(layer1_page.flags() & PageInfo::Flags::Present);
paddr_t page_to_free;
PageTable::with_fast_page(layer1_page.paddr(), [&] {
auto& allocated_pages = PageTable::fast_page_as_sized<size_t>(page_infos_per_page - 1);
ASSERT(allocated_pages > 0);
allocated_pages--;
auto& page_info = PageTable::fast_page_as_sized<PageInfo>(layer2_index);
ASSERT(page_info.flags() & PageInfo::Flags::Present);
paddr_to_free = page_info.paddr();
m_used_pages--;
page_to_free = page_info.paddr();
page_info.set_paddr(0);
page_info.set_flags(0);
});
Heap::get().release_page(paddr_to_free);
}
BAN::ErrorOr<size_t> TmpFileSystem::allocate_block()
{
LockGuard _(m_mutex);
size_t result = first_data_page;
TRY(for_each_indirect_paddr_allocating(m_data_pages, [&] (paddr_t, bool allocated) {
if (allocated)
return BAN::Iteration::Break;
result++;
return BAN::Iteration::Continue;
}, 3));
return result;
Heap::get().release_page(page_to_free);
}
paddr_t TmpFileSystem::find_block(size_t index)
{
LockGuard _(m_mutex);
ASSERT(index > 0);
return find_indirect(m_data_pages, index - first_data_page, 3);
}
ASSERT(index >= first_data_page);
ASSERT(index - first_data_page < max_data_pages);
paddr_t TmpFileSystem::find_indirect(PageInfo root, size_t index, size_t depth)
{
LockGuard _(m_mutex);
constexpr size_t page_infos_per_page = PAGE_SIZE / sizeof(PageInfo);
const size_t layer0_index = (index - first_data_page) / (page_infos_per_page - 1) / page_infos_per_page;
const size_t layer1_index = (index - first_data_page) / (page_infos_per_page - 1) % page_infos_per_page;
const size_t layer2_index = (index - first_data_page) % (page_infos_per_page - 1);
ASSERT(layer0_index < page_infos_per_page);
ASSERT(root.flags() & PageInfo::Flags::Present);
if (depth == 0)
{
ASSERT(index == 0);
return root.paddr();
}
constexpr size_t addresses_per_page = PAGE_SIZE / sizeof(PageInfo);
size_t divisor = 1;
for (size_t i = 1; i < depth; i++)
divisor *= addresses_per_page;
size_t index_of_page = index / divisor;
size_t index_in_page = index % divisor;
ASSERT(index_of_page < addresses_per_page);
PageInfo next;
PageTable::with_fast_page(root.paddr(), [&] {
next = PageTable::fast_page_as_sized<PageInfo>(index_of_page);
PageInfo layer0_page;
PageTable::with_fast_page(m_data_pages.paddr(), [&] {
layer0_page = PageTable::fast_page_as_sized<PageInfo>(layer0_index);
});
ASSERT(layer0_page.flags() & PageInfo::Flags::Present);
return find_indirect(next, index_in_page, depth - 1);
PageInfo layer1_page;
PageTable::with_fast_page(layer0_page.paddr(), [&] {
layer1_page = PageTable::fast_page_as_sized<PageInfo>(layer1_index);
});
ASSERT(layer1_page.flags() & PageInfo::Flags::Present);
PageInfo layer2_page;
PageTable::with_fast_page(layer1_page.paddr(), [&] {
layer2_page = PageTable::fast_page_as_sized<PageInfo>(layer2_index);
});
ASSERT(layer2_page.flags() & PageInfo::Flags::Present);
return layer2_page.paddr();
}
template<TmpFuncs::for_each_indirect_paddr_allocating_callback F>
BAN::ErrorOr<BAN::Iteration> TmpFileSystem::for_each_indirect_paddr_allocating_internal(PageInfo page_info, F callback, size_t depth)
BAN::ErrorOr<size_t> TmpFileSystem::allocate_block()
{
LockGuard _(m_mutex);
ASSERT(page_info.flags() & PageInfo::Flags::Present);
if (depth == 0)
{
bool is_new_block = page_info.flags() & PageInfo::Flags::Internal;
return callback(page_info.paddr(), is_new_block);
}
if (m_used_pages >= m_max_pages)
return BAN::Error::from_errno(ENOSPC);
for (size_t i = 0; i < PAGE_SIZE / sizeof(PageInfo); i++)
const paddr_t new_block = Heap::get().take_free_page();
if (new_block == 0)
return BAN::Error::from_errno(ENOMEM);
PageTable::with_fast_page(new_block, [] {
memset(PageTable::fast_page_as_ptr(), 0, PAGE_SIZE);
});
BAN::ScopeGuard block_deleter([new_block] { Heap::get().release_page(new_block); });
constexpr size_t page_infos_per_page = PAGE_SIZE / sizeof(PageInfo);
for (size_t layer0_index = 0; layer0_index < PAGE_SIZE / sizeof(PageInfo); layer0_index++)
{
PageInfo next_info;
PageTable::with_fast_page(page_info.paddr(), [&] {
next_info = PageTable::fast_page_as_sized<PageInfo>(i);
PageInfo layer0_page;
PageTable::with_fast_page(m_data_pages.paddr(), [&] {
layer0_page = PageTable::fast_page_as_sized<PageInfo>(layer0_index);
});
if (!(next_info.flags() & PageInfo::Flags::Present))
if (!(layer0_page.flags() & PageInfo::Flags::Present))
{
if (m_used_pages >= m_max_pages)
if (m_used_pages + 1 >= m_max_pages)
return BAN::Error::from_errno(ENOSPC);
paddr_t new_paddr = Heap::get().take_free_page();
if (new_paddr == 0)
const paddr_t paddr = Heap::get().take_free_page();
if (paddr == 0)
return BAN::Error::from_errno(ENOMEM);
PageTable::with_fast_page(paddr, [&] {
memset(PageTable::fast_page_as_ptr(), 0, PAGE_SIZE);
});
PageTable::with_fast_page(m_data_pages.paddr(), [&] {
auto& page_info = PageTable::fast_page_as_sized<PageInfo>(layer0_index);
page_info.set_paddr(paddr);
page_info.set_flags(PageInfo::Flags::Present);
layer0_page = page_info;
});
m_used_pages++;
PageTable::with_fast_page(new_paddr, [&] {
memset(PageTable::fast_page_as_ptr(), 0x00, PAGE_SIZE);
});
next_info.set_paddr(new_paddr);
next_info.set_flags(PageInfo::Flags::Present);
PageTable::with_fast_page(page_info.paddr(), [&] {
auto& to_update_info = PageTable::fast_page_as_sized<PageInfo>(i);
to_update_info = next_info;
});
// Don't sync the internal bit to actual memory
next_info.set_flags(PageInfo::Flags::Internal | PageInfo::Flags::Present);
}
auto result = TRY(for_each_indirect_paddr_allocating_internal(next_info, callback, depth - 1));
switch (result)
for (size_t layer1_index = 0; layer1_index < PAGE_SIZE / sizeof(PageInfo); layer1_index++)
{
case BAN::Iteration::Continue:
break;
case BAN::Iteration::Break:
return BAN::Iteration::Break;
default:
PageInfo layer1_page;
PageTable::with_fast_page(layer0_page.paddr(), [&] {
layer1_page = PageTable::fast_page_as_sized<PageInfo>(layer1_index);
});
if (!(layer1_page.flags() & PageInfo::Flags::Present))
{
if (m_used_pages + 1 >= m_max_pages)
return BAN::Error::from_errno(ENOSPC);
const paddr_t paddr = Heap::get().take_free_page();
if (paddr == 0)
return BAN::Error::from_errno(ENOMEM);
PageTable::with_fast_page(paddr, [&] {
memset(PageTable::fast_page_as_ptr(), 0, PAGE_SIZE);
});
PageTable::with_fast_page(layer0_page.paddr(), [&] {
auto& page_info = PageTable::fast_page_as_sized<PageInfo>(layer1_index);
page_info.set_paddr(paddr);
page_info.set_flags(PageInfo::Flags::Present);
layer1_page = page_info;
});
m_used_pages++;
}
size_t layer2_index = SIZE_MAX;
PageTable::with_fast_page(layer1_page.paddr(), [&] {
constexpr size_t pages_per_block = page_infos_per_page - 1;
auto& allocated_pages = PageTable::fast_page_as_sized<size_t>(pages_per_block);
if (allocated_pages == pages_per_block)
return;
for (size_t i = 0; i < pages_per_block; i++)
{
auto& page_info = PageTable::fast_page_as_sized<PageInfo>(i);
if (page_info.flags() & PageInfo::Flags::Present)
continue;
page_info.set_paddr(new_block);
page_info.set_flags(PageInfo::Flags::Present);
allocated_pages++;
layer2_index = i;
return;
}
ASSERT_NOT_REACHED();
});
if (layer2_index != SIZE_MAX)
{
block_deleter.disable();
m_used_pages++;
const size_t layer0_offset = layer0_index * (page_infos_per_page - 1) * page_infos_per_page;
const size_t layer1_offset = layer1_index * (page_infos_per_page - 1);
const size_t layer2_offset = layer2_index;
return layer0_offset + layer1_offset + layer2_offset + first_data_page;
}
}
}
return BAN::Iteration::Continue;
}
template<TmpFuncs::for_each_indirect_paddr_allocating_callback F>
BAN::ErrorOr<void> TmpFileSystem::for_each_indirect_paddr_allocating(PageInfo page_info, F callback, size_t depth)
{
LockGuard _(m_mutex);
BAN::Iteration result = TRY(for_each_indirect_paddr_allocating_internal(page_info, callback, depth));
ASSERT(result == BAN::Iteration::Break);
return {};
ASSERT_NOT_REACHED();
}
}
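
The rewritten allocator drops the recursive for_each_indirect_paddr_allocating walk in favour of an explicit three-level table: a root page of PageInfo entries, a middle layer of PageInfo entries, and leaf pages holding TmpInodeInfo records; for data pages the leaves hold PageInfo entries and their last slot is reused as an allocated-pages counter, which is why those paths divide by page_infos_per_page - 1. A minimal sketch of the index arithmetic shared by allocate_inode and find_inode, with illustrative sizes standing in for PAGE_SIZE / sizeof(TmpInodeInfo) and PAGE_SIZE / sizeof(PageInfo):

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    // Illustrative constants; the kernel derives these from
    // PAGE_SIZE / sizeof(TmpInodeInfo) and PAGE_SIZE / sizeof(PageInfo).
    constexpr size_t inode_infos_per_page = 128;
    constexpr size_t page_infos_per_page  = 512;
    constexpr size_t first_inode          = 1;

    struct Layers { size_t l0, l1, l2; };

    // Same decomposition find_inode() uses to walk root -> middle -> leaf.
    Layers decompose(size_t ino)
    {
        const size_t n = ino - first_inode;
        return {
            n / inode_infos_per_page / page_infos_per_page,
            n / inode_infos_per_page % page_infos_per_page,
            n % inode_infos_per_page,
        };
    }

    // Inverse used by allocate_inode() when it returns the new inode number.
    size_t compose(Layers l)
    {
        return l.l0 * inode_infos_per_page * page_infos_per_page
             + l.l1 * inode_infos_per_page
             + l.l2
             + first_inode;
    }

    int main()
    {
        for (size_t ino = first_inode; ino < first_inode + 200000; ino += 777)
            assert(compose(decompose(ino)) == ino); // round-trips exactly
        printf("layer decomposition is consistent\n");
    }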

View File

@ -50,7 +50,7 @@ namespace Kernel
dev_t TmpInode::dev() const
{
return m_fs.dev();
return m_fs.rdev();
}
BAN::ErrorOr<BAN::RefPtr<TmpInode>> TmpInode::create_from_existing(TmpFileSystem& fs, ino_t ino, const TmpInodeInfo& info)
@ -94,12 +94,19 @@ namespace Kernel
BAN::ErrorOr<void> TmpInode::chmod_impl(mode_t new_mode)
{
ASSERT(!(new_mode & Mode::TYPE_MASK));
m_inode_info.mode &= ~Mode::TYPE_MASK;
ASSERT(!(new_mode & Inode::Mode::TYPE_MASK));
m_inode_info.mode &= Inode::Mode::TYPE_MASK;
m_inode_info.mode |= new_mode;
return {};
}
BAN::ErrorOr<void> TmpInode::chown_impl(uid_t new_uid, gid_t new_gid)
{
m_inode_info.uid = new_uid;
m_inode_info.gid = new_gid;
return {};
}
BAN::ErrorOr<void> TmpInode::utimens_impl(const timespec times[2])
{
if (times[0].tv_nsec != UTIME_OMIT)
@ -117,36 +124,160 @@ namespace Kernel
void TmpInode::free_all_blocks()
{
for (size_t i = 0; i < TmpInodeInfo::direct_block_count; i++)
{
if (m_inode_info.block[i])
m_fs.free_block(m_inode_info.block[i]);
m_inode_info.block[i] = 0;
if (size_t block = m_inode_info.block[TmpInodeInfo::direct_block_count + 0])
free_indirect_blocks(block, 1);
if (size_t block = m_inode_info.block[TmpInodeInfo::direct_block_count + 1])
free_indirect_blocks(block, 2);
if (size_t block = m_inode_info.block[TmpInodeInfo::direct_block_count + 2])
free_indirect_blocks(block, 3);
for (auto& block : m_inode_info.block)
block = 0;
}
void TmpInode::free_indirect_blocks(size_t block, uint32_t depth)
{
ASSERT(block != 0);
if (depth == 0)
{
m_fs.free_block(block);
return;
}
for (auto block : m_inode_info.block)
ASSERT(block == 0);
const size_t indices_per_block = blksize() / sizeof(size_t);
for (size_t index = 0; index < indices_per_block; index++)
{
size_t next_block;
m_fs.with_block_buffer(block, [&](BAN::ByteSpan block_buffer) {
next_block = block_buffer.as_span<size_t>()[index];
});
if (next_block == 0)
continue;
free_indirect_blocks(next_block, depth - 1);
}
m_fs.free_block(block);
}
BAN::Optional<size_t> TmpInode::block_index(size_t data_block_index)
{
ASSERT(data_block_index < TmpInodeInfo::direct_block_count);
if (m_inode_info.block[data_block_index])
if (data_block_index < TmpInodeInfo::direct_block_count)
{
if (m_inode_info.block[data_block_index] == 0)
return {};
return m_inode_info.block[data_block_index];
return {};
}
data_block_index -= TmpInodeInfo::direct_block_count;
const size_t indices_per_block = blksize() / sizeof(size_t);
if (data_block_index < indices_per_block)
return block_index_from_indirect(m_inode_info.block[TmpInodeInfo::direct_block_count + 0], data_block_index, 1);
data_block_index -= indices_per_block;
if (data_block_index < indices_per_block * indices_per_block)
return block_index_from_indirect(m_inode_info.block[TmpInodeInfo::direct_block_count + 1], data_block_index, 2);
data_block_index -= indices_per_block * indices_per_block;
if (data_block_index < indices_per_block * indices_per_block * indices_per_block)
return block_index_from_indirect(m_inode_info.block[TmpInodeInfo::direct_block_count + 2], data_block_index, 3);
ASSERT_NOT_REACHED();
}
BAN::Optional<size_t> TmpInode::block_index_from_indirect(size_t block, size_t index, uint32_t depth)
{
if (block == 0)
return {};
ASSERT(depth >= 1);
const size_t indices_per_block = blksize() / sizeof(size_t);
size_t divisor = 1;
for (size_t i = 1; i < depth; i++)
divisor *= indices_per_block;
size_t next_block;
m_fs.with_block_buffer(block, [&](BAN::ByteSpan block_buffer) {
next_block = block_buffer.as_span<size_t>()[(index / divisor) % indices_per_block];
});
if (next_block == 0)
return {};
if (depth == 1)
return next_block;
return block_index_from_indirect(next_block, index, depth - 1);
}
BAN::ErrorOr<size_t> TmpInode::block_index_with_allocation(size_t data_block_index)
{
if (data_block_index >= TmpInodeInfo::direct_block_count)
if (data_block_index < TmpInodeInfo::direct_block_count)
{
dprintln("only {} blocks supported :D", TmpInodeInfo::direct_block_count);
return BAN::Error::from_errno(ENOSPC);
if (m_inode_info.block[data_block_index] == 0)
{
m_inode_info.block[data_block_index] = TRY(m_fs.allocate_block());
m_inode_info.blocks++;
}
return m_inode_info.block[data_block_index];
}
if (m_inode_info.block[data_block_index] == 0)
data_block_index -= TmpInodeInfo::direct_block_count;
const size_t indices_per_block = blksize() / sizeof(size_t);
if (data_block_index < indices_per_block)
return block_index_from_indirect_with_allocation(m_inode_info.block[TmpInodeInfo::direct_block_count + 0], data_block_index, 1);
data_block_index -= indices_per_block;
if (data_block_index < indices_per_block * indices_per_block)
return block_index_from_indirect_with_allocation(m_inode_info.block[TmpInodeInfo::direct_block_count + 1], data_block_index, 2);
data_block_index -= indices_per_block * indices_per_block;
if (data_block_index < indices_per_block * indices_per_block * indices_per_block)
return block_index_from_indirect_with_allocation(m_inode_info.block[TmpInodeInfo::direct_block_count + 2], data_block_index, 3);
ASSERT_NOT_REACHED();
}
BAN::ErrorOr<size_t> TmpInode::block_index_from_indirect_with_allocation(size_t& block, size_t index, uint32_t depth)
{
if (block == 0)
{
m_inode_info.block[data_block_index] = TRY(m_fs.allocate_block());
block = TRY(m_fs.allocate_block());
m_inode_info.blocks++;
}
return m_inode_info.block[data_block_index];
ASSERT(depth >= 1);
const size_t indices_per_block = blksize() / sizeof(size_t);
size_t divisor = 1;
for (size_t i = 1; i < depth; i++)
divisor *= indices_per_block;
size_t next_block;
m_fs.with_block_buffer(block, [&](BAN::ByteSpan block_buffer) {
next_block = block_buffer.as_span<size_t>()[(index / divisor) % indices_per_block];
});
if (next_block == 0)
{
next_block = TRY(m_fs.allocate_block());
m_inode_info.blocks++;
m_fs.with_block_buffer(block, [&](BAN::ByteSpan block_buffer) {
block_buffer.as_span<size_t>()[(index / divisor) % indices_per_block] = next_block;
});
}
if (depth == 1)
return next_block;
return block_index_from_indirect_with_allocation(next_block, index, depth - 1);
}
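
block_index() and block_index_with_allocation() now fall through three tiers after the direct blocks: a singly, doubly, and triply indirect block, each tier covering indices_per_block times more data blocks than the one before. Within a tier, the slot at each level is (index / divisor) % indices_per_block, with the divisor shrinking by a factor of indices_per_block per level of descent. A standalone sketch of that dispatch, using an illustrative direct_block_count and block size (the kernel derives indices_per_block from blksize() / sizeof(size_t)):

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    constexpr size_t direct_block_count = 10;  // illustrative, not the kernel's value
    constexpr size_t indices_per_block  = 512; // blksize() / sizeof(size_t) in the kernel

    // Indirection depth (0 = direct) and the index within that tier.
    struct Placement { unsigned depth; size_t index_in_tier; };

    Placement place(size_t data_block_index)
    {
        if (data_block_index < direct_block_count)
            return { 0, data_block_index };
        data_block_index -= direct_block_count;
        if (data_block_index < indices_per_block)
            return { 1, data_block_index };
        data_block_index -= indices_per_block;
        if (data_block_index < indices_per_block * indices_per_block)
            return { 2, data_block_index };
        data_block_index -= indices_per_block * indices_per_block;
        return { 3, data_block_index };
    }

    // Slot chosen at each level while walking down a chain of `depth` levels.
    size_t slot_at_level(size_t index_in_tier, unsigned depth, unsigned level)
    {
        size_t divisor = 1;
        for (unsigned i = level + 1; i < depth; i++)
            divisor *= indices_per_block;
        return (index_in_tier / divisor) % indices_per_block;
    }

    int main()
    {
        const auto p = place(direct_block_count + indices_per_block + 5); // doubly indirect
        assert(p.depth == 2);
        printf("depth %u, level-0 slot %zu, level-1 slot %zu\n",
               p.depth, slot_at_level(p.index_in_tier, p.depth, 0),
               slot_at_level(p.index_in_tier, p.depth, 1));
    }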
/* FILE INODE */
@ -234,6 +365,9 @@ namespace Kernel
BAN::ErrorOr<void> TmpFileInode::truncate_impl(size_t new_size)
{
// FIXME: if size is decreased, we should probably free
// unused blocks
m_inode_info.size = new_size;
return {};
}
@ -288,7 +422,7 @@ namespace Kernel
{
}
BAN::ErrorOr<void> TmpSymlinkInode::set_link_target(BAN::StringView new_target)
BAN::ErrorOr<void> TmpSymlinkInode::set_link_target_impl(BAN::StringView new_target)
{
free_all_blocks();
m_inode_info.size = 0;
@ -500,6 +634,9 @@ namespace Kernel
case Mode::IFREG:
new_inode = TRY(TmpFileInode::create_new(m_fs, mode, uid, gid));
break;
case Mode::IFLNK:
new_inode = TRY(TmpSymlinkInode::create_new(m_fs, mode, uid, gid, ""_sv));
break;
case Mode::IFSOCK:
new_inode = TRY(TmpSocketInode::create_new(m_fs, mode, uid, gid));
break;

View File

@ -0,0 +1,166 @@
#include <BAN/ScopeGuard.h>
#include <kernel/FS/USTARModule.h>
#include <tar.h>
namespace Kernel
{
bool is_ustar_boot_module(const BootModule& module)
{
if (module.start % PAGE_SIZE)
{
dprintln("ignoring non-page-aligned module");
return false;
}
if (module.size < 512)
return false;
bool has_ustar_signature;
PageTable::with_fast_page(module.start, [&] {
has_ustar_signature = memcmp(PageTable::fast_page_as_ptr(257), "ustar", 5) == 0;
});
return has_ustar_signature;
}
BAN::ErrorOr<void> unpack_boot_module_into_filesystem(BAN::RefPtr<FileSystem> filesystem, const BootModule& module)
{
ASSERT(is_ustar_boot_module(module));
auto root_inode = filesystem->root_inode();
uint8_t* temp_page = static_cast<uint8_t*>(kmalloc(PAGE_SIZE));
if (temp_page == nullptr)
return BAN::Error::from_errno(ENOMEM);
BAN::ScopeGuard _([temp_page] { kfree(temp_page); });
size_t offset = 0;
while (offset + 512 <= module.size)
{
size_t file_size = 0;
mode_t file_mode = 0;
uid_t file_uid = 0;
gid_t file_gid = 0;
uint8_t file_type = 0;
char file_path[100 + 1 + 155 + 1] {};
PageTable::with_fast_page((module.start + offset) & PAGE_ADDR_MASK, [&] {
const size_t page_off = offset % PAGE_SIZE;
const auto parse_octal =
[page_off](size_t offset, size_t length) -> size_t
{
size_t result = 0;
for (size_t i = 0; i < length; i++)
{
const char ch = PageTable::fast_page_as<char>(page_off + offset + i);
if (ch == '\0')
break;
result = (result * 8) + (ch - '0');
}
return result;
};
if (memcmp(PageTable::fast_page_as_ptr(page_off + 257), "ustar", 5)) {
file_size = SIZE_MAX;
return;
}
memcpy(file_path, PageTable::fast_page_as_ptr(page_off + 345), 155);
const size_t prefix_len = strlen(file_path);
file_path[prefix_len] = '/';
memcpy(file_path + prefix_len + 1, PageTable::fast_page_as_ptr(page_off), 100);
file_mode = parse_octal(100, 8);
file_uid = parse_octal(108, 8);
file_gid = parse_octal(116, 8);
file_size = parse_octal(124, 12);
file_type = PageTable::fast_page_as<char>(page_off + 156);
});
if (file_size == SIZE_MAX)
break;
if (offset + 512 + file_size > module.size)
break;
auto parent_inode = filesystem->root_inode();
auto file_path_parts = TRY(BAN::StringView(file_path).split('/'));
for (size_t i = 0; i < file_path_parts.size() - 1; i++)
parent_inode = TRY(parent_inode->find_inode(file_path_parts[i]));
switch (file_type)
{
case REGTYPE:
case AREGTYPE: file_mode |= Inode::Mode::IFREG; break;
case LNKTYPE: break;
case SYMTYPE: file_mode |= Inode::Mode::IFLNK; break;
case CHRTYPE: file_mode |= Inode::Mode::IFCHR; break;
case BLKTYPE: file_mode |= Inode::Mode::IFBLK; break;
case DIRTYPE: file_mode |= Inode::Mode::IFDIR; break;
case FIFOTYPE: file_mode |= Inode::Mode::IFIFO; break;
default:
ASSERT_NOT_REACHED();
}
auto file_name_sv = file_path_parts.back();
if (file_type == DIRTYPE)
{
TRY(parent_inode->create_directory(file_name_sv, file_mode, file_uid, file_gid));
}
else if (file_type == LNKTYPE)
{
dwarnln("TODO: hardlink");
}
else if (file_type == SYMTYPE)
{
TRY(parent_inode->create_file(file_name_sv, file_mode, file_uid, file_gid));
char link_target[101] {};
const paddr_t paddr = module.start + offset;
PageTable::with_fast_page(paddr & PAGE_ADDR_MASK, [&] {
memcpy(link_target, PageTable::fast_page_as_ptr((paddr % PAGE_SIZE) + 157), 100);
});
if (link_target[0])
{
auto inode = TRY(parent_inode->find_inode(file_name_sv));
TRY(inode->set_link_target(link_target));
}
}
else
{
TRY(parent_inode->create_file(file_name_sv, file_mode, file_uid, file_gid));
if (file_size)
{
auto inode = TRY(parent_inode->find_inode(file_name_sv));
size_t nwritten = 0;
while (nwritten < file_size)
{
const paddr_t paddr = module.start + offset + 512 + nwritten;
PageTable::with_fast_page(paddr & PAGE_ADDR_MASK, [&] {
memcpy(temp_page, PageTable::fast_page_as_ptr(), PAGE_SIZE);
});
const size_t page_off = paddr % PAGE_SIZE;
const size_t to_write = BAN::Math::min(file_size - nwritten, PAGE_SIZE - page_off);
TRY(inode->write(nwritten, { temp_page + page_off, to_write }));
nwritten += to_write;
}
}
}
offset += 512 + file_size;
if (auto rem = offset % 512)
offset += 512 - rem;
}
return {};
}
}
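
Each USTAR record is a 512-byte header followed by the file data rounded up to the next 512-byte boundary; numeric fields are NUL-terminated octal ASCII, the type flag is the byte at offset 156, the magic sits at offset 257, and the path is the 155-byte prefix at offset 345 joined to the 100-byte name at offset 0. A self-contained sketch of the same octal parsing and record advance; the header below is fabricated just for the demo:

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    // Mirrors the kernel's parse_octal lambda: accumulate base-8 digits
    // until the terminating NUL.
    size_t parse_octal(const char* field, size_t length)
    {
        size_t result = 0;
        for (size_t i = 0; i < length; i++)
        {
            const char ch = field[i];
            if (ch == '\0')
                break;
            result = result * 8 + (ch - '0');
        }
        return result;
    }

    int main()
    {
        char header[512] {};
        memcpy(header + 0,   "hello.txt", 9);    // name
        memcpy(header + 100, "0000644", 8);      // mode
        memcpy(header + 124, "00000001750", 12); // size: 01750 octal = 1000 bytes
        memcpy(header + 257, "ustar", 5);        // magic checked by is_ustar_boot_module
        header[156] = '0';                        // REGTYPE

        const size_t file_size = parse_octal(header + 124, 12);
        // File data follows the header; the next header starts at the next
        // 512-byte boundary, exactly like the loader's `offset += 512 + file_size`
        // plus the round-up.
        size_t next_offset = 512 + file_size;
        if (size_t rem = next_offset % 512)
            next_offset += 512 - rem;

        printf("size=%zu, next header at offset %zu\n", file_size, next_offset); // 1000, 1536
    }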

View File

@ -3,6 +3,7 @@
#include <kernel/FS/DevFS/FileSystem.h>
#include <kernel/FS/ProcFS/FileSystem.h>
#include <kernel/FS/TmpFS/FileSystem.h>
#include <kernel/FS/USTARModule.h>
#include <kernel/FS/VirtualFileSystem.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Storage/Partition.h>
@ -51,7 +52,31 @@ namespace Kernel
return BAN::RefPtr<BlockDevice>(static_cast<BlockDevice*>(device_inode.ptr()));
}
static BAN::RefPtr<BlockDevice> find_root_device(BAN::StringView root_path)
static BAN::RefPtr<FileSystem> load_fallback_root_filesystem()
{
if (g_boot_info.modules.empty())
panic("No fallback boot modules given");
auto filesystem_or_error = TmpFileSystem::create(-1, 0755, 0, 0);
if (filesystem_or_error.is_error())
panic("Failed to create fallback filesystem: {}", filesystem_or_error.error());
dwarnln("Attempting to load fallback filesystem from {} modules", g_boot_info.modules.size());
auto filesystem = BAN::RefPtr<FileSystem>::adopt(filesystem_or_error.release_value());
for (const auto& module : g_boot_info.modules)
{
if (!is_ustar_boot_module(module))
continue;
if (auto ret = unpack_boot_module_into_filesystem(filesystem, module); ret.is_error())
dwarnln("Failed to unpack boot module: {}", ret.error());
}
return filesystem;
}
static BAN::RefPtr<FileSystem> load_root_filesystem(BAN::StringView root_path)
{
enum class RootType
{
@ -66,19 +91,26 @@ namespace Kernel
{
entry = root_path.substring(9);
if (entry.size() != 36)
panic("Invalid UUID '{}'", entry);
{
derrorln("Invalid UUID '{}'", entry);
return load_fallback_root_filesystem();
}
type = RootType::PartitionUUID;
}
else if (root_path.starts_with("/dev/"_sv))
{
entry = root_path.substring(5);
if (entry.empty() || entry.contains('/'))
panic("Invalid root path '{}'", root_path);
{
derrorln("Invalid root path '{}'", root_path);
return load_fallback_root_filesystem();
}
type = RootType::BlockDeviceName;
}
else
{
panic("Unsupported root path format '{}'", root_path);
derrorln("Unsupported root path format '{}'", root_path);
return load_fallback_root_filesystem();
}
constexpr size_t timeout_ms = 10'000;
@ -99,15 +131,30 @@ namespace Kernel
}
if (!ret.is_error())
return ret.release_value();
{
auto filesystem_or_error = FileSystem::from_block_device(ret.release_value());
if (filesystem_or_error.is_error())
{
derrorln("Could not create filesystem from '{}': {}", root_path, filesystem_or_error.error());
return load_fallback_root_filesystem();
}
return filesystem_or_error.release_value();
}
if (ret.error().get_error_code() != ENOENT)
panic("could not open root device '{}': {}", root_path, ret.error());
{
derrorln("Could not open root device '{}': {}", root_path, ret.error());
return load_fallback_root_filesystem();
}
if (i == 4)
dwarnln("Could not find specified root device, waiting for it to get loaded...");
SystemTimer::get().sleep_ms(sleep_ms);
}
panic("could not find root device '{}' after {} ms", root_path, timeout_ms);
derrorln("Could not find root device '{}' after {} ms", root_path, timeout_ms);
return load_fallback_root_filesystem();
}
void VirtualFileSystem::initialize(BAN::StringView root_path)
@ -115,13 +162,9 @@ namespace Kernel
ASSERT(!s_instance);
s_instance = MUST(BAN::RefPtr<VirtualFileSystem>::create());
auto root_device = find_root_device(root_path);
ASSERT(root_device);
auto filesystem_result = FileSystem::from_block_device(root_device);
if (filesystem_result.is_error())
panic("Could not create filesystem from '{}': {}", root_path, filesystem_result.error());
s_instance->m_root_fs = filesystem_result.release_value();
s_instance->m_root_fs = load_root_filesystem(root_path);
if (!s_instance->m_root_fs)
panic("Could not load root filesystem");
Credentials root_creds { 0, 0, 0, 0 };
MUST(s_instance->mount(root_creds, &DevFileSystem::get(), "/dev"_sv));

View File

@ -1,8 +1,11 @@
#include <kernel/GDT.h>
#include <kernel/Memory/Types.h>
#include <kernel/Processor.h>
#include <string.h>
extern "C" uint8_t g_boot_stack_top[];
namespace Kernel
{
@ -62,6 +65,7 @@ namespace Kernel
{
memset(&m_tss, 0x00, sizeof(TaskStateSegment));
m_tss.iopb = sizeof(TaskStateSegment);
m_tss.ist1 = reinterpret_cast<vaddr_t>(g_boot_stack_top);
uintptr_t base = reinterpret_cast<uintptr_t>(&m_tss);

View File

@ -247,7 +247,7 @@ namespace Kernel
);
}
if (Thread::current().has_process() && Process::current().is_userspace())
if (Thread::current().has_process())
process_name = Process::current().name();
#if ARCH(x86_64)
@ -401,7 +401,7 @@ namespace Kernel
Thread::current().load_sse();
}
void IDT::register_interrupt_handler(uint8_t index, void (*handler)())
void IDT::register_interrupt_handler(uint8_t index, void (*handler)(), uint8_t ist)
{
auto& desc = m_idt[index];
memset(&desc, 0, sizeof(GateDescriptor));
@ -412,6 +412,7 @@ namespace Kernel
desc.offset2 = (uint32_t)((uintptr_t)handler >> 32);
#endif
desc.IST = ist;
desc.selector = 0x08;
desc.flags = 0x8E;
}
@ -453,6 +454,9 @@ namespace Kernel
ISR_LIST_X
#undef X
idt->register_interrupt_handler(DoubleFault, isr8, 1);
static_assert(DoubleFault == 8);
#define X(num) idt->register_interrupt_handler(IRQ_VECTOR_BASE + num, irq ## num);
IRQ_LIST_X
#undef X

View File

@ -12,7 +12,7 @@
namespace Kernel::Input
{
static constexpr uint64_t s_ps2_timeout_ms = 100;
static constexpr uint64_t s_ps2_timeout_ms = 300;
static PS2Controller* s_instance = nullptr;
@ -238,6 +238,15 @@ namespace Kernel::Input
return *s_instance;
}
struct PS2DeviceInitInfo
{
PS2Controller* controller;
bool valid_ports[2];
uint8_t scancode_set;
uint8_t config;
BAN::Atomic<bool> thread_started;
};
BAN::ErrorOr<void> PS2Controller::initialize_impl(uint8_t scancode_set)
{
constexpr size_t iapc_flag_off = offsetof(ACPI::FADT, iapc_boot_arch);
@ -315,6 +324,54 @@ namespace Kernel::Input
if (!valid_ports[0] && !valid_ports[1])
return {};
// Reserve IRQs
if (valid_ports[0] && InterruptController::get().reserve_irq(PS2::IRQ::DEVICE0).is_error())
{
dwarnln("Could not reserve irq for PS/2 port 1");
valid_ports[0] = false;
}
if (valid_ports[1] && InterruptController::get().reserve_irq(PS2::IRQ::DEVICE1).is_error())
{
dwarnln("Could not reserve irq for PS/2 port 2");
valid_ports[1] = false;
}
PS2DeviceInitInfo info {
.controller = this,
.valid_ports = { valid_ports[0], valid_ports[1] },
.scancode_set = scancode_set,
.config = config,
.thread_started { false },
};
auto* init_thread = TRY(Thread::create_kernel(
[](void* info) {
static_cast<PS2DeviceInitInfo*>(info)->controller->device_initialize_task(info);
}, &info
));
TRY(Processor::scheduler().add_thread(init_thread));
while (!info.thread_started)
Processor::pause();
return {};
}
void PS2Controller::device_initialize_task(void* _info)
{
bool valid_ports[2];
uint8_t scancode_set;
uint8_t config;
{
auto& info = *static_cast<PS2DeviceInitInfo*>(_info);
valid_ports[0] = info.valid_ports[0];
valid_ports[1] = info.valid_ports[1];
scancode_set = info.scancode_set;
config = info.config;
info.thread_started = true;
}
// Initialize devices
for (uint8_t device = 0; device < 2; device++)
{
@ -325,7 +382,7 @@ namespace Kernel::Input
dwarnln_if(DEBUG_PS2, "PS/2 device enable failed: {}", ret.error());
continue;
}
if (auto res = initialize_device(device, scancode_set); res.is_error())
if (auto res = identify_device(device, scancode_set); res.is_error())
{
dwarnln_if(DEBUG_PS2, "PS/2 device initialization failed: {}", res.error());
(void)send_command(device == 0 ? PS2::Command::DISABLE_FIRST_PORT : PS2::Command::DISABLE_SECOND_PORT);
@ -333,20 +390,8 @@ namespace Kernel::Input
}
}
// Reserve IRQs
if (m_devices[0] && InterruptController::get().reserve_irq(PS2::IRQ::DEVICE0).is_error())
{
dwarnln("Could not reserve irq for PS/2 port 1");
m_devices[0].clear();
}
if (m_devices[1] && InterruptController::get().reserve_irq(PS2::IRQ::DEVICE1).is_error())
{
dwarnln("Could not reserve irq for PS/2 port 2");
m_devices[1].clear();
}
if (!m_devices[0] && !m_devices[1])
return {};
return;
// Enable irqs on valid devices
if (m_devices[0])
@ -362,21 +407,21 @@ namespace Kernel::Input
config |= PS2::Config::INTERRUPT_SECOND_PORT;
}
TRY(send_command(PS2::Command::WRITE_CONFIG, config));
if (auto ret = send_command(PS2::Command::WRITE_CONFIG, config); ret.is_error())
{
dwarnln("PS2 failed to enable interrupts: {}", ret.error());
m_devices[0].clear();
m_devices[1].clear();
return;
}
// Send device initialization sequence after interrupts are enabled
for (uint8_t i = 0; i < 2; i++)
{
if (!m_devices[i])
continue;
m_devices[i]->send_initialize();
DevFileSystem::get().add_device(m_devices[i]);
}
return {};
if (m_devices[i])
m_devices[i]->send_initialize();
}
BAN::ErrorOr<void> PS2Controller::initialize_device(uint8_t device, uint8_t scancode_set)
BAN::ErrorOr<void> PS2Controller::identify_device(uint8_t device, uint8_t scancode_set)
{
// Reset device
TRY(device_send_byte_and_wait_ack(device, PS2::DeviceCommand::RESET));
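
initialize_impl above hands a stack-allocated PS2DeviceInitInfo to the new kernel thread and spins on thread_started, so the structure stays alive exactly until device_initialize_task has copied the fields it needs. A userspace analogue of that handover, sketched with std::thread and std::atomic (the names and values are illustrative, not kernel API):

    #include <atomic>
    #include <cstdio>
    #include <thread>

    struct InitInfo
    {
        int scancode_set;
        bool valid_ports[2];
        std::atomic<bool> started { false };
    };

    void worker(InitInfo* info)
    {
        // Copy everything out of the caller's stack frame first...
        const int scancode_set = info->scancode_set;
        const bool port0 = info->valid_ports[0];
        const bool port1 = info->valid_ports[1];
        // ...and only then let the caller return and reuse that memory.
        info->started = true;
        printf("init with set %d, ports %d/%d\n", scancode_set, port0, port1);
    }

    int main()
    {
        InitInfo info;
        info.scancode_set  = 2;
        info.valid_ports[0] = true;
        info.valid_ports[1] = false;

        std::thread t(worker, &info);
        while (!info.started)        // the kernel version calls Processor::pause() here
            std::this_thread::yield();
        t.join();                    // the kernel thread instead keeps running on its own
    }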

View File

@ -9,7 +9,14 @@ namespace Kernel::Input
PS2Device::PS2Device(PS2Controller& controller, InputDevice::Type type)
: InputDevice(type)
, m_controller(controller)
{ }
{
DevFileSystem::get().add_device(this);
}
PS2Device::~PS2Device()
{
DevFileSystem::get().remove_device(this);
}
bool PS2Device::append_command_queue(uint8_t command, uint8_t response_size)
{

View File

@ -58,9 +58,13 @@ namespace Kernel
if (entry.type != MemoryMapEntry::Type::Available)
continue;
// FIXME: only reserve kernel area and modules, not everything from 0 -> kernel end
paddr_t start = entry.address;
if (start < (vaddr_t)g_kernel_end - KERNEL_OFFSET + g_boot_info.kernel_paddr)
start = (vaddr_t)g_kernel_end - KERNEL_OFFSET + g_boot_info.kernel_paddr;
for (const auto& module : g_boot_info.modules)
if (start < module.start + module.size)
start = module.start + module.size;
if (auto rem = start % PAGE_SIZE)
start += PAGE_SIZE - rem;

View File

@ -4,21 +4,30 @@
namespace Kernel
{
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr(PageTable& page_table, vaddr_t vaddr, size_t size, PageTable::flags_t flags, bool preallocate_pages)
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr(PageTable& page_table, vaddr_t vaddr, size_t size, PageTable::flags_t flags, bool preallocate_pages, bool add_guard_pages)
{
ASSERT(size % PAGE_SIZE == 0);
ASSERT(vaddr % PAGE_SIZE == 0);
ASSERT(vaddr > 0);
auto result = TRY(BAN::UniqPtr<VirtualRange>::create(page_table, preallocate_pages, vaddr, size, flags));
if (add_guard_pages)
{
vaddr -= PAGE_SIZE;
size += 2 * PAGE_SIZE;
}
auto result = TRY(BAN::UniqPtr<VirtualRange>::create(page_table, preallocate_pages, add_guard_pages, vaddr, size, flags));
ASSERT(page_table.reserve_range(vaddr, size));
TRY(result->initialize());
return result;
}
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr_range(PageTable& page_table, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t size, PageTable::flags_t flags, bool preallocate_pages)
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr_range(PageTable& page_table, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t size, PageTable::flags_t flags, bool preallocate_pages, bool add_guard_pages)
{
if (add_guard_pages)
size += 2 * PAGE_SIZE;
ASSERT(size % PAGE_SIZE == 0);
ASSERT(vaddr_start > 0);
ASSERT(vaddr_start + size <= vaddr_end);
@ -31,13 +40,13 @@ namespace Kernel
ASSERT(vaddr_start < vaddr_end);
ASSERT(vaddr_end - vaddr_start + 1 >= size / PAGE_SIZE);
vaddr_t vaddr = page_table.reserve_free_contiguous_pages(size / PAGE_SIZE, vaddr_start, vaddr_end);
const vaddr_t vaddr = page_table.reserve_free_contiguous_pages(size / PAGE_SIZE, vaddr_start, vaddr_end);
if (vaddr == 0)
return BAN::Error::from_errno(ENOMEM);
ASSERT(vaddr >= vaddr_start);
ASSERT(vaddr + size <= vaddr_end);
auto result_or_error = BAN::UniqPtr<VirtualRange>::create(page_table, preallocate_pages, vaddr, size, flags);
auto result_or_error = BAN::UniqPtr<VirtualRange>::create(page_table, preallocate_pages, add_guard_pages, vaddr, size, flags);
if (result_or_error.is_error())
{
page_table.unmap_range(vaddr, size);
@ -50,9 +59,10 @@ namespace Kernel
return result;
}
VirtualRange::VirtualRange(PageTable& page_table, bool preallocated, vaddr_t vaddr, size_t size, PageTable::flags_t flags)
VirtualRange::VirtualRange(PageTable& page_table, bool preallocated, bool has_guard_pages, vaddr_t vaddr, size_t size, PageTable::flags_t flags)
: m_page_table(page_table)
, m_preallocated(preallocated)
, m_has_guard_pages(has_guard_pages)
, m_vaddr(vaddr)
, m_size(size)
, m_flags(flags)
@ -70,26 +80,26 @@ namespace Kernel
BAN::ErrorOr<void> VirtualRange::initialize()
{
TRY(m_paddrs.resize(m_size / PAGE_SIZE, 0));
TRY(m_paddrs.resize(size() / PAGE_SIZE, 0));
if (!m_preallocated)
return {};
const size_t page_count = m_size / PAGE_SIZE;
const size_t page_count = size() / PAGE_SIZE;
for (size_t i = 0; i < page_count; i++)
{
m_paddrs[i] = Heap::get().take_free_page();
if (m_paddrs[i] == 0)
return BAN::Error::from_errno(ENOMEM);
m_page_table.map_page_at(m_paddrs[i], m_vaddr + i * PAGE_SIZE, m_flags);
m_page_table.map_page_at(m_paddrs[i], vaddr() + i * PAGE_SIZE, m_flags);
}
if (&PageTable::current() == &m_page_table || &PageTable::kernel() == &m_page_table)
memset(reinterpret_cast<void*>(m_vaddr), 0, m_size);
memset(reinterpret_cast<void*>(vaddr()), 0, size());
else
{
const size_t page_count = m_size / PAGE_SIZE;
for (size_t i = 0; i < page_count; i++)
const size_t page_count = size() / PAGE_SIZE;
for (size_t i = m_has_guard_pages; i < page_count; i++)
{
PageTable::with_fast_page(m_paddrs[i], [&] {
memset(PageTable::fast_page_as_ptr(), 0, PAGE_SIZE);
@ -107,10 +117,10 @@ namespace Kernel
SpinLockGuard _(m_lock);
auto result = TRY(create_to_vaddr(page_table, m_vaddr, m_size, m_flags, m_preallocated));
auto result = TRY(create_to_vaddr(page_table, vaddr(), size(), m_flags, m_preallocated, m_has_guard_pages));
const size_t page_count = m_size / PAGE_SIZE;
for (size_t i = 0; i < page_count; i++)
const size_t page_count = size() / PAGE_SIZE;
for (size_t i = m_has_guard_pages; i < page_count; i++)
{
if (m_paddrs[i] == 0)
continue;
@ -119,11 +129,11 @@ namespace Kernel
result->m_paddrs[i] = Heap::get().take_free_page();
if (result->m_paddrs[i] == 0)
return BAN::Error::from_errno(ENOMEM);
result->m_page_table.map_page_at(result->m_paddrs[i], m_vaddr + i * PAGE_SIZE, m_flags);
result->m_page_table.map_page_at(result->m_paddrs[i], vaddr() + i * PAGE_SIZE, m_flags);
}
PageTable::with_fast_page(result->m_paddrs[i], [&] {
memcpy(PageTable::fast_page_as_ptr(), reinterpret_cast<void*>(m_vaddr + i * PAGE_SIZE), PAGE_SIZE);
memcpy(PageTable::fast_page_as_ptr(), reinterpret_cast<void*>(vaddr() + i * PAGE_SIZE), PAGE_SIZE);
});
}
@ -137,7 +147,7 @@ namespace Kernel
ASSERT(contains(vaddr));
ASSERT(&PageTable::current() == &m_page_table);
const size_t index = (vaddr - m_vaddr) / PAGE_SIZE;
const size_t index = (vaddr - this->vaddr()) / PAGE_SIZE;
ASSERT(m_paddrs[index] == 0);
SpinLockGuard _(m_lock);
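
With add_guard_pages enabled the reserved range grows by one unmapped page on each side, and the initialize/clone loops now index from m_has_guard_pages and go through the vaddr()/size() accessors instead of the raw members. A small sketch of the bracketing arithmetic; treating vaddr() and size() as returning the usable inner window is an assumption here, inferred from clone() feeding them back through create_to_vaddr with guards re-enabled:

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    constexpr size_t PAGE_SIZE = 4096;

    struct Range
    {
        size_t reserved_vaddr; // includes guard pages when enabled
        size_t reserved_size;
        bool   has_guard_pages;

        // Assumed usable window handed out to the rest of the kernel.
        size_t vaddr() const { return reserved_vaddr + (has_guard_pages ? PAGE_SIZE : 0); }
        size_t size()  const { return reserved_size - (has_guard_pages ? 2 * PAGE_SIZE : 0); }
    };

    Range create(size_t vaddr, size_t size, bool add_guard_pages)
    {
        if (add_guard_pages)
        {
            vaddr -= PAGE_SIZE;      // unmapped page below the requested range
            size  += 2 * PAGE_SIZE;  // and one above it
        }
        return { vaddr, size, add_guard_pages };
    }

    int main()
    {
        const Range stack = create(0x200000, 16 * PAGE_SIZE, /*add_guard_pages=*/true);
        assert(stack.vaddr() == 0x200000 && stack.size() == 16 * PAGE_SIZE);
        // A write one byte below vaddr() lands in an unmapped guard page and
        // faults instead of silently corrupting the neighbouring allocation.
        printf("usable: [%#zx, %#zx)\n", stack.vaddr(), stack.vaddr() + stack.size());
    }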

View File

@ -18,14 +18,14 @@ namespace Kernel
BAN::ErrorOr<BAN::UniqPtr<ARPTable>> ARPTable::create()
{
auto arp_table = TRY(BAN::UniqPtr<ARPTable>::create());
arp_table->m_process = Process::create_kernel(
arp_table->m_thread = TRY(Thread::create_kernel(
[](void* arp_table_ptr)
{
auto& arp_table = *reinterpret_cast<ARPTable*>(arp_table_ptr);
arp_table.packet_handle_task();
}, arp_table.ptr()
);
ASSERT(arp_table->m_process);
));
TRY(Processor::scheduler().add_thread(arp_table->m_thread));
return arp_table;
}
@ -35,9 +35,9 @@ namespace Kernel
ARPTable::~ARPTable()
{
if (m_process)
m_process->exit(0, SIGKILL);
m_process = nullptr;
if (m_thread)
m_thread->add_signal(SIGKILL);
m_thread = nullptr;
}
BAN::ErrorOr<BAN::MACAddress> ARPTable::get_mac_from_ipv4(NetworkInterface& interface, BAN::IPv4Address ipv4_address)

View File

@ -21,21 +21,21 @@ namespace Kernel
BAN::ErrorOr<BAN::UniqPtr<IPv4Layer>> IPv4Layer::create()
{
auto ipv4_manager = TRY(BAN::UniqPtr<IPv4Layer>::create());
ipv4_manager->m_process = Process::create_kernel(
ipv4_manager->m_thread = TRY(Thread::create_kernel(
[](void* ipv4_manager_ptr)
{
auto& ipv4_manager = *reinterpret_cast<IPv4Layer*>(ipv4_manager_ptr);
ipv4_manager.packet_handle_task();
}, ipv4_manager.ptr()
);
ASSERT(ipv4_manager->m_process);
));
TRY(Processor::scheduler().add_thread(ipv4_manager->m_thread));
ipv4_manager->m_pending_packet_buffer = TRY(VirtualRange::create_to_vaddr_range(
PageTable::kernel(),
KERNEL_OFFSET,
~(uintptr_t)0,
pending_packet_buffer_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true
true, false
));
ipv4_manager->m_arp_table = TRY(ARPTable::create());
return ipv4_manager;
@ -46,9 +46,9 @@ namespace Kernel
IPv4Layer::~IPv4Layer()
{
if (m_process)
m_process->exit(0, SIGKILL);
m_process = nullptr;
if (m_thread)
m_thread->add_signal(SIGKILL);
m_thread = nullptr;
}
void IPv4Layer::add_ipv4_header(BAN::ByteSpan packet, BAN::IPv4Address src_ipv4, BAN::IPv4Address dst_ipv4, uint8_t protocol) const

View File

@ -16,7 +16,7 @@ namespace Kernel
BAN::numeric_limits<vaddr_t>::max(),
buffer_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true
true, false
));
loopback->set_ipv4_address({ 127, 0, 0, 1 });
loopback->set_netmask({ 255, 0, 0, 0 });

View File

@ -1,6 +1,7 @@
#include <kernel/Lock/LockGuard.h>
#include <kernel/Networking/NetworkManager.h>
#include <kernel/Networking/TCPSocket.h>
#include <kernel/Process.h>
#include <kernel/Random.h>
#include <kernel/Timer/Timer.h>
@ -31,7 +32,7 @@ namespace Kernel
~(vaddr_t)0,
s_recv_window_buffer_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true
true, false
));
socket->m_recv_window.scale_shift = PAGE_SIZE_SHIFT; // use PAGE_SIZE windows
socket->m_send_window.buffer = TRY(VirtualRange::create_to_vaddr_range(
@ -40,14 +41,15 @@ namespace Kernel
~(vaddr_t)0,
s_send_window_buffer_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true
true, false
));
socket->m_process = Process::create_kernel(
socket->m_thread = TRY(Thread::create_kernel(
[](void* socket_ptr)
{
reinterpret_cast<TCPSocket*>(socket_ptr)->process_task();
}, socket.ptr()
);
));
TRY(Processor::scheduler().add_thread(socket->m_thread));
// hack to keep socket alive until its process starts
socket->ref();
return socket;
@ -63,7 +65,7 @@ namespace Kernel
TCPSocket::~TCPSocket()
{
ASSERT(!is_bound());
ASSERT(m_process == nullptr);
ASSERT(m_thread == nullptr);
dprintln_if(DEBUG_TCP, "Socket destroyed");
}
@ -620,7 +622,7 @@ namespace Kernel
dprintln_if(DEBUG_TCP, "Socket unbound");
}
m_process = nullptr;
m_thread = nullptr;
}
void TCPSocket::remove_listen_child(BAN::RefPtr<TCPSocket> socket)
@ -652,7 +654,7 @@ namespace Kernel
LockGuard _(m_mutex);
while (m_process)
while (m_thread)
{
const uint64_t current_ms = SystemTimer::get().ms_since_boot();

View File

@ -17,7 +17,7 @@ namespace Kernel
~(uintptr_t)0,
packet_buffer_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true
true, false
));
return socket;
}

View File

@ -3,6 +3,7 @@
#include <kernel/Lock/SpinLockAsMutex.h>
#include <kernel/Networking/NetworkManager.h>
#include <kernel/Networking/UNIX/Socket.h>
#include <kernel/Process.h>
#include <kernel/Scheduler.h>
#include <fcntl.h>
@ -28,7 +29,7 @@ namespace Kernel
~(uintptr_t)0,
s_packet_buffer_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true
true, false
));
return socket;
}

View File

@ -3,6 +3,7 @@
#include <kernel/Lock/LockGuard.h>
#include <kernel/Networking/NetworkManager.h>
#include <kernel/OpenFileDescriptorSet.h>
#include <kernel/Process.h>
#include <fcntl.h>
#include <sys/file.h>

View File

@ -97,21 +97,6 @@ namespace Kernel
MUST(Processor::scheduler().add_thread(thread));
}
Process* Process::create_kernel()
{
auto* process = create_process({ 0, 0, 0, 0 }, 0);
return process;
}
Process* Process::create_kernel(entry_t entry, void* data)
{
auto* process = create_process({ 0, 0, 0, 0 }, 0);
auto* thread = MUST(Thread::create_kernel(entry, data, process));
process->add_thread(thread);
process->register_to_scheduler();
return process;
}
BAN::ErrorOr<Process*> Process::create_userspace(const Credentials& credentials, BAN::StringView path, BAN::Span<BAN::StringView> arguments)
{
auto* process = create_process(credentials, 0);
@ -609,7 +594,6 @@ namespace Kernel
forked->m_page_table = BAN::move(page_table);
forked->m_open_file_descriptors = BAN::move(*open_file_descriptors);
forked->m_mapped_regions = BAN::move(mapped_regions);
forked->m_is_userspace = m_is_userspace;
forked->m_has_called_exec = false;
memcpy(forked->m_signal_handlers, m_signal_handlers, sizeof(m_signal_handlers));

View File

@ -4,6 +4,7 @@
#include <kernel/Lock/Mutex.h>
#include <kernel/Process.h>
#include <kernel/Scheduler.h>
#include <kernel/SchedulerQueueNode.h>
#include <kernel/Thread.h>
#include <kernel/Timer/Timer.h>
@ -118,7 +119,7 @@ namespace Kernel
BAN::ErrorOr<void> Scheduler::initialize()
{
m_idle_thread = TRY(Thread::create_kernel([](void*) { asm volatile("1: hlt; jmp 1b"); }, nullptr, nullptr));
m_idle_thread = TRY(Thread::create_kernel([](void*) { asm volatile("1: hlt; jmp 1b"); }, nullptr));
ASSERT(m_idle_thread);
size_t processor_index = 0;
@ -307,8 +308,11 @@ namespace Kernel
while (!m_block_queue.empty() && current_ns >= m_block_queue.front()->wake_time_ns)
{
auto* node = m_block_queue.pop_front();
if (node->blocker)
node->blocker->remove_blocked_thread(node);
{
SpinLockGuard _(node->blocker_lock);
if (node->blocker)
node->blocker->remove_blocked_thread(node);
}
node->blocked = false;
update_most_loaded_node_queue(node, &m_run_queue);
m_run_queue.add_thread_to_back(node);
@ -336,8 +340,11 @@ namespace Kernel
return;
if (node != m_current)
m_block_queue.remove_node(node);
if (node->blocker)
node->blocker->remove_blocked_thread(node);
{
SpinLockGuard _(node->blocker_lock);
if (node->blocker)
node->blocker->remove_blocked_thread(node);
}
node->blocked = false;
if (node != m_current)
m_run_queue.add_thread_to_back(node);
@ -618,8 +625,13 @@ namespace Kernel
m_current->blocked = true;
m_current->wake_time_ns = wake_time_ns;
if (blocker)
blocker->add_thread_to_block_queue(m_current);
{
SpinLockGuard _(m_current->blocker_lock);
if (blocker)
blocker->add_thread_to_block_queue(m_current);
}
update_most_loaded_node_queue(m_current, &m_block_queue);
uint32_t lock_depth = 0;
@ -642,10 +654,7 @@ namespace Kernel
void Scheduler::unblock_thread(Thread* thread)
{
auto state = Processor::get_interrupt_state();
Processor::set_interrupt_state(InterruptState::Disabled);
unblock_thread(thread->m_scheduler_node);
Processor::set_interrupt_state(state);
}
Thread& Scheduler::current_thread()

View File

@ -126,26 +126,7 @@ namespace Kernel
{
if (io_read(ATA_PORT_STATUS) & ATA_STATUS_ERR)
dprintln("ATA Error: {}", error());
bool expected { false };
[[maybe_unused]] bool success = m_has_got_irq.compare_exchange(expected, true);
ASSERT(success);
}
BAN::ErrorOr<void> ATABus::block_until_irq()
{
const uint64_t timeout_ms = SystemTimer::get().ms_since_boot() + s_ata_timeout_ms;
bool expected { true };
while (!m_has_got_irq.compare_exchange(expected, false))
{
if (SystemTimer::get().ms_since_boot() >= timeout_ms)
return BAN::Error::from_errno(ETIMEDOUT);
Processor::pause();
expected = true;
}
return {};
m_thread_blocker.unblock();
}
uint8_t ATABus::io_read(uint16_t port)
@ -192,22 +173,30 @@ namespace Kernel
for (uint32_t i = 0; i < 4; i++)
io_read(ATA_PORT_ALT_STATUS);
uint64_t timeout = SystemTimer::get().ms_since_boot() + s_ata_timeout_ms;
const uint64_t start_ms = SystemTimer::get().ms_since_boot();
const uint64_t timeout_ms = start_ms + s_ata_timeout_ms;
uint8_t status;
while ((status = io_read(ATA_PORT_STATUS)) & ATA_STATUS_BSY)
if (SystemTimer::get().ms_since_boot() >= timeout)
return BAN::Error::from_errno(ETIMEDOUT);
while (wait_drq && !(status & ATA_STATUS_DRQ))
for (;;)
{
if (SystemTimer::get().ms_since_boot() >= timeout)
return BAN::Error::from_errno(ETIMEDOUT);
const uint8_t status = io_read(ATA_PORT_ALT_STATUS);
if (status & ATA_STATUS_BSY)
goto drive_not_ready;
if (!wait_drq || (status & ATA_STATUS_DRQ))
break;
if (status & ATA_STATUS_ERR)
return error();
if (status & ATA_STATUS_DF)
return BAN::Error::from_errno(EIO);
status = io_read(ATA_PORT_STATUS);
drive_not_ready:
const uint64_t current_ms = SystemTimer::get().ms_since_boot();
if (current_ms >= timeout_ms)
return BAN::Error::from_errno(ETIMEDOUT);
// NOTE: poll for 5 milliseconds, then just block
// until timeout or irq
if (current_ms < start_ms + 5)
continue;
m_thread_blocker.block_with_timeout_ms(timeout_ms - current_ms, nullptr);
}
return {};
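
The rewritten wait() polls the alternate status register for the first 5 ms and then parks the thread on m_thread_blocker for whatever remains of the timeout, rather than busy-waiting for the full s_ata_timeout_ms. A userspace sketch of that poll-then-block shape, with std::condition_variable standing in for the kernel's ThreadBlocker; only the 5 ms threshold is taken from the diff, everything else is illustrative:

    #include <chrono>
    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    std::mutex mtx;
    std::condition_variable cv;
    bool device_ready = false; // stands in for "BSY clear / DRQ set"

    bool wait_ready(std::chrono::milliseconds timeout)
    {
        using clock = std::chrono::steady_clock;
        const auto start    = clock::now();
        const auto deadline = start + timeout;

        for (;;)
        {
            {
                std::lock_guard<std::mutex> lock(mtx);
                if (device_ready)            // kernel: status checks on ATA_PORT_ALT_STATUS
                    return true;
            }
            const auto now = clock::now();
            if (now >= deadline)
                return false;                // kernel: BAN::Error::from_errno(ETIMEDOUT)
            if (now < start + std::chrono::milliseconds(5))
                continue;                    // poll for the first 5 ms
            std::unique_lock<std::mutex> lock(mtx);
            cv.wait_until(lock, deadline);   // then block until the "irq" or the deadline
        }
    }

    int main()
    {
        std::thread irq([] {
            std::this_thread::sleep_for(std::chrono::milliseconds(20));
            { std::lock_guard<std::mutex> lock(mtx); device_ready = true; }
            cv.notify_all();                 // kernel: m_thread_blocker.unblock() from the ISR
        });
        printf("ready: %d\n", wait_ready(std::chrono::milliseconds(100)));
        irq.join();
    }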
@ -249,7 +238,7 @@ namespace Kernel
for (uint32_t sector = 0; sector < sector_count; sector++)
{
TRY(block_until_irq());
TRY(wait(true));
read_buffer(ATA_PORT_DATA, (uint16_t*)buffer.data() + sector * device.words_per_sector(), device.words_per_sector());
}
@ -269,12 +258,12 @@ namespace Kernel
for (uint32_t sector = 0; sector < sector_count; sector++)
{
TRY(wait(true));
write_buffer(ATA_PORT_DATA, (uint16_t*)buffer.data() + sector * device.words_per_sector(), device.words_per_sector());
TRY(block_until_irq());
}
TRY(wait(false));
io_write(ATA_PORT_COMMAND, ATA_COMMAND_CACHE_FLUSH);
TRY(block_until_irq());
return {};
}
@ -310,9 +299,10 @@ namespace Kernel
io_lba2 = (cylinder >> 8) & 0xFF;
}
TRY(wait(false));
io_write(ATA_PORT_DRIVE_SELECT, io_select);
select_delay();
io_write(ATA_PORT_CONTROL, 0);
io_write(ATA_PORT_SECTOR_COUNT, sector_count);
io_write(ATA_PORT_LBA0, io_lba0);

View File

@ -79,9 +79,7 @@ namespace Kernel
for (auto& pixel : m_cursor_data)
pixel = color.rgb;
for (uint32_t y = 0; y < m_framebuffer_device->height(); y++)
for (uint32_t x = 0; x < m_framebuffer_device->width(); x++)
m_framebuffer_device->set_pixel(x, y, color.rgb);
m_framebuffer_device->fill(color.rgb);
m_framebuffer_device->sync_pixels_full();
if (m_cursor_shown)

View File

@ -44,7 +44,8 @@ namespace Kernel
PageTable::kernel(),
KERNEL_OFFSET, static_cast<vaddr_t>(-1),
16 * PAGE_SIZE,
PageTable::Flags::ReadWrite | PageTable::Flags::Present, true
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, false
));
auto pts_master = TRY(BAN::RefPtr<PseudoTerminalMaster>::create(BAN::move(pts_master_buffer), mode, uid, gid));
DevFileSystem::get().remove_from_cache(pts_master);

View File

@ -161,7 +161,7 @@ namespace Kernel
static bool initialized = false;
ASSERT(!initialized);
auto* thread = MUST(Thread::create_kernel(&TTY::keyboard_task, nullptr, nullptr));
auto* thread = MUST(Thread::create_kernel(&TTY::keyboard_task, nullptr));
MUST(Processor::scheduler().add_thread(thread));
DevFileSystem::get().add_inode("tty", MUST(DevTTY::create(0666, 0, 0)));

View File

@ -69,10 +69,10 @@ namespace Kernel
s_default_sse_storage_initialized = true;
}
BAN::ErrorOr<Thread*> Thread::create_kernel(entry_t entry, void* data, Process* process)
BAN::ErrorOr<Thread*> Thread::create_kernel(entry_t entry, void* data)
{
// Create the thread object
Thread* thread = new Thread(s_next_tid++, process);
Thread* thread = new Thread(s_next_tid++, nullptr);
if (thread == nullptr)
return BAN::Error::from_errno(ENOMEM);
BAN::ScopeGuard thread_deleter([thread] { delete thread; });
@ -84,7 +84,7 @@ namespace Kernel
~(uintptr_t)0,
kernel_stack_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true
true, true
));
// Initialize stack for returning
@ -124,7 +124,7 @@ namespace Kernel
0x200000, USERSPACE_END,
kernel_stack_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true
true, true
));
thread->m_userspace_stack = TRY(VirtualRange::create_to_vaddr_range(
@ -132,7 +132,7 @@ namespace Kernel
0x200000, USERSPACE_END,
userspace_stack_size,
PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true
true, true
));
thread_deleter.disable();
@ -285,6 +285,9 @@ namespace Kernel
// auxv
needed_size += auxv.size() * sizeof(LibELF::AuxiliaryVector);
if (auto rem = needed_size % alignof(char*))
needed_size += alignof(char*) - rem;
if (needed_size > m_userspace_stack->size())
return BAN::Error::from_errno(ENOBUFS);
@ -358,8 +361,6 @@ namespace Kernel
stack_push_str(envp[i]);
}
ASSERT(vaddr == userspace_stack_top());
setup_exec(entry, userspace_stack_top() - needed_size);
return {};

View File

@ -1,4 +1,5 @@
#include <kernel/Processor.h>
#include <kernel/SchedulerQueueNode.h>
#include <kernel/ThreadBlocker.h>
#include <kernel/Timer/Timer.h>
@ -22,71 +23,60 @@ namespace Kernel
void ThreadBlocker::unblock()
{
SchedulerQueue::Node* block_chain;
decltype(m_block_chain) temp_block_chain;
size_t temp_block_chain_length { 0 };
{
SpinLockGuard _(m_lock);
block_chain = m_block_chain;
m_block_chain = nullptr;
for (size_t i = 0; i < m_block_chain_length; i++)
temp_block_chain[i] = m_block_chain[i];
temp_block_chain_length = m_block_chain_length;
m_block_chain_length = 0;
}
for (auto* node = block_chain; node;)
{
ASSERT(node->blocked);
auto* next = node->block_chain_next;
node->blocker = nullptr;
node->block_chain_next = nullptr;
node->block_chain_prev = nullptr;
Processor::scheduler().unblock_thread(node);
node = next;
}
for (size_t i = 0; i < temp_block_chain_length; i++)
Processor::scheduler().unblock_thread(temp_block_chain[i]);
}
void ThreadBlocker::add_thread_to_block_queue(SchedulerQueue::Node* node)
{
ASSERT(node->blocker_lock.current_processor_has_lock());
SpinLockGuard _(m_lock);
ASSERT(m_block_chain_length < sizeof(m_block_chain) / sizeof(m_block_chain[0]));
ASSERT(node);
ASSERT(node->blocked);
ASSERT(node->blocker == nullptr);
ASSERT(node->block_chain_prev == nullptr);
ASSERT(node->block_chain_next == nullptr);
SpinLockGuard _(m_lock);
for (size_t i = 0 ; i < m_block_chain_length; i++)
ASSERT(m_block_chain[i] != node);
m_block_chain[m_block_chain_length++] = node;
node->blocker = this;
node->block_chain_prev = nullptr;
node->block_chain_next = m_block_chain;
if (m_block_chain)
m_block_chain->block_chain_prev = node;
m_block_chain = node;
}
void ThreadBlocker::remove_blocked_thread(SchedulerQueue::Node* node)
{
ASSERT(node->blocker_lock.current_processor_has_lock());
SpinLockGuard _(m_lock);
ASSERT(node);
ASSERT(node->blocked);
ASSERT(node->blocker == this);
if (node == m_block_chain)
for (size_t i = 0 ; i < m_block_chain_length; i++)
{
ASSERT(node->block_chain_prev == nullptr);
m_block_chain = node->block_chain_next;
if (m_block_chain)
m_block_chain->block_chain_prev = nullptr;
}
else
{
ASSERT(node->block_chain_prev);
node->block_chain_prev->block_chain_next = node->block_chain_next;
if (node->block_chain_next)
node->block_chain_next->block_chain_prev = node->block_chain_prev;
if (m_block_chain[i] != node)
continue;
for (size_t j = i + 1; j < m_block_chain_length; j++)
m_block_chain[j - 1] = m_block_chain[j];
m_block_chain_length--;
}
node->blocker = nullptr;
node->block_chain_next = nullptr;
node->block_chain_prev = nullptr;
}
}
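
The intrusive block_chain_next/block_chain_prev list is gone: a ThreadBlocker now keeps a small fixed array of blocked nodes, add and remove shuffle that array under m_lock (with the ASSERT guarding the capacity), and unblock() copies the entries out before waking them so no scheduler call happens while the blocker lock is held. A minimal userspace sketch of the capped-array bookkeeping; the capacity and node type are illustrative:

    #include <cassert>
    #include <cstddef>
    #include <cstdio>
    #include <mutex>

    struct Node { int id; };

    class Blocker
    {
    public:
        void add(Node* node)
        {
            std::lock_guard<std::mutex> _(m_lock);
            assert(m_length < s_capacity);           // kernel ASSERTs the same bound
            m_chain[m_length++] = node;
        }

        void remove(Node* node)
        {
            std::lock_guard<std::mutex> _(m_lock);
            for (size_t i = 0; i < m_length; i++)
            {
                if (m_chain[i] != node)
                    continue;
                for (size_t j = i + 1; j < m_length; j++) // shift the tail down
                    m_chain[j - 1] = m_chain[j];
                m_length--;
                return;
            }
        }

        void unblock()
        {
            Node* woken[s_capacity];
            size_t count = 0;
            {
                std::lock_guard<std::mutex> _(m_lock);   // copy out under the lock...
                for (size_t i = 0; i < m_length; i++)
                    woken[count++] = m_chain[i];
                m_length = 0;
            }
            for (size_t i = 0; i < count; i++)           // ...wake without holding it
                printf("waking node %d\n", woken[i]->id);
        }

    private:
        static constexpr size_t s_capacity = 8;          // illustrative cap
        std::mutex m_lock;
        Node* m_chain[s_capacity] {};
        size_t m_length = 0;
    };

    int main()
    {
        Node a { 1 }, b { 2 }, c { 3 };
        Blocker blocker;
        blocker.add(&a); blocker.add(&b); blocker.add(&c);
        blocker.remove(&b);
        blocker.unblock();                                // wakes 1 and 3
    }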

View File

@ -215,9 +215,8 @@ namespace Kernel
m_changed_ports |= 1u << port_id;
}
m_port_updater = Process::create_kernel([](void* data) { reinterpret_cast<USBHubDriver*>(data)->port_updater_task(); }, this);
if (m_port_updater == nullptr)
return BAN::Error::from_errno(ENOMEM);
m_port_updater = TRY(Thread::create_kernel([](void* data) { reinterpret_cast<USBHubDriver*>(data)->port_updater_task(); }, this));
TRY(Processor::scheduler().add_thread(m_port_updater));
return {};
}

View File

@ -135,9 +135,8 @@ namespace Kernel
while (operational.usbsts & XHCI::USBSTS::HCHalted)
continue;
m_port_updater = Process::create_kernel([](void* data) { reinterpret_cast<XHCIController*>(data)->port_updater_task(); }, this);
if (m_port_updater == nullptr)
return BAN::Error::from_errno(ENOMEM);
m_port_updater = TRY(Thread::create_kernel([](void* data) { reinterpret_cast<XHCIController*>(data)->port_updater_task(); }, this));
TRY(Processor::scheduler().add_thread(m_port_updater));
return {};
}

View File

@ -199,7 +199,8 @@ extern "C" void kernel_main(uint32_t boot_magic, uint32_t boot_info)
Processor::wait_until_processors_ready();
MUST(Processor::scheduler().initialize());
Process::create_kernel(init2, nullptr);
auto* init_thread = MUST(Thread::create_kernel(init2, nullptr));
MUST(Processor::scheduler().add_thread(init_thread));
Processor::yield();
ASSERT_NOT_REACHED();
@ -258,6 +259,9 @@ static void init2(void*)
VirtualFileSystem::initialize(cmdline.root);
dprintln("VFS initialized");
// FIXME: release memory used by modules. If modules are used
// they are already loaded in here
TTY::initialize_devices();
auto console_path = MUST(BAN::String::formatted("/dev/{}", cmdline.console));

View File

@ -8,4 +8,5 @@ CONFIGURE_OPTIONS=(
'--disable-nls'
'--disable-posix-spawn'
'--enable-year2038'
'CFLAGS=-std=c17'
)

View File

@ -22,6 +22,7 @@ set(CMAKE_SYSTEM_NAME banan-os)
set(CMAKE_SYSTEM_PROCESSOR ${BANAN_ARCH})
set(CMAKE_SYSROOT ${BANAN_SYSROOT})
set(CMAKE_STAGING_PREFIX ${BANAN_SYSROOT}/usr)
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)

View File

@ -908,12 +908,16 @@ void setbuf(FILE* file, char* buffer)
int setvbuf(FILE* file, char* buffer, int type, size_t size)
{
ScopeLock _(file);
if (file->fd == -1)
{
errno = EBADF;
return -1;
}
(void)fflush(file);
if (size == 0)
type = _IONBF;
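
setvbuf now flushes the stream before the buffering mode or buffer is swapped, so bytes already queued through the old buffer are written out in order rather than lost or reordered. A short caller-side sketch; note that ISO C only guarantees setvbuf before the first operation on the stream, so calling it late like this relies on exactly the behaviour this libc change provides:

    #include <stdio.h>

    int main()
    {
        FILE* f = fopen("out.txt", "w");
        if (!f)
            return 1;

        fputs("written through the default buffer\n", f);

        // With the change above, this flushes the line queued in the old
        // buffer before switching the stream to unbuffered mode, so the
        // file keeps the original write order.
        setvbuf(f, NULL, _IONBF, 0);

        fputs("written unbuffered\n", f);
        fclose(f);
        return 0;
    }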

View File

@ -4,6 +4,7 @@ set(USERSPACE_PROGRAMS
cat
cat-mmap
chmod
chown
cp
dd
dhcp-client
@ -11,6 +12,7 @@ set(USERSPACE_PROGRAMS
DynamicLoader
echo
env
false
getopt
http-server
id
@ -36,6 +38,8 @@ set(USERSPACE_PROGRAMS
sudo
sync
tee
test
true
TaskBar
Terminal
touch

View File

@ -0,0 +1,9 @@
set(SOURCES
main.cpp
)
add_executable(chown ${SOURCES})
banan_link_library(chown ban)
banan_link_library(chown libc)
install(TARGETS chown OPTIONAL)

View File

@ -0,0 +1,109 @@
#include <ctype.h>
#include <errno.h>
#include <grp.h>
#include <pwd.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>
void usage(const char* argv0, int ret)
{
FILE* out = (ret == 0) ? stdout : stderr;
fprintf(out, "usage: %s [OWNER][:[GROUP]] FILE...\n", argv0);
fprintf(out, " Change the owner and/or group of each FILE.\n");
exit(ret);
}
[[noreturn]] void print_error_and_exit(const char* format, ...)
{
va_list args;
va_start(args, format);
vfprintf(stderr, format, args);
va_end(args);
exit(1);
__builtin_unreachable();
}
const passwd* get_user(const char* string)
{
bool is_numeric = true;
for (size_t i = 0; string[i] && is_numeric; i++)
if (!isdigit(string[i]))
is_numeric = false;
if (is_numeric)
return getpwuid(atoll(string));
return getpwnam(string);
}
const group* get_group(const char* string)
{
bool is_numeric = true;
for (size_t i = 0; string[i] && is_numeric; i++)
if (!isdigit(string[i]))
is_numeric = false;
if (is_numeric)
return getgrgid(atoll(string));
return getgrnam(string);
}
int main(int argc, char** argv)
{
if (argc <= 2)
usage(argv[0], 1);
uid_t uid = -1;
gid_t gid = -1;
const char* owner_string = argv[1];
const char* colon = strchr(owner_string, ':');
if (colon == owner_string)
{
const auto* group = get_group(owner_string + 1);
if (group == nullptr)
print_error_and_exit("could not find group %s\n", owner_string + 1);
gid = group->gr_gid;
}
else if (colon == nullptr)
{
const auto* user = get_user(owner_string);
if (user == nullptr)
print_error_and_exit("could not find user %s\n", owner_string);
uid = user->pw_uid;
}
else
{
char* user_name = strndup(owner_string, colon - owner_string);
if (user_name == nullptr)
print_error_and_exit("strndup: %s\n", strerror(errno));
const auto* user = get_user(user_name);
if (user == nullptr)
print_error_and_exit("could not find user %s\n", user_name);
free(user_name);
uid = user->pw_uid;
if (colon[1] == '\0')
gid = user->pw_gid;
else
{
const auto* group = get_group(colon + 1);
if (group == nullptr)
print_error_and_exit("could not find group %s\n", colon + 1);
gid = group->gr_gid;
}
}
int ret = 0;
for (int i = 2; i < argc; i++)
{
if (chown(argv[i], uid, gid) == -1)
{
perror("chown");
ret = 1;
}
}
return ret;
}
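
Argument handling in the new chown follows the usual OWNER[:[GROUP]] grammar: no colon changes only the owner, a leading colon changes only the group, a trailing colon also applies the named user's login group, and numeric IDs are resolved through getpwuid/getgrgid. Hypothetical invocations (the users, groups and files are examples, not anything shipped with the system):

    chown root /etc/passwd        owner only
    chown :wheel /usr/bin/sudo    group only
    chown user: /home/user/file   owner plus that user's login group
    chown 1000:1000 build.log     numeric uid and gid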

View File

@ -0,0 +1,9 @@
set(SOURCES
main.cpp
)
add_executable(false ${SOURCES})
banan_link_library(false ban)
banan_link_library(false libc)
install(TARGETS false OPTIONAL)

View File

@ -0,0 +1,6 @@
#include <stdlib.h>
int main()
{
return EXIT_FAILURE;
}

View File

@ -0,0 +1,9 @@
set(SOURCES
main.cpp
)
add_executable(true ${SOURCES})
banan_link_library(true ban)
banan_link_library(true libc)
install(TARGETS true OPTIONAL)

View File

@ -0,0 +1,6 @@
#include <stdlib.h>
int main()
{
return EXIT_SUCCESS;
}