Compare commits


No commits in common. "995dfa145559f4882025843cb1559402a11d1ccb" and "f2eaab6e43a635a3a2bc9f3f26c254570848dcb3" have entirely different histories.

80 changed files with 685 additions and 1504 deletions

View File

@ -31,7 +31,6 @@ set(KERNEL_SOURCES
kernel/FS/ProcFS/Inode.cpp
kernel/FS/TmpFS/FileSystem.cpp
kernel/FS/TmpFS/Inode.cpp
kernel/FS/USTARModule.cpp
kernel/FS/VirtualFileSystem.cpp
kernel/GDT.cpp
kernel/IDT.cpp

View File

@ -206,7 +206,6 @@ namespace Kernel
{
ASSERT(s_kernel);
ASSERT(paddr);
ASSERT(paddr % PAGE_SIZE == 0);
ASSERT(s_fast_page_lock.current_processor_has_lock());

View File

@ -36,12 +36,6 @@ multiboot2_start:
.long 12
.long V2P(_start)
# page align modules
.align 8
.short 6
.short 0
.long 8
.align 8
.short 0
.short 0
@ -59,10 +53,10 @@ bananboot_start:
bananboot_end:
.section .bss, "aw", @nobits
.global g_boot_stack_top
g_boot_stack_bottom:
.align 4096
boot_stack_bottom:
.skip 4096 * 4
g_boot_stack_top:
boot_stack_top:
.global g_kernel_cmdline
g_kernel_cmdline:
@ -194,7 +188,7 @@ _start:
movl %ebx, V2P(bootloader_info)
# load boot stack
movl $V2P(g_boot_stack_top), %esp
movl $V2P(boot_stack_top), %esp
# load boot GDT
lgdt V2P(boot_gdtr)
@ -212,7 +206,7 @@ gdt_flush:
call initialize_paging
# load higher half stack pointer
movl $g_boot_stack_top, %esp
movl $boot_stack_top, %esp
# jump to higher half
leal higher_half, %ecx

View File

@ -503,7 +503,6 @@ namespace Kernel
{
ASSERT(s_kernel);
ASSERT(paddr);
ASSERT(paddr % PAGE_SIZE == 0);
ASSERT(s_fast_page_lock.current_processor_has_lock());

View File

@ -36,12 +36,6 @@ multiboot2_start:
.long 12
.long V2P(_start)
# page align modules
.align 8
.short 6
.short 0
.long 8
.align 8
.short 0
.short 0
@ -59,10 +53,9 @@ bananboot_start:
bananboot_end:
.section .bss, "aw", @nobits
.global g_boot_stack_top
g_boot_stack_bottom:
.skip 4096 * 4
g_boot_stack_top:
boot_stack_bottom:
.skip 4096 * 64
boot_stack_top:
.global g_kernel_cmdline
g_kernel_cmdline:
@ -194,7 +187,7 @@ _start:
movl %eax, V2P(bootloader_magic)
movl %ebx, V2P(bootloader_info)
movl $V2P(g_boot_stack_top), %esp
movl $V2P(boot_stack_top), %esp
call check_requirements
call enable_sse

View File

@ -87,12 +87,6 @@ namespace Kernel::ACPI::AML
struct OpRegion
{
GAS::AddressSpaceID address_space;
uint16_t seg;
uint8_t bus;
uint8_t dev;
uint8_t func;
uint64_t offset;
uint64_t length;
};

View File

@ -41,12 +41,6 @@ namespace Kernel
Type type;
};
struct BootModule
{
paddr_t start;
size_t size;
};
struct BootInfo
{
BAN::String command_line;
@ -54,7 +48,6 @@ namespace Kernel
RSDP rsdp {};
paddr_t kernel_paddr {};
BAN::Vector<BootModule> modules;
BAN::Vector<MemoryMapEntry> memory_map_entries;
};

View File

@ -17,7 +17,6 @@ namespace Kernel
uint32_t get_pixel(uint32_t x, uint32_t y) const;
void set_pixel(uint32_t x, uint32_t y, uint32_t rgb);
void fill(uint32_t rgb);
// positive rows -> empty pixels on bottom
// negative rows -> empty pixels on top

View File

@ -62,6 +62,8 @@ namespace Kernel
virtual BAN::RefPtr<Inode> root_inode() override { return m_root_inode; }
virtual dev_t dev() const override { return m_block_device->rdev(); };
private:
Ext2FS(BAN::RefPtr<BlockDevice> block_device)
: m_block_device(block_device)

View File

@ -46,7 +46,6 @@ namespace Kernel
virtual BAN::ErrorOr<size_t> write_impl(off_t, BAN::ConstByteSpan) override;
virtual BAN::ErrorOr<void> truncate_impl(size_t) override;
virtual BAN::ErrorOr<void> chmod_impl(mode_t) override;
virtual BAN::ErrorOr<void> chown_impl(uid_t, gid_t) override;
virtual BAN::ErrorOr<void> utimens_impl(const timespec[2]) override;
virtual BAN::ErrorOr<void> fsync_impl() override;

View File

@ -37,6 +37,8 @@ namespace Kernel
virtual BAN::RefPtr<Inode> root_inode() override { return m_root_inode; }
virtual dev_t dev() const override { return m_block_device->rdev(); };
BAN::ErrorOr<BAN::RefPtr<FATInode>> open_inode(BAN::RefPtr<FATInode> parent, const FAT::DirectoryEntry& entry, uint32_t cluster_index, uint32_t entry_index);
BAN::ErrorOr<void> inode_read_cluster(BAN::RefPtr<FATInode>, size_t index, BAN::ByteSpan buffer);
blksize_t inode_block_size(BAN::RefPtr<const FATInode>) const;

View File

@ -26,6 +26,8 @@ namespace Kernel
static BAN::ErrorOr<BAN::RefPtr<FileSystem>> from_block_device(BAN::RefPtr<BlockDevice>);
virtual BAN::RefPtr<Inode> root_inode() = 0;
virtual dev_t dev() const = 0;
};
}

View File

@ -58,7 +58,7 @@ namespace Kernel
virtual BAN::RefPtr<Inode> root_inode() override { return m_root_inode; }
dev_t rdev() const { return m_rdev; }
virtual dev_t dev() const override { return m_rdev; }
BAN::ErrorOr<BAN::RefPtr<TmpInode>> open_inode(ino_t ino);
@ -118,8 +118,16 @@ namespace Kernel
private:
InodeLocation find_inode(ino_t ino);
paddr_t find_block(size_t index);
template<TmpFuncs::for_each_indirect_paddr_allocating_callback F>
BAN::ErrorOr<void> for_each_indirect_paddr_allocating(PageInfo page_info, F callback, size_t depth);
template<TmpFuncs::for_each_indirect_paddr_allocating_callback F>
BAN::ErrorOr<BAN::Iteration> for_each_indirect_paddr_allocating_internal(PageInfo page_info, F callback, size_t depth);
paddr_t find_indirect(PageInfo root, size_t index, size_t depth);
private:
const dev_t m_rdev;
@ -138,14 +146,14 @@ namespace Kernel
static constexpr size_t max_data_pages =
(PAGE_SIZE / sizeof(PageInfo)) *
(PAGE_SIZE / sizeof(PageInfo)) *
(PAGE_SIZE / sizeof(PageInfo) - 1);
(PAGE_SIZE / sizeof(PageInfo));
// We store inodes in pages with double indirection.
// With 64-bit pointers we can store 512^2 pages of inodes
// which should be enough for now.
// In future this should be dynamically calculated based on maximum
// number of pages for this file system.
PageInfo m_inode_pages {};
PageInfo m_inode_pages;
static constexpr size_t first_inode = 1;
static constexpr size_t max_inodes =
(PAGE_SIZE / sizeof(PageInfo)) *
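For reference, a small standalone sketch of the capacity arithmetic behind the constants in this hunk. The 8-byte PageInfo size is an assumption for illustration; the kernel derives the real values from PAGE_SIZE and sizeof(PageInfo).

    // Standalone sketch of the indirection capacity math implied by the
    // constants above. Assumes 4 KiB pages and an 8-byte PageInfo.
    #include <cstddef>
    #include <cstdio>

    int main()
    {
        constexpr std::size_t page_size = 4096;
        constexpr std::size_t entries_per_page = page_size / 8; // 512 PageInfo entries

        // Data pages: triple indirection, every entry of every level usable.
        constexpr std::size_t max_data_pages = entries_per_page * entries_per_page * entries_per_page;

        // Inode pages: double indirection, as the comment in the hunk says.
        constexpr std::size_t max_inode_pages = entries_per_page * entries_per_page;

        std::printf("max data pages:  %zu (%llu GiB)\n", max_data_pages,
                    1ull * max_data_pages * page_size >> 30);
        std::printf("max inode pages: %zu\n", max_inode_pages);
        return 0;
    }

The dropped "- 1" corresponds to the old layout in which the last PageInfo slot of each leaf page held an allocated-pages counter (see the removed allocated_pages bookkeeping in TmpFS/FileSystem.cpp further down this compare).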

View File

@ -48,21 +48,15 @@ namespace Kernel
TmpInode(TmpFileSystem&, ino_t, const TmpInodeInfo&);
virtual BAN::ErrorOr<void> chmod_impl(mode_t) override;
virtual BAN::ErrorOr<void> chown_impl(uid_t, gid_t) override;
virtual BAN::ErrorOr<void> utimens_impl(const timespec[2]) override;
virtual BAN::ErrorOr<void> fsync_impl() override { return {}; }
void sync();
void free_all_blocks();
virtual BAN::ErrorOr<void> prepare_unlink() { return {}; };
void free_all_blocks();
void free_indirect_blocks(size_t block, uint32_t depth);
BAN::Optional<size_t> block_index(size_t data_block_index);
BAN::Optional<size_t> block_index_from_indirect(size_t block, size_t index, uint32_t depth);
BAN::ErrorOr<size_t> block_index_with_allocation(size_t data_block_index);
BAN::ErrorOr<size_t> block_index_from_indirect_with_allocation(size_t& block, size_t index, uint32_t depth);
protected:
TmpFileSystem& m_fs;
@ -123,9 +117,10 @@ namespace Kernel
static BAN::ErrorOr<BAN::RefPtr<TmpSymlinkInode>> create_new(TmpFileSystem&, mode_t, uid_t, gid_t, BAN::StringView target);
~TmpSymlinkInode();
BAN::ErrorOr<void> set_link_target(BAN::StringView);
protected:
BAN::ErrorOr<BAN::String> link_target_impl() override;
BAN::ErrorOr<void> set_link_target_impl(BAN::StringView) override;
virtual BAN::ErrorOr<BAN::String> link_target_impl() override;
virtual bool can_read_impl() const override { return false; }
virtual bool can_write_impl() const override { return false; }

View File

@ -1,12 +0,0 @@
#pragma once
#include <kernel/BootInfo.h>
#include <kernel/FS/FileSystem.h>
namespace Kernel
{
bool is_ustar_boot_module(const BootModule&);
BAN::ErrorOr<void> unpack_boot_module_into_filesystem(BAN::RefPtr<FileSystem>, const BootModule&);
}

View File

@ -29,6 +29,9 @@ namespace Kernel
virtual BAN::RefPtr<Inode> root_inode() override { return m_root_fs->root_inode(); }
// FIXME:
virtual dev_t dev() const override { return 0; }
BAN::ErrorOr<void> mount(const Credentials&, BAN::StringView, BAN::StringView);
BAN::ErrorOr<void> mount(const Credentials&, BAN::RefPtr<FileSystem>, BAN::StringView);

View File

@ -75,7 +75,7 @@ namespace Kernel
private:
IDT() = default;
void register_interrupt_handler(uint8_t index, void (*handler)(), uint8_t ist = 0);
void register_interrupt_handler(uint8_t index, void (*handler)());
void register_syscall_handler(uint8_t index, void (*handler)());
private:

View File

@ -27,9 +27,7 @@ namespace Kernel::Input
private:
PS2Controller() = default;
BAN::ErrorOr<void> initialize_impl(uint8_t scancode_set);
BAN::ErrorOr<void> identify_device(uint8_t, uint8_t scancode_set);
void device_initialize_task(void*);
BAN::ErrorOr<void> initialize_device(uint8_t, uint8_t scancode_set);
BAN::ErrorOr<uint8_t> read_byte();
BAN::ErrorOr<void> send_byte(uint16_t port, uint8_t byte);

View File

@ -22,7 +22,6 @@ namespace Kernel::Input
protected:
PS2Device(PS2Controller&, InputDevice::Type type);
virtual ~PS2Device();
protected:
PS2Controller& m_controller;

View File

@ -15,21 +15,21 @@ namespace Kernel
public:
// Create virtual range to fixed virtual address
static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr(PageTable&, vaddr_t, size_t, PageTable::flags_t flags, bool preallocate_pages, bool add_guard_pages);
static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr(PageTable&, vaddr_t, size_t, PageTable::flags_t flags, bool preallocate_pages);
// Create virtual range to virtual address range
static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr_range(PageTable&, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t, PageTable::flags_t flags, bool preallocate_pages, bool add_guard_pages);
static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr_range(PageTable&, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t, PageTable::flags_t flags, bool preallocate_pages);
~VirtualRange();
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> clone(PageTable&);
vaddr_t vaddr() const { return m_vaddr + (m_has_guard_pages ? PAGE_SIZE : 0); }
size_t size() const { return m_size - (m_has_guard_pages ? 2 * PAGE_SIZE : 0); }
vaddr_t vaddr() const { return m_vaddr; }
size_t size() const { return m_size; }
PageTable::flags_t flags() const { return m_flags; }
paddr_t paddr_of(vaddr_t vaddr) const
{
ASSERT(vaddr % PAGE_SIZE == 0);
const size_t index = (vaddr - this->vaddr()) / PAGE_SIZE;
const size_t index = (vaddr - m_vaddr) / PAGE_SIZE;
ASSERT(index < m_paddrs.size());
const paddr_t paddr = m_paddrs[index];
ASSERT(paddr);
@ -41,13 +41,12 @@ namespace Kernel
BAN::ErrorOr<void> allocate_page_for_demand_paging(vaddr_t address);
private:
VirtualRange(PageTable&, bool preallocated, bool has_guard_pages, vaddr_t, size_t, PageTable::flags_t);
VirtualRange(PageTable&, bool preallocated, vaddr_t, size_t, PageTable::flags_t);
BAN::ErrorOr<void> initialize();
private:
PageTable& m_page_table;
const bool m_preallocated;
const bool m_has_guard_pages;
const vaddr_t m_vaddr;
const size_t m_size;
const PageTable::flags_t m_flags;
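For context, a minimal standalone sketch of the guard-page accounting that this hunk removes, assuming one unmapped page on each side of the usable range. The names are illustrative, not the kernel's API.

    // Minimal sketch of the removed guard-page accounting.
    #include <cstddef>
    #include <cstdint>

    struct RangeWithGuards
    {
        static constexpr std::size_t page_size = 4096;

        std::uintptr_t reserved_vaddr; // start of the reservation, guard page included
        std::size_t    reserved_size;  // whole reservation, both guard pages included
        bool           has_guard_pages;

        // Usable start skips the leading guard page.
        std::uintptr_t vaddr() const { return reserved_vaddr + (has_guard_pages ? page_size : 0); }

        // Usable size excludes one guard page at each end.
        std::size_t size() const { return reserved_size - (has_guard_pages ? 2 * page_size : 0); }
    };

After this change a VirtualRange maps exactly [m_vaddr, m_vaddr + m_size), which is why paddr_of() can index straight from m_vaddr as shown in the hunk.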

View File

@ -4,7 +4,7 @@
#include <BAN/HashMap.h>
#include <BAN/UniqPtr.h>
#include <kernel/Networking/NetworkInterface.h>
#include <kernel/Thread.h>
#include <kernel/Process.h>
#include <kernel/ThreadBlocker.h>
namespace Kernel
@ -56,9 +56,9 @@ namespace Kernel
BAN::HashMap<BAN::IPv4Address, BAN::MACAddress> m_arp_table;
Thread* m_thread { nullptr };
Process* m_process = nullptr;
BAN::CircularQueue<PendingArpPacket, 128> m_pending_packets;
ThreadBlocker m_pending_thread_blocker;
ThreadBlocker m_pending_thread_blocker;
friend class BAN::UniqPtr<ARPTable>;
};

View File

@ -11,7 +11,7 @@
#include <kernel/Networking/NetworkInterface.h>
#include <kernel/Networking/NetworkLayer.h>
#include <kernel/Networking/NetworkSocket.h>
#include <kernel/Thread.h>
#include <kernel/Process.h>
namespace Kernel
{
@ -72,7 +72,7 @@ namespace Kernel
RecursiveSpinLock m_bound_socket_lock;
BAN::UniqPtr<ARPTable> m_arp_table;
Thread* m_thread { nullptr };
Process* m_process { nullptr };
static constexpr size_t pending_packet_buffer_size = 128 * PAGE_SIZE;
BAN::UniqPtr<VirtualRange> m_pending_packet_buffer;

View File

@ -6,7 +6,7 @@
#include <kernel/Memory/VirtualRange.h>
#include <kernel/Networking/NetworkInterface.h>
#include <kernel/Networking/NetworkSocket.h>
#include <kernel/Thread.h>
#include <kernel/Process.h>
#include <kernel/ThreadBlocker.h>
namespace Kernel
@ -162,7 +162,7 @@ namespace Kernel
State m_next_state { State::Closed };
uint8_t m_next_flags { 0 };
Thread* m_thread { nullptr };
Process* m_process { nullptr };
uint64_t m_time_wait_start_ms { 0 };

View File

@ -39,6 +39,8 @@ namespace Kernel
using entry_t = Thread::entry_t;
public:
static Process* create_kernel();
static Process* create_kernel(entry_t, void*);
static BAN::ErrorOr<Process*> create_userspace(const Credentials&, BAN::StringView path, BAN::Span<BAN::StringView> arguments);
~Process();
void cleanup_function(Thread*);
@ -215,6 +217,8 @@ namespace Kernel
size_t proc_cmdline(off_t offset, BAN::ByteSpan) const;
size_t proc_environ(off_t offset, BAN::ByteSpan) const;
bool is_userspace() const { return m_is_userspace; }
// Returns error if page could not be allocated
// Returns true if the page was allocated successfully
// Return false if access was page violation (segfault)
@ -327,6 +331,8 @@ namespace Kernel
BAN::Vector<BAN::String> m_cmdline;
BAN::Vector<BAN::String> m_environ;
bool m_is_userspace { false };
BAN::Vector<ChildExitStatus> m_child_exit_statuses;
ThreadBlocker m_child_exit_blocker;

View File

@ -14,12 +14,33 @@ namespace Kernel
class BaseMutex;
class Thread;
class ThreadBlocker;
struct SchedulerQueueNode;
class SchedulerQueue
{
public:
using Node = SchedulerQueueNode;
struct Node
{
Node(Thread* thread)
: thread(thread)
{}
Thread* const thread;
Node* next { nullptr };
Node* prev { nullptr };
uint64_t wake_time_ns { static_cast<uint64_t>(-1) };
ThreadBlocker* blocker { nullptr };
Node* block_chain_next { nullptr };
Node* block_chain_prev { nullptr };
ProcessorID processor_id { PROCESSOR_NONE };
bool blocked { false };
uint64_t last_start_ns { 0 };
uint64_t time_used_ns { 0 };
};
public:
void add_thread_to_back(Node*);

View File

@ -1,35 +0,0 @@
#pragma once
#include <kernel/ProcessorID.h>
#include <kernel/Lock/SpinLock.h>
namespace Kernel
{
class Thread;
class ThreadBlocker;
struct SchedulerQueueNode
{
SchedulerQueueNode(Thread* thread)
: thread(thread)
{}
Thread* const thread;
SchedulerQueueNode* next { nullptr };
SchedulerQueueNode* prev { nullptr };
uint64_t wake_time_ns { static_cast<uint64_t>(-1) };
SpinLock blocker_lock;
ThreadBlocker* blocker { nullptr };
ProcessorID processor_id { PROCESSOR_NONE };
bool blocked { false };
uint64_t last_start_ns { 0 };
uint64_t time_used_ns { 0 };
};
}

View File

@ -40,6 +40,8 @@ namespace Kernel
void select_device(bool is_secondary);
BAN::ErrorOr<DeviceType> identify(bool is_secondary, BAN::Span<uint16_t> buffer);
BAN::ErrorOr<void> block_until_irq();
uint8_t io_read(uint16_t);
void io_write(uint16_t, uint8_t);
void read_buffer(uint16_t, uint16_t*, size_t);
@ -52,7 +54,7 @@ namespace Kernel
const uint16_t m_ctrl;
Mutex m_mutex;
ThreadBlocker m_thread_blocker;
BAN::Atomic<bool> m_has_got_irq { false };
// Non-owning pointers
BAN::Vector<ATADevice*> m_devices;

View File

@ -36,7 +36,7 @@ namespace Kernel
static constexpr size_t userspace_stack_size { PAGE_SIZE * 128 };
public:
static BAN::ErrorOr<Thread*> create_kernel(entry_t, void*);
static BAN::ErrorOr<Thread*> create_kernel(entry_t, void*, Process*);
static BAN::ErrorOr<Thread*> create_userspace(Process*, PageTable&);
~Thread();

View File

@ -33,9 +33,7 @@ namespace Kernel
private:
SpinLock m_lock;
SchedulerQueue::Node* m_block_chain[32] {};
size_t m_block_chain_length { 0 };
SchedulerQueue::Node* m_block_chain { nullptr };
friend class Scheduler;
};
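Together with the block_chain_next/block_chain_prev pointers added to SchedulerQueue::Node earlier in this compare, this replaces the blocker's fixed 32-entry array with an intrusive doubly-linked list. A minimal sketch of that pattern, with illustrative names and no locking:

    // Intrusive doubly-linked "block chain", mirroring the block_chain_next /
    // block_chain_prev fields added to SchedulerQueue::Node in this diff.
    struct BlockNode
    {
        BlockNode* block_chain_next { nullptr };
        BlockNode* block_chain_prev { nullptr };
    };

    struct BlockChain
    {
        BlockNode* head { nullptr };

        void add(BlockNode* node)
        {
            node->block_chain_prev = nullptr;
            node->block_chain_next = head;
            if (head)
                head->block_chain_prev = node;
            head = node;
        }

        void remove(BlockNode* node)
        {
            if (node->block_chain_prev)
                node->block_chain_prev->block_chain_next = node->block_chain_next;
            else
                head = node->block_chain_next;
            if (node->block_chain_next)
                node->block_chain_next->block_chain_prev = node->block_chain_prev;
            node->block_chain_next = node->block_chain_prev = nullptr;
        }
    };

Unlike the old array, the chain has no fixed capacity, and unlinking a single blocked node is O(1).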

View File

@ -34,7 +34,7 @@ namespace Kernel
BAN::Atomic<uint32_t> m_changed_ports { 0 };
ThreadBlocker m_changed_port_blocker;
BAN::Atomic<Thread*> m_port_updater { nullptr };
BAN::Atomic<Process*> m_port_updater { nullptr };
struct PortInfo
{

View File

@ -73,7 +73,7 @@ namespace Kernel
Mutex m_mutex;
BAN::Atomic<Thread*> m_port_updater { nullptr };
BAN::Atomic<Process*> m_port_updater { nullptr };
ThreadBlocker m_port_thread_blocker;
BAN::Atomic<bool> m_port_changed { false };

View File

@ -6,7 +6,6 @@
#define MULTIBOOT2_TAG_END 0
#define MULTIBOOT2_TAG_CMDLINE 1
#define MULTIBOOT2_TAG_MODULES 3
#define MULTIBOOT2_TAG_MMAP 6
#define MULTIBOOT2_TAG_FRAMEBUFFER 8
#define MULTIBOOT2_TAG_OLD_RSDP 14
@ -34,13 +33,6 @@ struct multiboot2_cmdline_tag_t : public multiboot2_tag_t
char cmdline[];
} __attribute__((packed));
struct multiboot2_modules_tag_t : public multiboot2_tag_t
{
uint32_t mod_start;
uint32_t mod_end;
uint8_t string[];
} __attribute__((packed));
struct multiboot2_mmap_entry_t
{
uint64_t base_addr;

View File

@ -885,10 +885,7 @@ acpi_release_global_lock:
set_irq(irq);
InterruptController::get().enable_irq(irq);
if (auto thread_or_error = Thread::create_kernel([](void*) { get().acpi_event_task(); }, nullptr); thread_or_error.is_error())
dwarnln("Failed to create ACPI thread, power button will not work: {}", thread_or_error.error());
else if (auto ret = Processor::scheduler().add_thread(thread_or_error.value()); ret.is_error())
dwarnln("Failed to create ACPI thread, power button will not work: {}", ret.error());
Process::create_kernel([](void*) { get().acpi_event_task(); }, nullptr);
}
dprintln("Initialized ACPI interrupts");

View File

@ -1,10 +1,3 @@
// FIXME: Rewrite aml interpreter to not be recursive.
// Not inlining TRYs drops our stack usage a ton...
#pragma GCC push_options
#pragma GCC optimize "no-inline"
#include <BAN/Errors.h>
#pragma GCC pop_options
#include <BAN/Assert.h>
#include <BAN/String.h>
@ -82,12 +75,7 @@ namespace Kernel::ACPI::AML
);
return BAN::Error::from_errno(EINVAL);
}
name.parts[i] =
static_cast<uint32_t>(aml_data[0] << 0) |
static_cast<uint32_t>(aml_data[1] << 8) |
static_cast<uint32_t>(aml_data[2] << 16) |
static_cast<uint32_t>(aml_data[3] << 24);
name.parts[i] = aml_data.as<const uint32_t>();
aml_data = aml_data.slice(4);
}
@ -389,6 +377,9 @@ namespace Kernel::ACPI::AML
return result;
}
// FIXME: WHY TF IS THIS USING OVER 1 KiB of stack
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstack-usage="
static BAN::ErrorOr<Node> parse_logical_op(ParseContext& context)
{
dprintln_if(AML_DUMP_FUNCTION_CALLS, "parse_logical_op");
@ -479,6 +470,7 @@ namespace Kernel::ACPI::AML
return result;
}
#pragma GCC diagnostic pop
static BAN::ErrorOr<Node> parse_index_op(ParseContext& context);
@ -754,6 +746,8 @@ namespace Kernel::ACPI::AML
return {};
}
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstack-usage="
static BAN::ErrorOr<void> perform_store(const Node& source, Reference* target, TargetType target_type)
{
dprintln_if(AML_DUMP_FUNCTION_CALLS, "perform_store");
@ -834,6 +828,7 @@ namespace Kernel::ACPI::AML
return {};
}
#pragma GCC diagnostic pop
static BAN::ErrorOr<void> store_into_target(ParseContext& context, const Node& node)
{
@ -1240,7 +1235,7 @@ namespace Kernel::ACPI::AML
}
ASSERT(object);
return sizeof_impl(object->node);
return TRY(sizeof_impl(object->node));
}
static BAN::ErrorOr<Node> derefof_impl(const Node& source)
@ -1248,7 +1243,7 @@ namespace Kernel::ACPI::AML
switch (source.type)
{
case Node::Type::Reference:
return source.as.reference->node.copy();
return TRY(source.as.reference->node.copy());
case Node::Type::Index:
{
switch (source.as.index.type)
@ -1267,7 +1262,7 @@ namespace Kernel::ACPI::AML
{
ASSERT(source.as.index.index < source.as.index.as.package->num_elements);
TRY(resolve_package_element(source.as.index.as.package->elements[source.as.index.index], true));
return source.as.index.as.package->elements[source.as.index.index].value.node->copy();
return TRY(source.as.index.as.package->elements[source.as.index.index].value.node->copy());
}
default: ASSERT_NOT_REACHED();
}
@ -1549,6 +1544,9 @@ namespace Kernel::ACPI::AML
return result;
}
// FIXME: WHY TF IS THIS USING OVER 1 KiB of stack
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstack-usage="
static BAN::ErrorOr<Node> parse_explicit_conversion(ParseContext& context)
{
dprintln_if(AML_DUMP_FUNCTION_CALLS, "parse_explicit_conversion");
@ -1701,6 +1699,7 @@ namespace Kernel::ACPI::AML
return result;
}
#pragma GCC diagnostic pop
static BAN::ErrorOr<Node> parse_to_string_op(ParseContext& context)
{
@ -2002,6 +2001,7 @@ namespace Kernel::ACPI::AML
return {};
}
static BAN::ErrorOr<Node> parse_wait_op(ParseContext& context)
{
dprintln_if(AML_DUMP_FUNCTION_CALLS, "parse_wait_op");
@ -2617,7 +2617,7 @@ namespace Kernel::ACPI::AML
case Node::Type::Buffer:
case Node::Type::Index:
case Node::Type::Reference:
return node.copy();
return TRY(node.copy());
case Node::Type::BufferField:
dwarnln("TODO: evaluate BufferField");
return BAN::Error::from_errno(ENOTSUP);
@ -2626,7 +2626,7 @@ namespace Kernel::ACPI::AML
case Node::Type::Method:
if (node.as.method.arg_count != 0)
return BAN::Error::from_errno(EFAULT);
return method_call(node_path, node, BAN::Array<Reference*, 7>{});
return TRY(method_call(node_path, node, BAN::Array<Reference*, 7>{}));
}
dwarnln("evaluate {}", node);
@ -2761,6 +2761,9 @@ namespace Kernel::ACPI::AML
return method_call(scope, method, BAN::move(args));
}
// FIXME: WHY TF IS THIS USING OVER 2 KiB of stack
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstack-usage="
BAN::ErrorOr<Node> parse_node(ParseContext& context, bool return_ref)
{
if (context.aml_data.empty())
@ -2775,15 +2778,15 @@ namespace Kernel::ACPI::AML
switch (static_cast<AML::ExtOp>(opcode))
{
case AML::ExtOp::CondRefOfOp:
return parse_condrefof_op(context);
return TRY(parse_condrefof_op(context));
case AML::ExtOp::AcquireOp:
return parse_acquire_op(context);
return TRY(parse_acquire_op(context));
case AML::ExtOp::LoadOp:
return parse_load_op(context);
return TRY(parse_load_op(context));
case AML::ExtOp::TimerOp:
return parse_timer_op(context);
return TRY(parse_timer_op(context));
case AML::ExtOp::WaitOp:
return parse_wait_op(context);
return TRY(parse_wait_op(context));
case AML::ExtOp::DebugOp:
{
context.aml_data = context.aml_data.slice(2);
@ -2809,46 +2812,46 @@ namespace Kernel::ACPI::AML
case AML::Byte::WordPrefix:
case AML::Byte::DWordPrefix:
case AML::Byte::QWordPrefix:
return parse_integer(context.aml_data);
return TRY(parse_integer(context.aml_data));
case AML::Byte::StringPrefix:
return parse_string(context.aml_data);
return TRY(parse_string(context.aml_data));
case AML::Byte::BufferOp:
return parse_buffer_op(context);
return TRY(parse_buffer_op(context));
case AML::Byte::PackageOp:
case AML::Byte::VarPackageOp:
return parse_package_op(context);
return TRY(parse_package_op(context));
case AML::Byte::SizeOfOp:
return parse_sizeof_op(context);
return TRY(parse_sizeof_op(context));
case AML::Byte::RefOfOp:
return parse_refof_op(context);
return TRY(parse_refof_op(context));
case AML::Byte::DerefOfOp:
return parse_derefof_op(context);
return TRY(parse_derefof_op(context));
case AML::Byte::StoreOp:
return parse_store_op(context);
return TRY(parse_store_op(context));
case AML::Byte::CopyObjectOp:
return parse_copy_object_op(context);
return TRY(parse_copy_object_op(context));
case AML::Byte::ConcatOp:
return parse_concat_op(context);
return TRY(parse_concat_op(context));
case AML::Byte::MidOp:
return parse_mid_op(context);
return TRY(parse_mid_op(context));
case AML::Byte::IndexOp:
return parse_index_op(context);
return TRY(parse_index_op(context));
case AML::Byte::ObjectTypeOp:
return parse_object_type_op(context);
return TRY(parse_object_type_op(context));
case AML::Byte::MatchOp:
return parse_match_op(context);
return TRY(parse_match_op(context));
case AML::Byte::ToBufferOp:
case AML::Byte::ToDecimalStringOp:
case AML::Byte::ToHexStringOp:
case AML::Byte::ToIntegerOp:
return parse_explicit_conversion(context);
return TRY(parse_explicit_conversion(context));
case AML::Byte::ToStringOp:
return parse_to_string_op(context);
return TRY(parse_to_string_op(context));
case AML::Byte::IncrementOp:
case AML::Byte::DecrementOp:
return parse_inc_dec_op(context);
return TRY(parse_inc_dec_op(context));
case AML::Byte::NotOp:
return parse_unary_integer_op(context);
return TRY(parse_unary_integer_op(context));
case AML::Byte::AddOp:
case AML::Byte::SubtractOp:
case AML::Byte::MultiplyOp:
@ -2861,17 +2864,17 @@ namespace Kernel::ACPI::AML
case AML::Byte::NorOp:
case AML::Byte::XorOp:
case AML::Byte::ModOp:
return parse_binary_integer_op(context);
return TRY(parse_binary_integer_op(context));
case AML::Byte::LAndOp:
case AML::Byte::LEqualOp:
case AML::Byte::LGreaterOp:
case AML::Byte::LLessOp:
case AML::Byte::LNotOp:
case AML::Byte::LOrOp:
return parse_logical_op(context);
return TRY(parse_logical_op(context));
case AML::Byte::FindSetLeftBitOp:
case AML::Byte::FindSetRightBitOp:
return parse_find_set_bit_op(context);
return TRY(parse_find_set_bit_op(context));
case AML::Byte::Local0:
case AML::Byte::Local1:
case AML::Byte::Local2:
@ -2889,7 +2892,7 @@ namespace Kernel::ACPI::AML
return BAN::Error::from_errno(EINVAL);
}
if (!return_ref)
return context.locals[local_index]->node.copy();
return TRY(context.locals[local_index]->node.copy());
Node reference;
reference.type = Node::Type::Reference;
reference.as.reference = context.locals[local_index];
@ -2912,7 +2915,7 @@ namespace Kernel::ACPI::AML
return BAN::Error::from_errno(EINVAL);
}
if (!return_ref)
return context.args[arg_index]->node.copy();
return TRY(context.args[arg_index]->node.copy());
Node reference;
reference.type = Node::Type::Reference;
reference.as.reference = context.args[arg_index];
@ -2959,11 +2962,11 @@ namespace Kernel::ACPI::AML
}
}
return method_call(BAN::move(object_scope), named_object->node, BAN::move(args), context.call_depth);
return TRY(method_call(BAN::move(object_scope), named_object->node, BAN::move(args), context.call_depth));
}
if (!return_ref)
return named_object->node.copy();
return TRY(named_object->node.copy());
Node reference;
reference.type = Node::Type::Reference;
@ -2971,13 +2974,20 @@ namespace Kernel::ACPI::AML
reference.as.reference->ref_count++;
return reference;
}
#pragma GCC diagnostic pop
// FIXME: WHY TF IS THIS USING ALMOST 2 KiB of stack
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstack-usage="
BAN::ErrorOr<ExecutionFlowResult> parse_node_or_execution_flow(ParseContext& context)
{
if (context.aml_data.empty())
return BAN::Error::from_errno(ENODATA);
BAN::ErrorOr<void> (*function)(ParseContext&) = nullptr;
auto dummy_return = ExecutionFlowResult {
.elem1 = ExecutionFlow::Normal,
.elem2 = BAN::Optional<Node>(),
};
if (context.aml_data[0] == static_cast<uint8_t>(AML::Byte::ExtOpPrefix))
{
@ -2986,130 +2996,116 @@ namespace Kernel::ACPI::AML
switch (static_cast<AML::ExtOp>(context.aml_data[1]))
{
case AML::ExtOp::MutexOp:
function = parse_mutex_op;
break;
TRY(parse_mutex_op(context));
return dummy_return;
case AML::ExtOp::FatalOp:
function = parse_fatal_op;
break;
TRY(parse_fatal_op(context));
return dummy_return;
case AML::ExtOp::EventOp:
function = parse_event_op;
break;
TRY(parse_event_op(context));
return dummy_return;
case AML::ExtOp::ResetOp:
case AML::ExtOp::SignalOp:
function = parse_reset_signal_op;
break;
TRY(parse_reset_signal_op(context));
return dummy_return;
case AML::ExtOp::CreateFieldOp:
function = parse_createfield_op;
break;
TRY(parse_createfield_op(context));
return dummy_return;
case AML::ExtOp::SleepOp:
function = parse_sleep_op;
break;
TRY(parse_sleep_op(context));
return dummy_return;
case AML::ExtOp::StallOp:
function = parse_stall_op;
break;
TRY(parse_stall_op(context));
return dummy_return;
case AML::ExtOp::ReleaseOp:
function = parse_release_op;
break;
TRY(parse_release_op(context));
return dummy_return;
case AML::ExtOp::OpRegionOp:
function = parse_opregion_op;
break;
TRY(parse_opregion_op(context));
return dummy_return;
case AML::ExtOp::FieldOp:
function = parse_field_op;
break;
TRY(parse_field_op(context));
return dummy_return;
case AML::ExtOp::IndexFieldOp:
function = parse_index_field_op;
break;
TRY(parse_index_field_op(context));
return dummy_return;
case AML::ExtOp::BankFieldOp:
function = parse_bank_field_op;
break;
TRY(parse_bank_field_op(context));
return dummy_return;
case AML::ExtOp::DeviceOp:
function = parse_device_op;
break;
TRY(parse_device_op(context));
return dummy_return;
case AML::ExtOp::ProcessorOp:
function = parse_processor_op;
break;
TRY(parse_processor_op(context));
return dummy_return;
case AML::ExtOp::PowerResOp:
function = parse_power_resource_op;
break;
TRY(parse_power_resource_op(context));
return dummy_return;
case AML::ExtOp::ThermalZoneOp:
function = parse_thermal_zone_op;
break;
default:
break;
}
}
else
{
switch (static_cast<AML::Byte>(context.aml_data[0]))
{
case AML::Byte::AliasOp:
function = parse_alias_op;
break;
case AML::Byte::NameOp:
function = parse_name_op;
break;
case AML::Byte::MethodOp:
function = parse_method_op;
break;
case AML::Byte::ScopeOp:
function = parse_scope_op;
break;
case AML::Byte::NotifyOp:
function = parse_notify_op;
break;
case AML::Byte::CreateBitFieldOp:
case AML::Byte::CreateByteFieldOp:
case AML::Byte::CreateWordFieldOp:
case AML::Byte::CreateDWordFieldOp:
case AML::Byte::CreateQWordFieldOp:
function = parse_createfield_op;
break;
case AML::Byte::IfOp:
return parse_if_op(context);
case AML::Byte::WhileOp:
return parse_while_op(context);
case AML::Byte::NoopOp:
case AML::Byte::BreakPointOp:
context.aml_data = context.aml_data.slice(1);
return ExecutionFlowResult {
.elem1 = ExecutionFlow::Normal,
.elem2 = BAN::Optional<Node>(),
};;
case AML::Byte::BreakOp:
dprintln_if(AML_DUMP_FUNCTION_CALLS, "parse_break_op");
context.aml_data = context.aml_data.slice(1);
return ExecutionFlowResult {
.elem1 = ExecutionFlow::Break,
.elem2 = BAN::Optional<Node>(),
};
case AML::Byte::ContinueOp:
dprintln_if(AML_DUMP_FUNCTION_CALLS, "parse_continue_op");
context.aml_data = context.aml_data.slice(1);
return ExecutionFlowResult {
.elem1 = ExecutionFlow::Continue,
.elem2 = BAN::Optional<Node>(),
};
case AML::Byte::ReturnOp:
{
dprintln_if(AML_DUMP_FUNCTION_CALLS, "parse_return_op");
context.aml_data = context.aml_data.slice(1);
return ExecutionFlowResult {
.elem1 = ExecutionFlow::Return,
.elem2 = TRY(parse_node(context)),
};
}
TRY(parse_thermal_zone_op(context));
return dummy_return;
default:
break;
}
}
if (function)
switch (static_cast<AML::Byte>(context.aml_data[0]))
{
TRY(function(context));
return ExecutionFlowResult {
.elem1 = ExecutionFlow::Normal,
.elem2 = BAN::Optional<Node>(),
};;
case AML::Byte::AliasOp:
TRY(parse_alias_op(context));
return dummy_return;
case AML::Byte::NameOp:
TRY(parse_name_op(context));
return dummy_return;
case AML::Byte::MethodOp:
TRY(parse_method_op(context));
return dummy_return;
case AML::Byte::NoopOp:
case AML::Byte::BreakPointOp:
context.aml_data = context.aml_data.slice(1);
return dummy_return;
case AML::Byte::ScopeOp:
TRY(parse_scope_op(context));
return dummy_return;
case AML::Byte::NotifyOp:
TRY(parse_notify_op(context));
return dummy_return;
case AML::Byte::CreateBitFieldOp:
case AML::Byte::CreateByteFieldOp:
case AML::Byte::CreateWordFieldOp:
case AML::Byte::CreateDWordFieldOp:
case AML::Byte::CreateQWordFieldOp:
TRY(parse_createfield_op(context));
return dummy_return;
case AML::Byte::IfOp:
return parse_if_op(context);
case AML::Byte::WhileOp:
return parse_while_op(context);
case AML::Byte::BreakOp:
dprintln_if(AML_DUMP_FUNCTION_CALLS, "parse_break_op");
context.aml_data = context.aml_data.slice(1);
return ExecutionFlowResult {
.elem1 = ExecutionFlow::Break,
.elem2 = BAN::Optional<Node>(),
};
case AML::Byte::ContinueOp:
dprintln_if(AML_DUMP_FUNCTION_CALLS, "parse_continue_op");
context.aml_data = context.aml_data.slice(1);
return ExecutionFlowResult {
.elem1 = ExecutionFlow::Continue,
.elem2 = BAN::Optional<Node>(),
};
case AML::Byte::ReturnOp:
{
dprintln_if(AML_DUMP_FUNCTION_CALLS, "parse_return_op");
context.aml_data = context.aml_data.slice(1);
return ExecutionFlowResult {
.elem1 = ExecutionFlow::Return,
.elem2 = TRY(parse_node(context)),
};
}
default:
break;
}
auto node = TRY(parse_node(context));
@ -3118,6 +3114,7 @@ namespace Kernel::ACPI::AML
.elem2 = BAN::move(node)
};
}
#pragma GCC diagnostic pop
BAN::ErrorOr<NameString> NameString::from_string(BAN::StringView name)
{
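Two edits recur throughout this file: #pragma GCC diagnostic ignored "-Wstack-usage=" locally silences GCC's stack-usage warning around the large parse functions flagged by the FIXMEs, and plain `return parse_x(context);` becomes `return TRY(parse_x(context));`, which unwraps the callee's result (early-returning on error) before the enclosing function constructs its own return value. Below is a hedged stand-in for the TRY pattern, not BAN's actual ErrorOr/TRY implementation; it relies on the GCC statement-expression extension.

    // Illustrative stand-in for BAN::ErrorOr / TRY; details may differ.
    #include <optional>
    #include <utility>

    struct Error { int code; };

    template<typename T>
    struct Result
    {
        Result(T value) : m_value(std::move(value)) {}
        Result(Error error) : m_error(error) {}
        bool is_error() const { return !m_value.has_value(); }
        Error release_error() const { return m_error; }
        T release_value() { return std::move(*m_value); }
    private:
        std::optional<T> m_value;
        Error m_error {};
    };

    #define TRY_SKETCH(expr)                    \
        ({                                      \
            auto _result = (expr);              \
            if (_result.is_error())             \
                return _result.release_error(); \
            _result.release_value();            \
        })

    Result<int> parse_digit(int raw)
    {
        if (raw < 0 || raw > 9)
            return Error { 22 }; // EINVAL-ish
        return raw;
    }

    Result<int> parse_and_double(int raw)
    {
        // Unwraps the callee's value and re-wraps it, instead of forwarding
        // the callee's Result object directly.
        const int value = TRY_SKETCH(parse_digit(raw));
        return value * 2;
    }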

View File

@ -1,9 +1,3 @@
// FIXME: Find better ways to manage stack usage
#pragma GCC push_options
#pragma GCC optimize "no-inline"
#include <BAN/Errors.h>
#pragma GCC pop_options
#include <kernel/ACPI/AML/Bytes.h>
#include <kernel/ACPI/AML/Namespace.h>
#include <kernel/ACPI/AML/OpRegion.h>
@ -98,39 +92,6 @@ namespace Kernel::ACPI::AML
opregion.as.opregion.offset = region_offset.as.integer.value;
opregion.as.opregion.length = region_length.as.integer.value;
opregion.as.opregion.seg = 0;
opregion.as.opregion.bus = 0;
opregion.as.opregion.dev = 0;
opregion.as.opregion.func = 0;
if (opregion.as.opregion.address_space == GAS::AddressSpaceID::PCIConfig)
{
// FIXME: Am I actually allowed to read these here or should I determine
// them on every read/write access
if (auto seg_res = TRY(Namespace::root_namespace().find_named_object(context.scope, TRY(AML::NameString::from_string("_SEG"_sv)))); seg_res.node != nullptr)
{
auto seg_node = TRY(convert_node(TRY(evaluate_node(seg_res.path, seg_res.node->node)), ConvInteger, -1));
opregion.as.opregion.seg = seg_node.as.integer.value;
}
if (auto bbn_res = TRY(Namespace::root_namespace().find_named_object(context.scope, TRY(AML::NameString::from_string("_BBN"_sv)))); bbn_res.node != nullptr)
{
auto bbn_node = TRY(convert_node(TRY(evaluate_node(bbn_res.path, bbn_res.node->node)), ConvInteger, -1));
opregion.as.opregion.bus = bbn_node.as.integer.value;
}
auto adr_res = TRY(Namespace::root_namespace().find_named_object(context.scope, TRY(AML::NameString::from_string("_ADR"_sv))));
if (adr_res.node == nullptr)
{
dwarnln("No _ADR for PCIConfig OpRegion");
return BAN::Error::from_errno(EFAULT);
}
auto adr_node = TRY(convert_node(TRY(evaluate_node(adr_res.path, adr_res.node->node)), ConvInteger, -1));
opregion.as.opregion.dev = adr_node.as.integer.value >> 16;
opregion.as.opregion.func = adr_node.as.integer.value & 0xFF;
}
TRY(Namespace::root_namespace().add_named_object(context, region_name, BAN::move(opregion)));
return {};
@ -455,19 +416,19 @@ namespace Kernel::ACPI::AML
ASSERT_NOT_REACHED();
case GAS::AddressSpaceID::PCIConfig:
{
if (opregion.seg != 0)
{
dwarnln("PCIConfig OpRegion with segment");
return BAN::Error::from_errno(ENOTSUP);
}
// https://uefi.org/htmlspecs/ACPI_Spec_6_4_html/05_ACPI_Software_Programming_Model/ACPI_Software_Programming_Model.html#address-space-format
// PCI configuration space is confined to segment 0, bus 0
const uint16_t device = (byte_offset >> 32) & 0xFFFF;
const uint16_t function = (byte_offset >> 16) & 0xFFFF;
const uint16_t offset = byte_offset & 0xFFFF;
switch (access_size)
{
case 1: return PCI::PCIManager::get().read_config_byte (opregion.bus, opregion.dev, opregion.func, byte_offset);
case 2: return PCI::PCIManager::get().read_config_word (opregion.bus, opregion.dev, opregion.func, byte_offset);
case 4: return PCI::PCIManager::get().read_config_dword(opregion.bus, opregion.dev, opregion.func, byte_offset);
case 1: return PCI::PCIManager::get().read_config_byte (0, device, function, offset);
case 2: return PCI::PCIManager::get().read_config_word (0, device, function, offset);
case 4: return PCI::PCIManager::get().read_config_dword(0, device, function, offset);
default:
dwarnln("{} byte read from PCI {2H}:{2H}:{2H} offset {2H}", access_size, opregion.bus, opregion.dev, opregion.func, byte_offset);
dwarnln("{} byte read from PCI {2H}:{2H}:{2H}", device, function, offset);
return BAN::Error::from_errno(EINVAL);
}
ASSERT_NOT_REACHED();
@ -525,19 +486,19 @@ namespace Kernel::ACPI::AML
return {};
case GAS::AddressSpaceID::PCIConfig:
{
if (opregion.seg != 0)
{
dwarnln("PCIConfig OpRegion with segment");
return BAN::Error::from_errno(ENOTSUP);
}
// https://uefi.org/htmlspecs/ACPI_Spec_6_4_html/05_ACPI_Software_Programming_Model/ACPI_Software_Programming_Model.html#address-space-format
// PCI configuration space is confined to segment 0, bus 0
const uint16_t device = (byte_offset >> 32) & 0xFFFF;
const uint16_t function = (byte_offset >> 16) & 0xFFFF;
const uint16_t offset = byte_offset & 0xFFFF;
switch (access_size)
{
case 1: PCI::PCIManager::get().write_config_byte (opregion.bus, opregion.dev, opregion.func, byte_offset, value); break;
case 2: PCI::PCIManager::get().write_config_word (opregion.bus, opregion.dev, opregion.func, byte_offset, value); break;
case 4: PCI::PCIManager::get().write_config_dword(opregion.bus, opregion.dev, opregion.func, byte_offset, value); break;
case 1: PCI::PCIManager::get().write_config_byte (0, device, function, offset, value); break;
case 2: PCI::PCIManager::get().write_config_word (0, device, function, offset, value); break;
case 4: PCI::PCIManager::get().write_config_dword(0, device, function, offset, value); break;
default:
dwarnln("{} byte write to PCI {2H}:{2H}:{2H} offset {2H}", access_size, opregion.bus, opregion.dev, opregion.func, byte_offset);
dwarnln("{} byte write to PCI {2H}:{2H}:{2H}", device, function, offset);
return BAN::Error::from_errno(EINVAL);
}
return {};
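Both the read and write paths above now decode the PCIConfig region address the same way (segment 0, bus 0, per the spec link quoted in the hunk). A small standalone sketch of that decoding, mirroring the shifts in the hunk; the type and function names are illustrative.

    // Sketch of the PCIConfig region-address decoding used above.
    #include <cstdint>

    struct PciConfigAddress
    {
        std::uint16_t device;
        std::uint16_t function;
        std::uint16_t offset;
    };

    constexpr PciConfigAddress decode_pci_config_address(std::uint64_t byte_offset)
    {
        return PciConfigAddress {
            static_cast<std::uint16_t>((byte_offset >> 32) & 0xFFFF), // device
            static_cast<std::uint16_t>((byte_offset >> 16) & 0xFFFF), // function
            static_cast<std::uint16_t>((byte_offset >>  0) & 0xFFFF), // register offset
        };
    }

    static_assert(decode_pci_config_address(0x0001'0002'0040ull).device == 0x01);
    static_assert(decode_pci_config_address(0x0001'0002'0040ull).offset == 0x40);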

View File

@ -540,6 +540,8 @@ namespace Kernel
// this is a hack to allow direct GSI reservation
BAN::ErrorOr<uint8_t> APIC::reserve_gsi(uint32_t gsi)
{
dwarnln("TRYING TO RESERVE GSI {}", gsi);
size_t irq = 0;
for (; irq < 0x100; irq++)
if (m_irq_overrides[irq] == gsi)
@ -551,6 +553,8 @@ namespace Kernel
return BAN::Error::from_errno(ENOTSUP);
}
dwarnln(" matches IRQ {}", irq);
TRY(reserve_irq(irq));
return irq;

View File

@ -26,74 +26,60 @@ namespace Kernel
for (const auto* tag = multiboot2_info.tags; tag->type != MULTIBOOT2_TAG_END; tag = tag->next())
{
switch (tag->type)
if (tag->type == MULTIBOOT2_TAG_CMDLINE)
{
case MULTIBOOT2_TAG_CMDLINE:
{
const auto& command_line_tag = *static_cast<const multiboot2_cmdline_tag_t*>(tag);
MUST(g_boot_info.command_line.append(command_line_tag.cmdline));
break;
}
case MULTIBOOT2_TAG_MODULES:
{
const auto& modules_tag = *static_cast<const multiboot2_modules_tag_t*>(tag);
MUST(g_boot_info.modules.emplace_back(modules_tag.mod_start, modules_tag.mod_end - modules_tag.mod_start));
break;
}
case MULTIBOOT2_TAG_FRAMEBUFFER:
{
const auto& framebuffer_tag = *static_cast<const multiboot2_framebuffer_tag_t*>(tag);
g_boot_info.framebuffer.address = framebuffer_tag.framebuffer_addr;
g_boot_info.framebuffer.pitch = framebuffer_tag.framebuffer_pitch;
g_boot_info.framebuffer.width = framebuffer_tag.framebuffer_width;
g_boot_info.framebuffer.height = framebuffer_tag.framebuffer_height;
g_boot_info.framebuffer.bpp = framebuffer_tag.framebuffer_bpp;
if (framebuffer_tag.framebuffer_type == MULTIBOOT2_FRAMEBUFFER_TYPE_RGB)
g_boot_info.framebuffer.type = FramebufferInfo::Type::RGB;
else if (framebuffer_tag.framebuffer_type == MULTIBOOT2_FRAMEBUFFER_TYPE_TEXT)
g_boot_info.framebuffer.type = FramebufferInfo::Type::Text;
else
g_boot_info.framebuffer.type = FramebufferInfo::Type::Unknown;
break;
}
case MULTIBOOT2_TAG_MMAP:
{
const auto& mmap_tag = *static_cast<const multiboot2_mmap_tag_t*>(tag);
const auto& command_line_tag = *static_cast<const multiboot2_cmdline_tag_t*>(tag);
MUST(g_boot_info.command_line.append(command_line_tag.cmdline));
}
else if (tag->type == MULTIBOOT2_TAG_FRAMEBUFFER)
{
const auto& framebuffer_tag = *static_cast<const multiboot2_framebuffer_tag_t*>(tag);
g_boot_info.framebuffer.address = framebuffer_tag.framebuffer_addr;
g_boot_info.framebuffer.pitch = framebuffer_tag.framebuffer_pitch;
g_boot_info.framebuffer.width = framebuffer_tag.framebuffer_width;
g_boot_info.framebuffer.height = framebuffer_tag.framebuffer_height;
g_boot_info.framebuffer.bpp = framebuffer_tag.framebuffer_bpp;
if (framebuffer_tag.framebuffer_type == MULTIBOOT2_FRAMEBUFFER_TYPE_RGB)
g_boot_info.framebuffer.type = FramebufferInfo::Type::RGB;
else if (framebuffer_tag.framebuffer_type == MULTIBOOT2_FRAMEBUFFER_TYPE_TEXT)
g_boot_info.framebuffer.type = FramebufferInfo::Type::Text;
else
g_boot_info.framebuffer.type = FramebufferInfo::Type::Unknown;
}
else if (tag->type == MULTIBOOT2_TAG_MMAP)
{
const auto& mmap_tag = *static_cast<const multiboot2_mmap_tag_t*>(tag);
const size_t entry_count = (mmap_tag.size - sizeof(multiboot2_mmap_tag_t)) / mmap_tag.entry_size;
const size_t entry_count = (mmap_tag.size - sizeof(multiboot2_mmap_tag_t)) / mmap_tag.entry_size;
MUST(g_boot_info.memory_map_entries.resize(entry_count));
MUST(g_boot_info.memory_map_entries.resize(entry_count));
for (size_t i = 0; i < entry_count; i++)
{
const auto& mmap_entry = *reinterpret_cast<const multiboot2_mmap_entry_t*>(reinterpret_cast<uintptr_t>(tag) + sizeof(multiboot2_mmap_tag_t) + i * mmap_tag.entry_size);
dprintln("entry {16H} {16H} {8H}",
(uint64_t)mmap_entry.base_addr,
(uint64_t)mmap_entry.length,
(uint64_t)mmap_entry.type
);
g_boot_info.memory_map_entries[i].address = mmap_entry.base_addr;
g_boot_info.memory_map_entries[i].length = mmap_entry.length;
g_boot_info.memory_map_entries[i].type = bios_number_to_memory_type(mmap_entry.type);
}
break;
}
case MULTIBOOT2_TAG_OLD_RSDP:
for (size_t i = 0; i < entry_count; i++)
{
if (g_boot_info.rsdp.length == 0)
{
memcpy(&g_boot_info.rsdp, static_cast<const multiboot2_rsdp_tag_t*>(tag)->data, 20);
g_boot_info.rsdp.length = 20;
}
break;
const auto& mmap_entry = *reinterpret_cast<const multiboot2_mmap_entry_t*>(reinterpret_cast<uintptr_t>(tag) + sizeof(multiboot2_mmap_tag_t) + i * mmap_tag.entry_size);
dprintln("entry {16H} {16H} {8H}",
(uint64_t)mmap_entry.base_addr,
(uint64_t)mmap_entry.length,
(uint64_t)mmap_entry.type
);
g_boot_info.memory_map_entries[i].address = mmap_entry.base_addr;
g_boot_info.memory_map_entries[i].length = mmap_entry.length;
g_boot_info.memory_map_entries[i].type = bios_number_to_memory_type(mmap_entry.type);
}
case MULTIBOOT2_TAG_NEW_RSDP:
}
else if (tag->type == MULTIBOOT2_TAG_OLD_RSDP)
{
if (g_boot_info.rsdp.length == 0)
{
const auto& rsdp = *reinterpret_cast<const RSDP*>(static_cast<const multiboot2_rsdp_tag_t*>(tag)->data);
memcpy(&g_boot_info.rsdp, &rsdp, BAN::Math::min<uint32_t>(rsdp.length, sizeof(g_boot_info.rsdp)));
break;
memcpy(&g_boot_info.rsdp, static_cast<const multiboot2_rsdp_tag_t*>(tag)->data, 20);
g_boot_info.rsdp.length = 20;
}
}
else if (tag->type == MULTIBOOT2_TAG_NEW_RSDP)
{
const auto& rsdp = *reinterpret_cast<const RSDP*>(static_cast<const multiboot2_rsdp_tag_t*>(tag)->data);
memcpy(&g_boot_info.rsdp, &rsdp, BAN::Math::min<uint32_t>(rsdp.length, sizeof(g_boot_info.rsdp)));
}
}
g_boot_info.kernel_paddr = 0;
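The loop above walks the Multiboot2 tag list via tag->next(). A hedged sketch of how such an iterator is typically implemented: each tag records its own size, and the following tag begins at the current address rounded up to 8 bytes, as tags are 8-byte aligned in the Multiboot2 specification. The struct below is illustrative, not the kernel's definition.

    // Illustrative Multiboot2 tag header with next-tag computation.
    #include <cstdint>

    struct mb2_tag
    {
        std::uint32_t type;
        std::uint32_t size; // size of this tag, header included

        const mb2_tag* next() const
        {
            const auto raw = reinterpret_cast<std::uintptr_t>(this) + size;
            return reinterpret_cast<const mb2_tag*>((raw + 7) & ~static_cast<std::uintptr_t>(7));
        }
    };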

View File

@ -78,7 +78,7 @@ namespace Kernel
KERNEL_OFFSET, UINTPTR_MAX,
BAN::Math::div_round_up<size_t>(m_width * m_height * (BANAN_FB_BPP / 8), PAGE_SIZE) * PAGE_SIZE,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, false
true
));
return {};
@ -128,24 +128,22 @@ namespace Kernel
uint32_t FramebufferDevice::get_pixel(uint32_t x, uint32_t y) const
{
ASSERT(x < m_width && y < m_height);
static_assert(BANAN_FB_BPP == 32);
return reinterpret_cast<uint32_t*>(m_video_buffer->vaddr())[y * m_width + x];
const auto* video_buffer_u8 = reinterpret_cast<const uint8_t*>(m_video_buffer->vaddr());
return (video_buffer_u8[(y * m_width + x) * (BANAN_FB_BPP / 8) + 0] << 0)
| (video_buffer_u8[(y * m_width + x) * (BANAN_FB_BPP / 8) + 1] << 8)
| (video_buffer_u8[(y * m_width + x) * (BANAN_FB_BPP / 8) + 2] << 16);
}
void FramebufferDevice::set_pixel(uint32_t x, uint32_t y, uint32_t rgb)
{
if (x >= m_width || y >= m_height)
return;
static_assert(BANAN_FB_BPP == 32);
reinterpret_cast<uint32_t*>(m_video_buffer->vaddr())[y * m_width + x] = rgb;
}
void FramebufferDevice::fill(uint32_t rgb)
{
static_assert(BANAN_FB_BPP == 32);
auto* video_buffer_u32 = reinterpret_cast<uint32_t*>(m_video_buffer->vaddr());
for (uint32_t i = 0; i < m_width * m_height; i++)
video_buffer_u32[i] = rgb;
auto* video_buffer_u8 = reinterpret_cast<uint8_t*>(m_video_buffer->vaddr());
video_buffer_u8[(y * m_width + x) * (BANAN_FB_BPP / 8) + 0] = rgb >> 0;
video_buffer_u8[(y * m_width + x) * (BANAN_FB_BPP / 8) + 1] = rgb >> 8;
video_buffer_u8[(y * m_width + x) * (BANAN_FB_BPP / 8) + 2] = rgb >> 16;
if constexpr(BANAN_FB_BPP == 32)
video_buffer_u8[(y * m_width + x) * (BANAN_FB_BPP / 8) + 3] = rgb >> 24;
}
void FramebufferDevice::scroll(int32_t rows, uint32_t rgb)
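The rewritten get_pixel/set_pixel address the framebuffer byte by byte instead of hard-requiring BANAN_FB_BPP == 32. A standalone sketch of the same arithmetic, assuming a packed little-endian XRGB layout matching the byte order in the hunk; the function name is illustrative.

    // Byte-addressed pixel write for 24- or 32-bit packed framebuffers.
    #include <cstddef>
    #include <cstdint>

    inline void write_pixel(std::uint8_t* framebuffer, std::uint32_t width, std::uint32_t bpp,
                            std::uint32_t x, std::uint32_t y, std::uint32_t rgb)
    {
        const std::size_t byte_offset = (static_cast<std::size_t>(y) * width + x) * (bpp / 8);
        framebuffer[byte_offset + 0] = static_cast<std::uint8_t>(rgb >>  0); // blue
        framebuffer[byte_offset + 1] = static_cast<std::uint8_t>(rgb >>  8); // green
        framebuffer[byte_offset + 2] = static_cast<std::uint8_t>(rgb >> 16); // red
        if (bpp == 32)
            framebuffer[byte_offset + 3] = static_cast<std::uint8_t>(rgb >> 24); // unused / alpha
    }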

View File

@ -45,7 +45,7 @@ namespace Kernel
void DevFileSystem::initialize_device_updater()
{
auto* updater_thread = MUST(Thread::create_kernel(
Process::create_kernel(
[](void* _devfs)
{
auto* devfs = static_cast<DevFileSystem*>(_devfs);
@ -59,39 +59,44 @@ namespace Kernel
SystemTimer::get().sleep_ms(10);
}
}, s_instance
));
MUST(Processor::scheduler().add_thread(updater_thread));
);
auto* disk_sync_thread = MUST(Thread::create_kernel(
auto* sync_process = Process::create_kernel();
sync_process->add_thread(MUST(Thread::create_kernel(
[](void* _devfs)
{
auto* devfs = static_cast<DevFileSystem*>(_devfs);
constexpr uint64_t sync_interval_ms = 10'000;
uint64_t next_sync_ms { sync_interval_ms };
while (true)
{
LockGuard _(devfs->m_device_lock);
while (!devfs->m_should_sync)
{
const uint64_t current_ms = SystemTimer::get().ms_since_boot();
if (devfs->m_should_sync || current_ms >= next_sync_ms)
break;
devfs->m_sync_thread_blocker.block_with_timeout_ms(next_sync_ms - current_ms, &devfs->m_device_lock);
}
devfs->m_sync_thread_blocker.block_indefinite(&devfs->m_device_lock);
for (auto& device : devfs->m_devices)
if (device->is_storage_device())
if (auto ret = static_cast<StorageDevice*>(device.ptr())->sync_disk_cache(); ret.is_error())
dwarnln("disk sync: {}", ret.error());
next_sync_ms = SystemTimer::get().ms_since_boot() + sync_interval_ms;
devfs->m_should_sync = false;
devfs->m_sync_done.unblock();
}
}, s_instance
));
MUST(Processor::scheduler().add_thread(disk_sync_thread));
}, s_instance, sync_process
)));
sync_process->add_thread(MUST(Kernel::Thread::create_kernel(
[](void* _devfs)
{
auto* devfs = static_cast<DevFileSystem*>(_devfs);
while (true)
{
SystemTimer::get().sleep_ms(10'000);
devfs->initiate_sync(false);
}
}, s_instance, sync_process
)));
sync_process->register_to_scheduler();
}
void DevFileSystem::initiate_sync(bool should_block)
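In the new structure the disk-sync work is split across two kernel threads in one process: one blocks on m_sync_thread_blocker until a sync is requested, the other sleeps ten seconds and calls initiate_sync. As a rough userspace analogy of that shape (std::condition_variable standing in for ThreadBlocker; a sketch, not the kernel's API):

    // "Blocking worker + periodic kicker" analogy using the standard library.
    #include <chrono>
    #include <condition_variable>
    #include <mutex>
    #include <thread>

    std::mutex sync_mutex;
    std::condition_variable sync_cv;
    bool sync_requested = false;

    void sync_worker()
    {
        for (;;)
        {
            std::unique_lock lock(sync_mutex);
            sync_cv.wait(lock, [] { return sync_requested; }); // block until a sync is requested
            sync_requested = false;
            lock.unlock();
            // ... flush dirty disk caches here ...
        }
    }

    void periodic_kicker()
    {
        for (;;)
        {
            std::this_thread::sleep_for(std::chrono::seconds(10));
            {
                std::lock_guard lock(sync_mutex);
                sync_requested = true;
            }
            sync_cv.notify_one();
        }
    }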

View File

@ -289,26 +289,6 @@ namespace Kernel
return {};
}
BAN::ErrorOr<void> Ext2Inode::chown_impl(uid_t uid, gid_t gid)
{
if (m_inode.uid == uid && m_inode.gid == gid)
return {};
const auto old_uid = m_inode.uid;
const auto old_gid = m_inode.gid;
m_inode.uid = uid;
m_inode.gid = gid;
if (auto ret = sync(); ret.is_error())
{
m_inode.uid = old_uid;
m_inode.gid = old_gid;
return ret.release_error();
}
return {};
}
BAN::ErrorOr<void> Ext2Inode::utimens_impl(const timespec times[2])
{
const uint32_t old_times[2] {

View File

@ -2,7 +2,6 @@
#include <kernel/Lock/LockGuard.h>
#include <ctype.h>
#include <sys/statvfs.h>
namespace Kernel
{
@ -74,7 +73,7 @@ namespace Kernel
fsfilcnt_t FATFS::ffree() const { return 0; } // FIXME
fsfilcnt_t FATFS::favail() const { return 0; } // FIXME
unsigned long FATFS::fsid() const { return m_type == Type::FAT32 ? m_bpb.ext_32.volume_id : m_bpb.ext_12_16.volume_id; }
unsigned long FATFS::flag() const { return ST_RDONLY; }
unsigned long FATFS::flag() const { return 0; }
unsigned long FATFS::namemax() const { return 255; }
BAN::ErrorOr<BAN::RefPtr<FATFS>> FATFS::create(BAN::RefPtr<BlockDevice> block_device)

View File

@ -1,11 +1,9 @@
#include <kernel/Epoll.h>
#include <kernel/FS/FileSystem.h>
#include <kernel/FS/Inode.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/FileBackedRegion.h>
#include <fcntl.h>
#include <sys/statvfs.h>
namespace Kernel
{
@ -83,8 +81,6 @@ namespace Kernel
return BAN::Error::from_errno(ENOTDIR);
if (Mode(mode).ifdir())
return BAN::Error::from_errno(EINVAL);
if (auto* fs = filesystem(); fs && (fs->flag() & ST_RDONLY))
return BAN::Error::from_errno(EROFS);
return create_file_impl(name, mode, uid, gid);
}
@ -95,8 +91,6 @@ namespace Kernel
return BAN::Error::from_errno(ENOTDIR);
if (!Mode(mode).ifdir())
return BAN::Error::from_errno(EINVAL);
if (auto* fs = filesystem(); fs && (fs->flag() & ST_RDONLY))
return BAN::Error::from_errno(EROFS);
return create_directory_impl(name, mode, uid, gid);
}
@ -107,8 +101,6 @@ namespace Kernel
return BAN::Error::from_errno(ENOTDIR);
if (inode->mode().ifdir())
return BAN::Error::from_errno(EINVAL);
if (auto* fs = filesystem(); fs && (fs->flag() & ST_RDONLY))
return BAN::Error::from_errno(EROFS);
return link_inode_impl(name, inode);
}
@ -119,8 +111,6 @@ namespace Kernel
return BAN::Error::from_errno(ENOTDIR);
if (name == "."_sv || name == ".."_sv)
return BAN::Error::from_errno(EINVAL);
if (auto* fs = filesystem(); fs && (fs->flag() & ST_RDONLY))
return BAN::Error::from_errno(EROFS);
return unlink_impl(name);
}
@ -137,8 +127,6 @@ namespace Kernel
LockGuard _(m_mutex);
if (!mode().iflnk())
return BAN::Error::from_errno(EINVAL);
if (auto* fs = filesystem(); fs && (fs->flag() & ST_RDONLY))
return BAN::Error::from_errno(EROFS);
return set_link_target_impl(target);
}
@ -219,8 +207,6 @@ namespace Kernel
LockGuard _(m_mutex);
if (mode().ifdir())
return BAN::Error::from_errno(EISDIR);
if (auto* fs = filesystem(); fs && (fs->flag() & ST_RDONLY))
return BAN::Error::from_errno(EROFS);
return write_impl(offset, buffer);
}
@ -229,8 +215,6 @@ namespace Kernel
LockGuard _(m_mutex);
if (mode().ifdir())
return BAN::Error::from_errno(EISDIR);
if (auto* fs = filesystem(); fs && (fs->flag() & ST_RDONLY))
return BAN::Error::from_errno(EROFS);
return truncate_impl(size);
}
@ -238,24 +222,18 @@ namespace Kernel
{
ASSERT((mode & Inode::Mode::TYPE_MASK) == 0);
LockGuard _(m_mutex);
if (auto* fs = filesystem(); fs && (fs->flag() & ST_RDONLY))
return BAN::Error::from_errno(EROFS);
return chmod_impl(mode);
}
BAN::ErrorOr<void> Inode::chown(uid_t uid, gid_t gid)
{
LockGuard _(m_mutex);
if (auto* fs = filesystem(); fs && (fs->flag() & ST_RDONLY))
return BAN::Error::from_errno(EROFS);
return chown_impl(uid, gid);
}
BAN::ErrorOr<void> Inode::utimens(const timespec times[2])
{
LockGuard _(m_mutex);
if (auto* fs = filesystem(); fs && (fs->flag() & ST_RDONLY))
return BAN::Error::from_errno(EROFS);
return utimens_impl(times);
}

View File

@ -1,4 +1,3 @@
#include <BAN/ScopeGuard.h>
#include <kernel/Device/DeviceNumbers.h>
#include <kernel/FS/TmpFS/FileSystem.h>
#include <kernel/Memory/Heap.h>
@ -106,7 +105,7 @@ namespace Kernel
{
LockGuard _(m_mutex);
const auto inode_location = find_inode(ino);
auto inode_location = find_inode(ino);
PageTable::with_fast_page(inode_location.paddr, [&] {
out = PageTable::fast_page_as_sized<TmpInodeInfo>(inode_location.index);
});
@ -116,7 +115,7 @@ namespace Kernel
{
LockGuard _(m_mutex);
const auto inode_location = find_inode(ino);
auto inode_location = find_inode(ino);
PageTable::with_fast_page(inode_location.paddr, [&] {
auto& inode_info = PageTable::fast_page_as_sized<TmpInodeInfo>(inode_location.index);
inode_info = info;
@ -127,7 +126,7 @@ namespace Kernel
{
LockGuard _(m_mutex);
const auto inode_location = find_inode(ino);
auto inode_location = find_inode(ino);
PageTable::with_fast_page(inode_location.paddr, [&] {
auto& inode_info = PageTable::fast_page_as_sized<TmpInodeInfo>(inode_location.index);
ASSERT(inode_info.nlink == 0);
@ -135,7 +134,6 @@ namespace Kernel
ASSERT(paddr == 0);
inode_info = {};
});
ASSERT(!m_inode_cache.contains(ino));
}
@ -143,86 +141,26 @@ namespace Kernel
{
LockGuard _(m_mutex);
constexpr size_t inode_infos_per_page = PAGE_SIZE / sizeof(TmpInodeInfo);
constexpr size_t page_infos_per_page = PAGE_SIZE / sizeof(PageInfo);
constexpr size_t inodes_per_page = PAGE_SIZE / sizeof(TmpInodeInfo);
for (size_t layer0_index = 0; layer0_index < page_infos_per_page; layer0_index++)
{
PageInfo layer0_page;
PageTable::with_fast_page(m_inode_pages.paddr(), [&] {
layer0_page = PageTable::fast_page_as_sized<PageInfo>(layer0_index);
ino_t ino = first_inode;
TRY(for_each_indirect_paddr_allocating(m_inode_pages, [&](paddr_t paddr, bool) {
BAN::Iteration result = BAN::Iteration::Continue;
PageTable::with_fast_page(paddr, [&] {
for (size_t i = 0; i < inodes_per_page; i++, ino++)
{
auto& inode_info = PageTable::fast_page_as_sized<TmpInodeInfo>(i);
if (inode_info.mode != 0)
continue;
inode_info = info;
result = BAN::Iteration::Break;
return;
}
});
return result;
}, 2));
if (!(layer0_page.flags() & PageInfo::Flags::Present))
{
if (m_used_pages >= m_max_pages)
return BAN::Error::from_errno(ENOSPC);
const paddr_t paddr = Heap::get().take_free_page();
if (paddr == 0)
return BAN::Error::from_errno(ENOMEM);
PageTable::with_fast_page(paddr, [&] {
memset(PageTable::fast_page_as_ptr(), 0, PAGE_SIZE);
});
PageTable::with_fast_page(m_inode_pages.paddr(), [&] {
auto& page_info = PageTable::fast_page_as_sized<PageInfo>(layer0_index);
page_info.set_paddr(paddr);
page_info.set_flags(PageInfo::Flags::Present);
layer0_page = page_info;
});
m_used_pages++;
}
for (size_t layer1_index = 0; layer1_index < page_infos_per_page; layer1_index++)
{
PageInfo layer1_page;
PageTable::with_fast_page(layer0_page.paddr(), [&] {
layer1_page = PageTable::fast_page_as_sized<PageInfo>(layer1_index);
});
if (!(layer1_page.flags() & PageInfo::Flags::Present))
{
if (m_used_pages >= m_max_pages)
return BAN::Error::from_errno(ENOSPC);
const paddr_t paddr = Heap::get().take_free_page();
if (paddr == 0)
return BAN::Error::from_errno(ENOMEM);
PageTable::with_fast_page(paddr, [&] {
memset(PageTable::fast_page_as_ptr(), 0, PAGE_SIZE);
});
PageTable::with_fast_page(layer0_page.paddr(), [&] {
auto& page_info = PageTable::fast_page_as_sized<PageInfo>(layer1_index);
page_info.set_paddr(paddr);
page_info.set_flags(PageInfo::Flags::Present);
layer1_page = page_info;
});
m_used_pages++;
}
size_t layer2_index = SIZE_MAX;
PageTable::with_fast_page(layer1_page.paddr(), [&] {
for (size_t i = 0; i < PAGE_SIZE / sizeof(TmpInodeInfo); i++)
{
auto& inode_info = PageTable::fast_page_as_sized<TmpInodeInfo>(i);
if (inode_info.mode != 0)
continue;
inode_info = info;
layer2_index = i;
return;
}
});
if (layer2_index != SIZE_MAX)
{
const size_t layer0_offset = layer0_index * inode_infos_per_page * page_infos_per_page;
const size_t layer1_offset = layer1_index * inode_infos_per_page;
const size_t layer2_offset = layer2_index;
return layer0_offset + layer1_offset + layer2_offset + first_inode;
}
}
}
ASSERT_NOT_REACHED();
return ino;
}
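allocate_inode (and allocate_block below) now delegate the tree walk to for_each_indirect_paddr_allocating. A simplified in-memory sketch of that traversal shape, using heap nodes instead of physical pages and fast-page mappings; types and names are illustrative, and the real helper also enforces the m_used_pages budget.

    // Walk an indirection tree of the given depth (depth >= 1; at depth 1 the
    // children of `node` are leaves), allocating missing nodes on the way, and
    // hand each leaf to the callback together with a flag saying whether this
    // call just allocated it, until the callback asks to stop.
    #include <array>
    #include <cstddef>
    #include <functional>
    #include <memory>

    enum class Iteration { Continue, Break };

    struct Node512
    {
        std::array<std::unique_ptr<Node512>, 512> children {};
    };

    Iteration for_each_leaf_allocating(Node512& node, std::size_t depth,
                                       const std::function<Iteration(Node512&, bool)>& callback)
    {
        for (auto& child : node.children)
        {
            const bool just_allocated = !child;
            if (just_allocated)
                child = std::make_unique<Node512>();
            const auto result = (depth == 1)
                ? callback(*child, just_allocated)
                : for_each_leaf_allocating(*child, depth - 1, callback);
            if (result == Iteration::Break)
                return Iteration::Break;
        }
        return Iteration::Continue;
    }

In allocate_block the callback counts leaves until it sees a freshly allocated one; in allocate_inode it scans each leaf page for a free inode slot.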
TmpFileSystem::InodeLocation TmpFileSystem::find_inode(ino_t ino)
@ -230,30 +168,16 @@ namespace Kernel
LockGuard _(m_mutex);
ASSERT(ino >= first_inode);
ASSERT(ino - first_inode < max_inodes);
ASSERT(ino < max_inodes);
constexpr size_t inode_infos_per_page = PAGE_SIZE / sizeof(TmpInodeInfo);
constexpr size_t page_infos_per_page = PAGE_SIZE / sizeof(PageInfo);
const size_t layer0_index = (ino - first_inode) / inode_infos_per_page / page_infos_per_page;
const size_t layer1_index = (ino - first_inode) / inode_infos_per_page % page_infos_per_page;
const size_t layer2_index = (ino - first_inode) % inode_infos_per_page;
ASSERT(layer0_index < page_infos_per_page);
constexpr size_t inodes_per_page = PAGE_SIZE / sizeof(TmpInodeInfo);
PageInfo layer0_page;
PageTable::with_fast_page(m_inode_pages.paddr(), [&] {
layer0_page = PageTable::fast_page_as_sized<PageInfo>(layer0_index);
});
ASSERT(layer0_page.flags() & PageInfo::Flags::Present);
PageInfo layer1_page;
PageTable::with_fast_page(layer0_page.paddr(), [&] {
layer1_page = PageTable::fast_page_as_sized<PageInfo>(layer1_index);
});
ASSERT(layer1_page.flags() & PageInfo::Flags::Present);
size_t index_of_page = (ino - first_inode) / inodes_per_page;
size_t index_in_page = (ino - first_inode) % inodes_per_page;
return {
.paddr = layer1_page.paddr(),
.index = layer2_index,
.paddr = find_indirect(m_inode_pages, index_of_page, 2),
.index = index_in_page
};
}
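A short worked example of the location arithmetic used by find_inode above and generalized by find_indirect below (divisor = entries-per-page raised to depth - 1). The 64-byte TmpInodeInfo and 8-byte PageInfo sizes are assumed purely for illustration.

    // Worked example of the index split used by find_inode / find_indirect.
    #include <cassert>
    #include <cstddef>

    int main()
    {
        constexpr std::size_t inodes_per_page = 4096 / 64;    // 64, assumed TmpInodeInfo size
        constexpr std::size_t first_inode = 1;

        // find_inode: which inode page, and which slot inside it.
        const std::size_t ino = 130;
        assert((ino - first_inode) / inodes_per_page == 2);   // index_of_page
        assert((ino - first_inode) % inodes_per_page == 1);   // index_in_page

        // find_indirect at depth 2: divisor = entries_per_page^(2 - 1).
        constexpr std::size_t entries_per_page = 4096 / 8;    // 512, assumed PageInfo size
        const std::size_t index = 1000;
        assert(index / entries_per_page == 1);                // entry in the root page
        assert(index % entries_per_page == 488);              // index passed one level down
        return 0;
    }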
@ -261,185 +185,145 @@ namespace Kernel
{
LockGuard _(m_mutex);
ASSERT(index >= first_data_page);
ASSERT(index - first_data_page < max_data_pages);
constexpr size_t addresses_per_page = PAGE_SIZE / sizeof(PageInfo);
constexpr size_t page_infos_per_page = PAGE_SIZE / sizeof(PageInfo);
const size_t layer0_index = (index - first_data_page) / (page_infos_per_page - 1) / page_infos_per_page;
const size_t layer1_index = (index - first_data_page) / (page_infos_per_page - 1) % page_infos_per_page;
const size_t layer2_index = (index - first_data_page) % (page_infos_per_page - 1);
ASSERT(layer0_index < page_infos_per_page);
const size_t index_of_page = (index - first_data_page) / addresses_per_page;
const size_t index_in_page = (index - first_data_page) % addresses_per_page;
PageInfo layer0_page;
PageTable::with_fast_page(m_data_pages.paddr(), [&] {
layer0_page = PageTable::fast_page_as_sized<PageInfo>(layer0_index);
});
ASSERT(layer0_page.flags() & PageInfo::Flags::Present);
paddr_t page_containing = find_indirect(m_data_pages, index_of_page, 2);
PageInfo layer1_page;
PageTable::with_fast_page(layer0_page.paddr(), [&] {
layer1_page = PageTable::fast_page_as_sized<PageInfo>(layer1_index);
});
ASSERT(layer1_page.flags() & PageInfo::Flags::Present);
paddr_t page_to_free;
PageTable::with_fast_page(layer1_page.paddr(), [&] {
auto& allocated_pages = PageTable::fast_page_as_sized<size_t>(page_infos_per_page - 1);
ASSERT(allocated_pages > 0);
allocated_pages--;
auto& page_info = PageTable::fast_page_as_sized<PageInfo>(layer2_index);
paddr_t paddr_to_free = 0;
PageTable::with_fast_page(page_containing, [&] {
auto& page_info = PageTable::fast_page_as_sized<PageInfo>(index_in_page);
ASSERT(page_info.flags() & PageInfo::Flags::Present);
page_to_free = page_info.paddr();
paddr_to_free = page_info.paddr();
m_used_pages--;
page_info.set_paddr(0);
page_info.set_flags(0);
});
Heap::get().release_page(page_to_free);
}
paddr_t TmpFileSystem::find_block(size_t index)
{
LockGuard _(m_mutex);
ASSERT(index >= first_data_page);
ASSERT(index - first_data_page < max_data_pages);
constexpr size_t page_infos_per_page = PAGE_SIZE / sizeof(PageInfo);
const size_t layer0_index = (index - first_data_page) / (page_infos_per_page - 1) / page_infos_per_page;
const size_t layer1_index = (index - first_data_page) / (page_infos_per_page - 1) % page_infos_per_page;
const size_t layer2_index = (index - first_data_page) % (page_infos_per_page - 1);
ASSERT(layer0_index < page_infos_per_page);
PageInfo layer0_page;
PageTable::with_fast_page(m_data_pages.paddr(), [&] {
layer0_page = PageTable::fast_page_as_sized<PageInfo>(layer0_index);
});
ASSERT(layer0_page.flags() & PageInfo::Flags::Present);
PageInfo layer1_page;
PageTable::with_fast_page(layer0_page.paddr(), [&] {
layer1_page = PageTable::fast_page_as_sized<PageInfo>(layer1_index);
});
ASSERT(layer1_page.flags() & PageInfo::Flags::Present);
PageInfo layer2_page;
PageTable::with_fast_page(layer1_page.paddr(), [&] {
layer2_page = PageTable::fast_page_as_sized<PageInfo>(layer2_index);
});
ASSERT(layer2_page.flags() & PageInfo::Flags::Present);
return layer2_page.paddr();
Heap::get().release_page(paddr_to_free);
}
BAN::ErrorOr<size_t> TmpFileSystem::allocate_block()
{
LockGuard _(m_mutex);
if (m_used_pages >= m_max_pages)
return BAN::Error::from_errno(ENOSPC);
size_t result = first_data_page;
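// walk the data-page tree in leaf order, allocating any missing pages on the way down;
// the callback breaks at the first leaf whose backing page had to be freshly allocated
// (tracked via the Internal flag further below), so `result` ends up as the index of the
// first previously unused slot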
TRY(for_each_indirect_paddr_allocating(m_data_pages, [&] (paddr_t, bool allocated) {
if (allocated)
return BAN::Iteration::Break;
result++;
return BAN::Iteration::Continue;
}, 3));
return result;
}
const paddr_t new_block = Heap::get().take_free_page();
if (new_block == 0)
return BAN::Error::from_errno(ENOMEM);
PageTable::with_fast_page(new_block, [] {
memset(PageTable::fast_page_as_ptr(), 0, PAGE_SIZE);
});
BAN::ScopeGuard block_deleter([new_block] { Heap::get().release_page(new_block); });
paddr_t TmpFileSystem::find_block(size_t index)
{
LockGuard _(m_mutex);
constexpr size_t page_infos_per_page = PAGE_SIZE / sizeof(PageInfo);
ASSERT(index > 0);
return find_indirect(m_data_pages, index - first_data_page, 3);
}
for (size_t layer0_index = 0; layer0_index < PAGE_SIZE / sizeof(PageInfo); layer0_index++)
paddr_t TmpFileSystem::find_indirect(PageInfo root, size_t index, size_t depth)
{
LockGuard _(m_mutex);
ASSERT(root.flags() & PageInfo::Flags::Present);
if (depth == 0)
{
PageInfo layer0_page;
PageTable::with_fast_page(m_data_pages.paddr(), [&] {
layer0_page = PageTable::fast_page_as_sized<PageInfo>(layer0_index);
ASSERT(index == 0);
return root.paddr();
}
constexpr size_t addresses_per_page = PAGE_SIZE / sizeof(PageInfo);
size_t divisor = 1;
for (size_t i = 1; i < depth; i++)
divisor *= addresses_per_page;
size_t index_of_page = index / divisor;
size_t index_in_page = index % divisor;
ASSERT(index_of_page < addresses_per_page);
PageInfo next;
PageTable::with_fast_page(root.paddr(), [&] {
next = PageTable::fast_page_as_sized<PageInfo>(index_of_page);
});
return find_indirect(next, index_in_page, depth - 1);
}
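// Example (illustrative only, assuming 4 KiB pages and 8-byte PageInfo entries, i.e.
// addresses_per_page == 512): find_indirect(root, 1000, 2) reads entry 1000/512 == 1 of
// the root page, recurses with index 488 at depth 1 to read entry 488 of that page, and
// the final depth-0 call returns that entry's physical address.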
template<TmpFuncs::for_each_indirect_paddr_allocating_callback F>
BAN::ErrorOr<BAN::Iteration> TmpFileSystem::for_each_indirect_paddr_allocating_internal(PageInfo page_info, F callback, size_t depth)
{
LockGuard _(m_mutex);
ASSERT(page_info.flags() & PageInfo::Flags::Present);
if (depth == 0)
{
bool is_new_block = page_info.flags() & PageInfo::Flags::Internal;
return callback(page_info.paddr(), is_new_block);
}
for (size_t i = 0; i < PAGE_SIZE / sizeof(PageInfo); i++)
{
PageInfo next_info;
PageTable::with_fast_page(page_info.paddr(), [&] {
next_info = PageTable::fast_page_as_sized<PageInfo>(i);
});
if (!(layer0_page.flags() & PageInfo::Flags::Present))
if (!(next_info.flags() & PageInfo::Flags::Present))
{
if (m_used_pages + 1 >= m_max_pages)
if (m_used_pages >= m_max_pages)
return BAN::Error::from_errno(ENOSPC);
const paddr_t paddr = Heap::get().take_free_page();
if (paddr == 0)
paddr_t new_paddr = Heap::get().take_free_page();
if (new_paddr == 0)
return BAN::Error::from_errno(ENOMEM);
PageTable::with_fast_page(paddr, [&] {
memset(PageTable::fast_page_as_ptr(), 0, PAGE_SIZE);
});
PageTable::with_fast_page(m_data_pages.paddr(), [&] {
auto& page_info = PageTable::fast_page_as_sized<PageInfo>(layer0_index);
page_info.set_paddr(paddr);
page_info.set_flags(PageInfo::Flags::Present);
layer0_page = page_info;
});
m_used_pages++;
PageTable::with_fast_page(new_paddr, [&] {
memset(PageTable::fast_page_as_ptr(), 0x00, PAGE_SIZE);
});
next_info.set_paddr(new_paddr);
next_info.set_flags(PageInfo::Flags::Present);
PageTable::with_fast_page(page_info.paddr(), [&] {
auto& to_update_info = PageTable::fast_page_as_sized<PageInfo>(i);
to_update_info = next_info;
});
// Don't sync the internal bit to actual memory
next_info.set_flags(PageInfo::Flags::Internal | PageInfo::Flags::Present);
}
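// the Internal bit is kept only in the local copy (the entry written back above carries
// just Present), which lets the depth-0 callback tell leaf pages allocated during this
// walk apart from ones that already existed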
for (size_t layer1_index = 0; layer1_index < PAGE_SIZE / sizeof(PageInfo); layer1_index++)
auto result = TRY(for_each_indirect_paddr_allocating_internal(next_info, callback, depth - 1));
switch (result)
{
PageInfo layer1_page;
PageTable::with_fast_page(layer0_page.paddr(), [&] {
layer1_page = PageTable::fast_page_as_sized<PageInfo>(layer1_index);
});
if (!(layer1_page.flags() & PageInfo::Flags::Present))
{
if (m_used_pages + 1 >= m_max_pages)
return BAN::Error::from_errno(ENOSPC);
const paddr_t paddr = Heap::get().take_free_page();
if (paddr == 0)
return BAN::Error::from_errno(ENOMEM);
PageTable::with_fast_page(paddr, [&] {
memset(PageTable::fast_page_as_ptr(), 0, PAGE_SIZE);
});
PageTable::with_fast_page(layer0_page.paddr(), [&] {
auto& page_info = PageTable::fast_page_as_sized<PageInfo>(layer1_index);
page_info.set_paddr(paddr);
page_info.set_flags(PageInfo::Flags::Present);
layer1_page = page_info;
});
m_used_pages++;
}
size_t layer2_index = SIZE_MAX;
PageTable::with_fast_page(layer1_page.paddr(), [&] {
constexpr size_t pages_per_block = page_infos_per_page - 1;
auto& allocated_pages = PageTable::fast_page_as_sized<size_t>(pages_per_block);
if (allocated_pages == pages_per_block)
return;
for (size_t i = 0; i < pages_per_block; i++)
{
auto& page_info = PageTable::fast_page_as_sized<PageInfo>(i);
if (page_info.flags() & PageInfo::Flags::Present)
continue;
page_info.set_paddr(new_block);
page_info.set_flags(PageInfo::Flags::Present);
allocated_pages++;
layer2_index = i;
return;
}
case BAN::Iteration::Continue:
break;
case BAN::Iteration::Break:
return BAN::Iteration::Break;
default:
ASSERT_NOT_REACHED();
});
if (layer2_index != SIZE_MAX)
{
block_deleter.disable();
m_used_pages++;
const size_t layer0_offset = layer0_index * (page_infos_per_page - 1) * page_infos_per_page;
const size_t layer1_offset = layer1_index * (page_infos_per_page - 1);
const size_t layer2_offset = layer2_index;
return layer0_offset + layer1_offset + layer2_offset + first_data_page;
}
}
}
ASSERT_NOT_REACHED();
return BAN::Iteration::Continue;
}
template<TmpFuncs::for_each_indirect_paddr_allocating_callback F>
BAN::ErrorOr<void> TmpFileSystem::for_each_indirect_paddr_allocating(PageInfo page_info, F callback, size_t depth)
{
LockGuard _(m_mutex);
BAN::Iteration result = TRY(for_each_indirect_paddr_allocating_internal(page_info, callback, depth));
ASSERT(result == BAN::Iteration::Break);
return {};
}
}

View File

@ -50,7 +50,7 @@ namespace Kernel
dev_t TmpInode::dev() const
{
return m_fs.rdev();
return m_fs.dev();
}
BAN::ErrorOr<BAN::RefPtr<TmpInode>> TmpInode::create_from_existing(TmpFileSystem& fs, ino_t ino, const TmpInodeInfo& info)
@ -94,19 +94,12 @@ namespace Kernel
BAN::ErrorOr<void> TmpInode::chmod_impl(mode_t new_mode)
{
ASSERT(!(new_mode & Inode::Mode::TYPE_MASK));
m_inode_info.mode &= Inode::Mode::TYPE_MASK;
ASSERT(!(new_mode & Mode::TYPE_MASK));
m_inode_info.mode &= ~Mode::TYPE_MASK;
m_inode_info.mode |= new_mode;
return {};
}
BAN::ErrorOr<void> TmpInode::chown_impl(uid_t new_uid, gid_t new_gid)
{
m_inode_info.uid = new_uid;
m_inode_info.gid = new_gid;
return {};
}
BAN::ErrorOr<void> TmpInode::utimens_impl(const timespec times[2])
{
if (times[0].tv_nsec != UTIME_OMIT)
@ -124,160 +117,36 @@ namespace Kernel
void TmpInode::free_all_blocks()
{
for (size_t i = 0; i < TmpInodeInfo::direct_block_count; i++)
{
if (m_inode_info.block[i])
m_fs.free_block(m_inode_info.block[i]);
if (size_t block = m_inode_info.block[TmpInodeInfo::direct_block_count + 0])
free_indirect_blocks(block, 1);
if (size_t block = m_inode_info.block[TmpInodeInfo::direct_block_count + 1])
free_indirect_blocks(block, 2);
if (size_t block = m_inode_info.block[TmpInodeInfo::direct_block_count + 2])
free_indirect_blocks(block, 3);
for (auto& block : m_inode_info.block)
block = 0;
}
void TmpInode::free_indirect_blocks(size_t block, uint32_t depth)
{
ASSERT(block != 0);
if (depth == 0)
{
m_fs.free_block(block);
return;
m_inode_info.block[i] = 0;
}
const size_t indices_per_block = blksize() / sizeof(size_t);
for (size_t index = 0; index < indices_per_block; index++)
{
size_t next_block;
m_fs.with_block_buffer(block, [&](BAN::ByteSpan block_buffer) {
next_block = block_buffer.as_span<size_t>()[index];
});
if (next_block == 0)
continue;
free_indirect_blocks(next_block, depth - 1);
}
m_fs.free_block(block);
for (auto block : m_inode_info.block)
ASSERT(block == 0);
}
BAN::Optional<size_t> TmpInode::block_index(size_t data_block_index)
{
if (data_block_index < TmpInodeInfo::direct_block_count)
{
if (m_inode_info.block[data_block_index] == 0)
return {};
ASSERT(data_block_index < TmpInodeInfo::direct_block_count);
if (m_inode_info.block[data_block_index])
return m_inode_info.block[data_block_index];
}
data_block_index -= TmpInodeInfo::direct_block_count;
const size_t indices_per_block = blksize() / sizeof(size_t);
if (data_block_index < indices_per_block)
return block_index_from_indirect(m_inode_info.block[TmpInodeInfo::direct_block_count + 0], data_block_index, 1);
data_block_index -= indices_per_block;
if (data_block_index < indices_per_block * indices_per_block)
return block_index_from_indirect(m_inode_info.block[TmpInodeInfo::direct_block_count + 1], data_block_index, 2);
data_block_index -= indices_per_block * indices_per_block;
if (data_block_index < indices_per_block * indices_per_block * indices_per_block)
return block_index_from_indirect(m_inode_info.block[TmpInodeInfo::direct_block_count + 2], data_block_index, 3);
ASSERT_NOT_REACHED();
}
BAN::Optional<size_t> TmpInode::block_index_from_indirect(size_t block, size_t index, uint32_t depth)
{
if (block == 0)
return {};
ASSERT(depth >= 1);
const size_t indices_per_block = blksize() / sizeof(size_t);
size_t divisor = 1;
for (size_t i = 1; i < depth; i++)
divisor *= indices_per_block;
size_t next_block;
m_fs.with_block_buffer(block, [&](BAN::ByteSpan block_buffer) {
next_block = block_buffer.as_span<size_t>()[(index / divisor) % indices_per_block];
});
if (next_block == 0)
return {};
if (depth == 1)
return next_block;
return block_index_from_indirect(next_block, index, depth - 1);
return {};
}
BAN::ErrorOr<size_t> TmpInode::block_index_with_allocation(size_t data_block_index)
{
if (data_block_index < TmpInodeInfo::direct_block_count)
if (data_block_index >= TmpInodeInfo::direct_block_count)
{
if (m_inode_info.block[data_block_index] == 0)
{
m_inode_info.block[data_block_index] = TRY(m_fs.allocate_block());
m_inode_info.blocks++;
}
return m_inode_info.block[data_block_index];
dprintln("only {} blocks supported :D", TmpInodeInfo::direct_block_count);
return BAN::Error::from_errno(ENOSPC);
}
data_block_index -= TmpInodeInfo::direct_block_count;
const size_t indices_per_block = blksize() / sizeof(size_t);
if (data_block_index < indices_per_block)
return block_index_from_indirect_with_allocation(m_inode_info.block[TmpInodeInfo::direct_block_count + 0], data_block_index, 1);
data_block_index -= indices_per_block;
if (data_block_index < indices_per_block * indices_per_block)
return block_index_from_indirect_with_allocation(m_inode_info.block[TmpInodeInfo::direct_block_count + 1], data_block_index, 2);
data_block_index -= indices_per_block * indices_per_block;
if (data_block_index < indices_per_block * indices_per_block * indices_per_block)
return block_index_from_indirect_with_allocation(m_inode_info.block[TmpInodeInfo::direct_block_count + 2], data_block_index, 3);
ASSERT_NOT_REACHED();
}
BAN::ErrorOr<size_t> TmpInode::block_index_from_indirect_with_allocation(size_t& block, size_t index, uint32_t depth)
{
if (block == 0)
if (m_inode_info.block[data_block_index] == 0)
{
block = TRY(m_fs.allocate_block());
m_inode_info.block[data_block_index] = TRY(m_fs.allocate_block());
m_inode_info.blocks++;
}
ASSERT(depth >= 1);
const size_t indices_per_block = blksize() / sizeof(size_t);
size_t divisor = 1;
for (size_t i = 1; i < depth; i++)
divisor *= indices_per_block;
size_t next_block;
m_fs.with_block_buffer(block, [&](BAN::ByteSpan block_buffer) {
next_block = block_buffer.as_span<size_t>()[(index / divisor) % indices_per_block];
});
if (next_block == 0)
{
next_block = TRY(m_fs.allocate_block());
m_inode_info.blocks++;
m_fs.with_block_buffer(block, [&](BAN::ByteSpan block_buffer) {
block_buffer.as_span<size_t>()[(index / divisor) % indices_per_block] = next_block;
});
}
if (depth == 1)
return next_block;
return block_index_from_indirect_with_allocation(next_block, index, depth - 1);
return m_inode_info.block[data_block_index];
}
/* FILE INODE */
@ -365,9 +234,6 @@ namespace Kernel
BAN::ErrorOr<void> TmpFileInode::truncate_impl(size_t new_size)
{
// FIXME: if size is decreased, we should probably free
// unused blocks
m_inode_info.size = new_size;
return {};
}
@ -422,7 +288,7 @@ namespace Kernel
{
}
BAN::ErrorOr<void> TmpSymlinkInode::set_link_target_impl(BAN::StringView new_target)
BAN::ErrorOr<void> TmpSymlinkInode::set_link_target(BAN::StringView new_target)
{
free_all_blocks();
m_inode_info.size = 0;
@ -634,9 +500,6 @@ namespace Kernel
case Mode::IFREG:
new_inode = TRY(TmpFileInode::create_new(m_fs, mode, uid, gid));
break;
case Mode::IFLNK:
new_inode = TRY(TmpSymlinkInode::create_new(m_fs, mode, uid, gid, ""_sv));
break;
case Mode::IFSOCK:
new_inode = TRY(TmpSocketInode::create_new(m_fs, mode, uid, gid));
break;

View File

@ -1,166 +0,0 @@
#include <BAN/ScopeGuard.h>
#include <kernel/FS/USTARModule.h>
#include <tar.h>
namespace Kernel
{
bool is_ustar_boot_module(const BootModule& module)
{
if (module.start % PAGE_SIZE)
{
dprintln("ignoring non-page-aligned module");
return false;
}
if (module.size < 512)
return false;
bool has_ustar_signature;
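// the "ustar" magic lives at offset 257 of the first 512-byte tar header; module.start is
// already known to be page aligned here, so the header can be inspected through a single
// fast-page mapping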
PageTable::with_fast_page(module.start, [&] {
has_ustar_signature = memcmp(PageTable::fast_page_as_ptr(257), "ustar", 5) == 0;
});
return has_ustar_signature;
}
BAN::ErrorOr<void> unpack_boot_module_into_filesystem(BAN::RefPtr<FileSystem> filesystem, const BootModule& module)
{
ASSERT(is_ustar_boot_module(module));
auto root_inode = filesystem->root_inode();
uint8_t* temp_page = static_cast<uint8_t*>(kmalloc(PAGE_SIZE));
if (temp_page == nullptr)
return BAN::Error::from_errno(ENOMEM);
BAN::ScopeGuard _([temp_page] { kfree(temp_page); });
size_t offset = 0;
while (offset + 512 <= module.size)
{
size_t file_size = 0;
mode_t file_mode = 0;
uid_t file_uid = 0;
gid_t file_gid = 0;
uint8_t file_type = 0;
char file_path[100 + 1 + 155 + 1] {};
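// the fields read below follow the POSIX ustar header layout: mode at offset 100, uid at
// 108, gid at 116, size at 124, typeflag at 156, magic "ustar" at 257 and the path prefix
// at 345; numeric fields are NUL-terminated octal strings, e.g. "0000644" parses to 0644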
PageTable::with_fast_page((module.start + offset) & PAGE_ADDR_MASK, [&] {
const size_t page_off = offset % PAGE_SIZE;
const auto parse_octal =
[page_off](size_t offset, size_t length) -> size_t
{
size_t result = 0;
for (size_t i = 0; i < length; i++)
{
const char ch = PageTable::fast_page_as<char>(page_off + offset + i);
if (ch == '\0')
break;
result = (result * 8) + (ch - '0');
}
return result;
};
if (memcmp(PageTable::fast_page_as_ptr(page_off + 257), "ustar", 5)) {
file_size = SIZE_MAX;
return;
}
memcpy(file_path, PageTable::fast_page_as_ptr(page_off + 345), 155);
const size_t prefix_len = strlen(file_path);
file_path[prefix_len] = '/';
memcpy(file_path + prefix_len + 1, PageTable::fast_page_as_ptr(page_off), 100);
file_mode = parse_octal(100, 8);
file_uid = parse_octal(108, 8);
file_gid = parse_octal(116, 8);
file_size = parse_octal(124, 12);
file_type = PageTable::fast_page_as<char>(page_off + 156);
});
if (file_size == SIZE_MAX)
break;
if (offset + 512 + file_size > module.size)
break;
auto parent_inode = filesystem->root_inode();
auto file_path_parts = TRY(BAN::StringView(file_path).split('/'));
for (size_t i = 0; i < file_path_parts.size() - 1; i++)
parent_inode = TRY(parent_inode->find_inode(file_path_parts[i]));
switch (file_type)
{
case REGTYPE:
case AREGTYPE: file_mode |= Inode::Mode::IFREG; break;
case LNKTYPE: break;
case SYMTYPE: file_mode |= Inode::Mode::IFLNK; break;
case CHRTYPE: file_mode |= Inode::Mode::IFCHR; break;
case BLKTYPE: file_mode |= Inode::Mode::IFBLK; break;
case DIRTYPE: file_mode |= Inode::Mode::IFDIR; break;
case FIFOTYPE: file_mode |= Inode::Mode::IFIFO; break;
default:
ASSERT_NOT_REACHED();
}
auto file_name_sv = file_path_parts.back();
if (file_type == DIRTYPE)
{
TRY(parent_inode->create_directory(file_name_sv, file_mode, file_uid, file_gid));
}
else if (file_type == LNKTYPE)
{
dwarnln("TODO: hardlink");
}
else if (file_type == SYMTYPE)
{
TRY(parent_inode->create_file(file_name_sv, file_mode, file_uid, file_gid));
char link_target[101] {};
const paddr_t paddr = module.start + offset;
PageTable::with_fast_page(paddr & PAGE_ADDR_MASK, [&] {
memcpy(link_target, PageTable::fast_page_as_ptr((paddr % PAGE_SIZE) + 157), 100);
});
if (link_target[0])
{
auto inode = TRY(parent_inode->find_inode(file_name_sv));
TRY(inode->set_link_target(link_target));
}
}
else
{
TRY(parent_inode->create_file(file_name_sv, file_mode, file_uid, file_gid));
if (file_size)
{
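// file contents start 512 bytes past the header; copy them one physical page at a time
// through the fast-page mapping into temp_page and write the in-range slice to the inode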
auto inode = TRY(parent_inode->find_inode(file_name_sv));
size_t nwritten = 0;
while (nwritten < file_size)
{
const paddr_t paddr = module.start + offset + 512 + nwritten;
PageTable::with_fast_page(paddr & PAGE_ADDR_MASK, [&] {
memcpy(temp_page, PageTable::fast_page_as_ptr(), PAGE_SIZE);
});
const size_t page_off = paddr % PAGE_SIZE;
const size_t to_write = BAN::Math::min(file_size - nwritten, PAGE_SIZE - page_off);
TRY(inode->write(nwritten, { temp_page + page_off, to_write }));
nwritten += to_write;
}
}
}
offset += 512 + file_size;
if (auto rem = offset % 512)
offset += 512 - rem;
}
return {};
}
}

View File

@ -3,7 +3,6 @@
#include <kernel/FS/DevFS/FileSystem.h>
#include <kernel/FS/ProcFS/FileSystem.h>
#include <kernel/FS/TmpFS/FileSystem.h>
#include <kernel/FS/USTARModule.h>
#include <kernel/FS/VirtualFileSystem.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Storage/Partition.h>
@ -52,31 +51,7 @@ namespace Kernel
return BAN::RefPtr<BlockDevice>(static_cast<BlockDevice*>(device_inode.ptr()));
}
static BAN::RefPtr<FileSystem> load_fallback_root_filesystem()
{
if (g_boot_info.modules.empty())
panic("No fallback boot modules given");
auto filesystem_or_error = TmpFileSystem::create(-1, 0755, 0, 0);
if (filesystem_or_error.is_error())
panic("Failed to create fallback filesystem: {}", filesystem_or_error.error());
dwarnln("Attempting to load fallback filesystem from {} modules", g_boot_info.modules.size());
auto filesystem = BAN::RefPtr<FileSystem>::adopt(filesystem_or_error.release_value());
for (const auto& module : g_boot_info.modules)
{
if (!is_ustar_boot_module(module))
continue;
if (auto ret = unpack_boot_module_into_filesystem(filesystem, module); ret.is_error())
dwarnln("Failed to unpack boot module: {}", ret.error());
}
return filesystem;
}
static BAN::RefPtr<FileSystem> load_root_filesystem(BAN::StringView root_path)
static BAN::RefPtr<BlockDevice> find_root_device(BAN::StringView root_path)
{
enum class RootType
{
@ -91,26 +66,19 @@ namespace Kernel
{
entry = root_path.substring(9);
if (entry.size() != 36)
{
derrorln("Invalid UUID '{}'", entry);
return load_fallback_root_filesystem();
}
panic("Invalid UUID '{}'", entry);
type = RootType::PartitionUUID;
}
else if (root_path.starts_with("/dev/"_sv))
{
entry = root_path.substring(5);
if (entry.empty() || entry.contains('/'))
{
derrorln("Invalid root path '{}'", root_path);
return load_fallback_root_filesystem();
}
panic("Invalid root path '{}'", root_path);
type = RootType::BlockDeviceName;
}
else
{
derrorln("Unsupported root path format '{}'", root_path);
return load_fallback_root_filesystem();
panic("Unsupported root path format '{}'", root_path);
}
constexpr size_t timeout_ms = 10'000;
@ -131,30 +99,15 @@ namespace Kernel
}
if (!ret.is_error())
{
auto filesystem_or_error = FileSystem::from_block_device(ret.release_value());
if (filesystem_or_error.is_error())
{
derrorln("Could not create filesystem from '{}': {}", root_path, filesystem_or_error.error());
return load_fallback_root_filesystem();
}
return filesystem_or_error.release_value();
}
return ret.release_value();
if (ret.error().get_error_code() != ENOENT)
{
derrorln("Could not open root device '{}': {}", root_path, ret.error());
return load_fallback_root_filesystem();
}
if (i == 4)
dwarnln("Could not find specified root device, waiting for it to get loaded...");
panic("could not open root device '{}': {}", root_path, ret.error());
SystemTimer::get().sleep_ms(sleep_ms);
}
derrorln("Could not find root device '{}' after {} ms", root_path, timeout_ms);
return load_fallback_root_filesystem();
panic("could not find root device '{}' after {} ms", root_path, timeout_ms);
}
void VirtualFileSystem::initialize(BAN::StringView root_path)
@ -162,9 +115,13 @@ namespace Kernel
ASSERT(!s_instance);
s_instance = MUST(BAN::RefPtr<VirtualFileSystem>::create());
s_instance->m_root_fs = load_root_filesystem(root_path);
if (!s_instance->m_root_fs)
panic("Could not load root filesystem");
auto root_device = find_root_device(root_path);
ASSERT(root_device);
auto filesystem_result = FileSystem::from_block_device(root_device);
if (filesystem_result.is_error())
panic("Could not create filesystem from '{}': {}", root_path, filesystem_result.error());
s_instance->m_root_fs = filesystem_result.release_value();
Credentials root_creds { 0, 0, 0, 0 };
MUST(s_instance->mount(root_creds, &DevFileSystem::get(), "/dev"_sv));

View File

@ -1,11 +1,8 @@
#include <kernel/GDT.h>
#include <kernel/Memory/Types.h>
#include <kernel/Processor.h>
#include <string.h>
extern "C" uint8_t g_boot_stack_top[];
namespace Kernel
{
@ -65,7 +62,6 @@ namespace Kernel
{
memset(&m_tss, 0x00, sizeof(TaskStateSegment));
m_tss.iopb = sizeof(TaskStateSegment);
m_tss.ist1 = reinterpret_cast<vaddr_t>(g_boot_stack_top);
uintptr_t base = reinterpret_cast<uintptr_t>(&m_tss);

View File

@ -247,7 +247,7 @@ namespace Kernel
);
}
if (Thread::current().has_process())
if (Thread::current().has_process() && Process::current().is_userspace())
process_name = Process::current().name();
#if ARCH(x86_64)
@ -401,7 +401,7 @@ namespace Kernel
Thread::current().load_sse();
}
void IDT::register_interrupt_handler(uint8_t index, void (*handler)(), uint8_t ist)
void IDT::register_interrupt_handler(uint8_t index, void (*handler)())
{
auto& desc = m_idt[index];
memset(&desc, 0, sizeof(GateDescriptor));
@ -412,7 +412,6 @@ namespace Kernel
desc.offset2 = (uint32_t)((uintptr_t)handler >> 32);
#endif
desc.IST = ist;
desc.selector = 0x08;
desc.flags = 0x8E;
}
@ -454,9 +453,6 @@ namespace Kernel
ISR_LIST_X
#undef X
idt->register_interrupt_handler(DoubleFault, isr8, 1);
static_assert(DoubleFault == 8);
#define X(num) idt->register_interrupt_handler(IRQ_VECTOR_BASE + num, irq ## num);
IRQ_LIST_X
#undef X

View File

@ -12,7 +12,7 @@
namespace Kernel::Input
{
static constexpr uint64_t s_ps2_timeout_ms = 300;
static constexpr uint64_t s_ps2_timeout_ms = 100;
static PS2Controller* s_instance = nullptr;
@ -238,15 +238,6 @@ namespace Kernel::Input
return *s_instance;
}
struct PS2DeviceInitInfo
{
PS2Controller* controller;
bool valid_ports[2];
uint8_t scancode_set;
uint8_t config;
BAN::Atomic<bool> thread_started;
};
BAN::ErrorOr<void> PS2Controller::initialize_impl(uint8_t scancode_set)
{
constexpr size_t iapc_flag_off = offsetof(ACPI::FADT, iapc_boot_arch);
@ -324,54 +315,6 @@ namespace Kernel::Input
if (!valid_ports[0] && !valid_ports[1])
return {};
// Reserve IRQs
if (valid_ports[0] && InterruptController::get().reserve_irq(PS2::IRQ::DEVICE0).is_error())
{
dwarnln("Could not reserve irq for PS/2 port 1");
valid_ports[0] = false;
}
if (valid_ports[1] && InterruptController::get().reserve_irq(PS2::IRQ::DEVICE1).is_error())
{
dwarnln("Could not reserve irq for PS/2 port 2");
valid_ports[1] = false;
}
PS2DeviceInitInfo info {
.controller = this,
.valid_ports = { valid_ports[0], valid_ports[1] },
.scancode_set = scancode_set,
.config = config,
.thread_started { false },
};
auto* init_thread = TRY(Thread::create_kernel(
[](void* info) {
static_cast<PS2DeviceInitInfo*>(info)->controller->device_initialize_task(info);
}, &info
));
TRY(Processor::scheduler().add_thread(init_thread));
while (!info.thread_started)
Processor::pause();
return {};
}
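// PS2DeviceInitInfo lives on initialize_impl()'s stack; the spawned task copies the fields
// it needs and only then sets thread_started, which is what allows initialize_impl() to
// return safely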
void PS2Controller::device_initialize_task(void* _info)
{
bool valid_ports[2];
uint8_t scancode_set;
uint8_t config;
{
auto& info = *static_cast<PS2DeviceInitInfo*>(_info);
valid_ports[0] = info.valid_ports[0];
valid_ports[1] = info.valid_ports[1];
scancode_set = info.scancode_set;
config = info.config;
info.thread_started = true;
}
// Initialize devices
for (uint8_t device = 0; device < 2; device++)
{
@ -382,7 +325,7 @@ namespace Kernel::Input
dwarnln_if(DEBUG_PS2, "PS/2 device enable failed: {}", ret.error());
continue;
}
if (auto res = identify_device(device, scancode_set); res.is_error())
if (auto res = initialize_device(device, scancode_set); res.is_error())
{
dwarnln_if(DEBUG_PS2, "PS/2 device initialization failed: {}", res.error());
(void)send_command(device == 0 ? PS2::Command::DISABLE_FIRST_PORT : PS2::Command::DISABLE_SECOND_PORT);
@ -390,8 +333,20 @@ namespace Kernel::Input
}
}
// Reserve IRQs
if (m_devices[0] && InterruptController::get().reserve_irq(PS2::IRQ::DEVICE0).is_error())
{
dwarnln("Could not reserve irq for PS/2 port 1");
m_devices[0].clear();
}
if (m_devices[1] && InterruptController::get().reserve_irq(PS2::IRQ::DEVICE1).is_error())
{
dwarnln("Could not reserve irq for PS/2 port 2");
m_devices[1].clear();
}
if (!m_devices[0] && !m_devices[1])
return;
return {};
// Enable irqs on valid devices
if (m_devices[0])
@ -407,21 +362,21 @@ namespace Kernel::Input
config |= PS2::Config::INTERRUPT_SECOND_PORT;
}
if (auto ret = send_command(PS2::Command::WRITE_CONFIG, config); ret.is_error())
{
dwarnln("PS2 failed to enable interrupts: {}", ret.error());
m_devices[0].clear();
m_devices[1].clear();
return;
}
TRY(send_command(PS2::Command::WRITE_CONFIG, config));
// Send device initialization sequence after interrupts are enabled
for (uint8_t i = 0; i < 2; i++)
if (m_devices[i])
m_devices[i]->send_initialize();
{
if (!m_devices[i])
continue;
m_devices[i]->send_initialize();
DevFileSystem::get().add_device(m_devices[i]);
}
return {};
}
BAN::ErrorOr<void> PS2Controller::identify_device(uint8_t device, uint8_t scancode_set)
BAN::ErrorOr<void> PS2Controller::initialize_device(uint8_t device, uint8_t scancode_set)
{
// Reset device
TRY(device_send_byte_and_wait_ack(device, PS2::DeviceCommand::RESET));

View File

@ -9,14 +9,7 @@ namespace Kernel::Input
PS2Device::PS2Device(PS2Controller& controller, InputDevice::Type type)
: InputDevice(type)
, m_controller(controller)
{
DevFileSystem::get().add_device(this);
}
PS2Device::~PS2Device()
{
DevFileSystem::get().remove_device(this);
}
{ }
bool PS2Device::append_command_queue(uint8_t command, uint8_t response_size)
{

View File

@ -58,13 +58,9 @@ namespace Kernel
if (entry.type != MemoryMapEntry::Type::Available)
continue;
// FIXME: only reserve kernel area and modules, not everything from 0 -> kernel end
paddr_t start = entry.address;
if (start < (vaddr_t)g_kernel_end - KERNEL_OFFSET + g_boot_info.kernel_paddr)
start = (vaddr_t)g_kernel_end - KERNEL_OFFSET + g_boot_info.kernel_paddr;
for (const auto& module : g_boot_info.modules)
if (start < module.start + module.size)
start = module.start + module.size;
if (auto rem = start % PAGE_SIZE)
start += PAGE_SIZE - rem;

View File

@ -4,30 +4,21 @@
namespace Kernel
{
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr(PageTable& page_table, vaddr_t vaddr, size_t size, PageTable::flags_t flags, bool preallocate_pages, bool add_guard_pages)
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr(PageTable& page_table, vaddr_t vaddr, size_t size, PageTable::flags_t flags, bool preallocate_pages)
{
ASSERT(size % PAGE_SIZE == 0);
ASSERT(vaddr % PAGE_SIZE == 0);
ASSERT(vaddr > 0);
if (add_guard_pages)
{
vaddr -= PAGE_SIZE;
size += 2 * PAGE_SIZE;
}
auto result = TRY(BAN::UniqPtr<VirtualRange>::create(page_table, preallocate_pages, add_guard_pages, vaddr, size, flags));
auto result = TRY(BAN::UniqPtr<VirtualRange>::create(page_table, preallocate_pages, vaddr, size, flags));
ASSERT(page_table.reserve_range(vaddr, size));
TRY(result->initialize());
return result;
}
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr_range(PageTable& page_table, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t size, PageTable::flags_t flags, bool preallocate_pages, bool add_guard_pages)
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr_range(PageTable& page_table, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t size, PageTable::flags_t flags, bool preallocate_pages)
{
if (add_guard_pages)
size += 2 * PAGE_SIZE;
ASSERT(size % PAGE_SIZE == 0);
ASSERT(vaddr_start > 0);
ASSERT(vaddr_start + size <= vaddr_end);
@ -40,13 +31,13 @@ namespace Kernel
ASSERT(vaddr_start < vaddr_end);
ASSERT(vaddr_end - vaddr_start + 1 >= size / PAGE_SIZE);
const vaddr_t vaddr = page_table.reserve_free_contiguous_pages(size / PAGE_SIZE, vaddr_start, vaddr_end);
vaddr_t vaddr = page_table.reserve_free_contiguous_pages(size / PAGE_SIZE, vaddr_start, vaddr_end);
if (vaddr == 0)
return BAN::Error::from_errno(ENOMEM);
ASSERT(vaddr >= vaddr_start);
ASSERT(vaddr + size <= vaddr_end);
auto result_or_error = BAN::UniqPtr<VirtualRange>::create(page_table, preallocate_pages, add_guard_pages, vaddr, size, flags);
auto result_or_error = BAN::UniqPtr<VirtualRange>::create(page_table, preallocate_pages, vaddr, size, flags);
if (result_or_error.is_error())
{
page_table.unmap_range(vaddr, size);
@ -59,10 +50,9 @@ namespace Kernel
return result;
}
VirtualRange::VirtualRange(PageTable& page_table, bool preallocated, bool has_guard_pages, vaddr_t vaddr, size_t size, PageTable::flags_t flags)
VirtualRange::VirtualRange(PageTable& page_table, bool preallocated, vaddr_t vaddr, size_t size, PageTable::flags_t flags)
: m_page_table(page_table)
, m_preallocated(preallocated)
, m_has_guard_pages(has_guard_pages)
, m_vaddr(vaddr)
, m_size(size)
, m_flags(flags)
@ -80,26 +70,26 @@ namespace Kernel
BAN::ErrorOr<void> VirtualRange::initialize()
{
TRY(m_paddrs.resize(size() / PAGE_SIZE, 0));
TRY(m_paddrs.resize(m_size / PAGE_SIZE, 0));
if (!m_preallocated)
return {};
const size_t page_count = size() / PAGE_SIZE;
const size_t page_count = m_size / PAGE_SIZE;
for (size_t i = 0; i < page_count; i++)
{
m_paddrs[i] = Heap::get().take_free_page();
if (m_paddrs[i] == 0)
return BAN::Error::from_errno(ENOMEM);
m_page_table.map_page_at(m_paddrs[i], vaddr() + i * PAGE_SIZE, m_flags);
m_page_table.map_page_at(m_paddrs[i], m_vaddr + i * PAGE_SIZE, m_flags);
}
if (&PageTable::current() == &m_page_table || &PageTable::kernel() == &m_page_table)
memset(reinterpret_cast<void*>(vaddr()), 0, size());
memset(reinterpret_cast<void*>(m_vaddr), 0, m_size);
else
{
const size_t page_count = size() / PAGE_SIZE;
for (size_t i = m_has_guard_pages; i < page_count; i++)
const size_t page_count = m_size / PAGE_SIZE;
for (size_t i = 0; i < page_count; i++)
{
PageTable::with_fast_page(m_paddrs[i], [&] {
memset(PageTable::fast_page_as_ptr(), 0, PAGE_SIZE);
@ -117,10 +107,10 @@ namespace Kernel
SpinLockGuard _(m_lock);
auto result = TRY(create_to_vaddr(page_table, vaddr(), size(), m_flags, m_preallocated, m_has_guard_pages));
auto result = TRY(create_to_vaddr(page_table, m_vaddr, m_size, m_flags, m_preallocated));
const size_t page_count = size() / PAGE_SIZE;
for (size_t i = m_has_guard_pages; i < page_count; i++)
const size_t page_count = m_size / PAGE_SIZE;
for (size_t i = 0; i < page_count; i++)
{
if (m_paddrs[i] == 0)
continue;
@ -129,11 +119,11 @@ namespace Kernel
result->m_paddrs[i] = Heap::get().take_free_page();
if (result->m_paddrs[i] == 0)
return BAN::Error::from_errno(ENOMEM);
result->m_page_table.map_page_at(result->m_paddrs[i], vaddr() + i * PAGE_SIZE, m_flags);
result->m_page_table.map_page_at(result->m_paddrs[i], m_vaddr + i * PAGE_SIZE, m_flags);
}
PageTable::with_fast_page(result->m_paddrs[i], [&] {
memcpy(PageTable::fast_page_as_ptr(), reinterpret_cast<void*>(vaddr() + i * PAGE_SIZE), PAGE_SIZE);
memcpy(PageTable::fast_page_as_ptr(), reinterpret_cast<void*>(m_vaddr + i * PAGE_SIZE), PAGE_SIZE);
});
}
@ -147,7 +137,7 @@ namespace Kernel
ASSERT(contains(vaddr));
ASSERT(&PageTable::current() == &m_page_table);
const size_t index = (vaddr - this->vaddr()) / PAGE_SIZE;
const size_t index = (vaddr - m_vaddr) / PAGE_SIZE;
ASSERT(m_paddrs[index] == 0);
SpinLockGuard _(m_lock);

View File

@ -18,14 +18,14 @@ namespace Kernel
BAN::ErrorOr<BAN::UniqPtr<ARPTable>> ARPTable::create()
{
auto arp_table = TRY(BAN::UniqPtr<ARPTable>::create());
arp_table->m_thread = TRY(Thread::create_kernel(
arp_table->m_process = Process::create_kernel(
[](void* arp_table_ptr)
{
auto& arp_table = *reinterpret_cast<ARPTable*>(arp_table_ptr);
arp_table.packet_handle_task();
}, arp_table.ptr()
));
TRY(Processor::scheduler().add_thread(arp_table->m_thread));
);
ASSERT(arp_table->m_process);
return arp_table;
}
@ -35,9 +35,9 @@ namespace Kernel
ARPTable::~ARPTable()
{
if (m_thread)
m_thread->add_signal(SIGKILL);
m_thread = nullptr;
if (m_process)
m_process->exit(0, SIGKILL);
m_process = nullptr;
}
BAN::ErrorOr<BAN::MACAddress> ARPTable::get_mac_from_ipv4(NetworkInterface& interface, BAN::IPv4Address ipv4_address)

View File

@ -21,21 +21,21 @@ namespace Kernel
BAN::ErrorOr<BAN::UniqPtr<IPv4Layer>> IPv4Layer::create()
{
auto ipv4_manager = TRY(BAN::UniqPtr<IPv4Layer>::create());
ipv4_manager->m_thread = TRY(Thread::create_kernel(
ipv4_manager->m_process = Process::create_kernel(
[](void* ipv4_manager_ptr)
{
auto& ipv4_manager = *reinterpret_cast<IPv4Layer*>(ipv4_manager_ptr);
ipv4_manager.packet_handle_task();
}, ipv4_manager.ptr()
));
TRY(Processor::scheduler().add_thread(ipv4_manager->m_thread));
);
ASSERT(ipv4_manager->m_process);
ipv4_manager->m_pending_packet_buffer = TRY(VirtualRange::create_to_vaddr_range(
PageTable::kernel(),
KERNEL_OFFSET,
~(uintptr_t)0,
pending_packet_buffer_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, false
true
));
ipv4_manager->m_arp_table = TRY(ARPTable::create());
return ipv4_manager;
@ -46,9 +46,9 @@ namespace Kernel
IPv4Layer::~IPv4Layer()
{
if (m_thread)
m_thread->add_signal(SIGKILL);
m_thread = nullptr;
if (m_process)
m_process->exit(0, SIGKILL);
m_process = nullptr;
}
void IPv4Layer::add_ipv4_header(BAN::ByteSpan packet, BAN::IPv4Address src_ipv4, BAN::IPv4Address dst_ipv4, uint8_t protocol) const

View File

@ -16,7 +16,7 @@ namespace Kernel
BAN::numeric_limits<vaddr_t>::max(),
buffer_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, false
true
));
loopback->set_ipv4_address({ 127, 0, 0, 1 });
loopback->set_netmask({ 255, 0, 0, 0 });

View File

@ -1,7 +1,6 @@
#include <kernel/Lock/LockGuard.h>
#include <kernel/Networking/NetworkManager.h>
#include <kernel/Networking/TCPSocket.h>
#include <kernel/Process.h>
#include <kernel/Random.h>
#include <kernel/Timer/Timer.h>
@ -32,7 +31,7 @@ namespace Kernel
~(vaddr_t)0,
s_recv_window_buffer_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, false
true
));
socket->m_recv_window.scale_shift = PAGE_SIZE_SHIFT; // use PAGE_SIZE windows
socket->m_send_window.buffer = TRY(VirtualRange::create_to_vaddr_range(
@ -41,15 +40,14 @@ namespace Kernel
~(vaddr_t)0,
s_send_window_buffer_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, false
true
));
socket->m_thread = TRY(Thread::create_kernel(
socket->m_process = Process::create_kernel(
[](void* socket_ptr)
{
reinterpret_cast<TCPSocket*>(socket_ptr)->process_task();
}, socket.ptr()
));
TRY(Processor::scheduler().add_thread(socket->m_thread));
);
// hack to keep socket alive until its process starts
socket->ref();
return socket;
@ -65,7 +63,7 @@ namespace Kernel
TCPSocket::~TCPSocket()
{
ASSERT(!is_bound());
ASSERT(m_thread == nullptr);
ASSERT(m_process == nullptr);
dprintln_if(DEBUG_TCP, "Socket destroyed");
}
@ -622,7 +620,7 @@ namespace Kernel
dprintln_if(DEBUG_TCP, "Socket unbound");
}
m_thread = nullptr;
m_process = nullptr;
}
void TCPSocket::remove_listen_child(BAN::RefPtr<TCPSocket> socket)
@ -654,7 +652,7 @@ namespace Kernel
LockGuard _(m_mutex);
while (m_thread)
while (m_process)
{
const uint64_t current_ms = SystemTimer::get().ms_since_boot();

View File

@ -17,7 +17,7 @@ namespace Kernel
~(uintptr_t)0,
packet_buffer_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, false
true
));
return socket;
}

View File

@ -3,7 +3,6 @@
#include <kernel/Lock/SpinLockAsMutex.h>
#include <kernel/Networking/NetworkManager.h>
#include <kernel/Networking/UNIX/Socket.h>
#include <kernel/Process.h>
#include <kernel/Scheduler.h>
#include <fcntl.h>
@ -29,7 +28,7 @@ namespace Kernel
~(uintptr_t)0,
s_packet_buffer_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, false
true
));
return socket;
}

View File

@ -3,7 +3,6 @@
#include <kernel/Lock/LockGuard.h>
#include <kernel/Networking/NetworkManager.h>
#include <kernel/OpenFileDescriptorSet.h>
#include <kernel/Process.h>
#include <fcntl.h>
#include <sys/file.h>

View File

@ -97,6 +97,21 @@ namespace Kernel
MUST(Processor::scheduler().add_thread(thread));
}
Process* Process::create_kernel()
{
auto* process = create_process({ 0, 0, 0, 0 }, 0);
return process;
}
Process* Process::create_kernel(entry_t entry, void* data)
{
auto* process = create_process({ 0, 0, 0, 0 }, 0);
auto* thread = MUST(Thread::create_kernel(entry, data, process));
process->add_thread(thread);
process->register_to_scheduler();
return process;
}
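// the parameterless overload creates an empty kernel process for callers that attach their
// own threads; the (entry, data) overload also creates the initial kernel thread and
// registers the process with the scheduler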
BAN::ErrorOr<Process*> Process::create_userspace(const Credentials& credentials, BAN::StringView path, BAN::Span<BAN::StringView> arguments)
{
auto* process = create_process(credentials, 0);
@ -594,6 +609,7 @@ namespace Kernel
forked->m_page_table = BAN::move(page_table);
forked->m_open_file_descriptors = BAN::move(*open_file_descriptors);
forked->m_mapped_regions = BAN::move(mapped_regions);
forked->m_is_userspace = m_is_userspace;
forked->m_has_called_exec = false;
memcpy(forked->m_signal_handlers, m_signal_handlers, sizeof(m_signal_handlers));

View File

@ -4,7 +4,6 @@
#include <kernel/Lock/Mutex.h>
#include <kernel/Process.h>
#include <kernel/Scheduler.h>
#include <kernel/SchedulerQueueNode.h>
#include <kernel/Thread.h>
#include <kernel/Timer/Timer.h>
@ -119,7 +118,7 @@ namespace Kernel
BAN::ErrorOr<void> Scheduler::initialize()
{
m_idle_thread = TRY(Thread::create_kernel([](void*) { asm volatile("1: hlt; jmp 1b"); }, nullptr));
m_idle_thread = TRY(Thread::create_kernel([](void*) { asm volatile("1: hlt; jmp 1b"); }, nullptr, nullptr));
ASSERT(m_idle_thread);
size_t processor_index = 0;
@ -308,11 +307,8 @@ namespace Kernel
while (!m_block_queue.empty() && current_ns >= m_block_queue.front()->wake_time_ns)
{
auto* node = m_block_queue.pop_front();
{
SpinLockGuard _(node->blocker_lock);
if (node->blocker)
node->blocker->remove_blocked_thread(node);
}
if (node->blocker)
node->blocker->remove_blocked_thread(node);
node->blocked = false;
update_most_loaded_node_queue(node, &m_run_queue);
m_run_queue.add_thread_to_back(node);
@ -340,11 +336,8 @@ namespace Kernel
return;
if (node != m_current)
m_block_queue.remove_node(node);
{
SpinLockGuard _(node->blocker_lock);
if (node->blocker)
node->blocker->remove_blocked_thread(node);
}
if (node->blocker)
node->blocker->remove_blocked_thread(node);
node->blocked = false;
if (node != m_current)
m_run_queue.add_thread_to_back(node);
@ -625,13 +618,8 @@ namespace Kernel
m_current->blocked = true;
m_current->wake_time_ns = wake_time_ns;
{
SpinLockGuard _(m_current->blocker_lock);
if (blocker)
blocker->add_thread_to_block_queue(m_current);
}
if (blocker)
blocker->add_thread_to_block_queue(m_current);
update_most_loaded_node_queue(m_current, &m_block_queue);
uint32_t lock_depth = 0;
@ -654,7 +642,10 @@ namespace Kernel
void Scheduler::unblock_thread(Thread* thread)
{
auto state = Processor::get_interrupt_state();
Processor::set_interrupt_state(InterruptState::Disabled);
unblock_thread(thread->m_scheduler_node);
Processor::set_interrupt_state(state);
}
Thread& Scheduler::current_thread()

View File

@ -126,7 +126,26 @@ namespace Kernel
{
if (io_read(ATA_PORT_STATUS) & ATA_STATUS_ERR)
dprintln("ATA Error: {}", error());
m_thread_blocker.unblock();
bool expected { false };
[[maybe_unused]] bool success = m_has_got_irq.compare_exchange(expected, true);
ASSERT(success);
}
BAN::ErrorOr<void> ATABus::block_until_irq()
{
const uint64_t timeout_ms = SystemTimer::get().ms_since_boot() + s_ata_timeout_ms;
bool expected { true };
while (!m_has_got_irq.compare_exchange(expected, false))
{
if (SystemTimer::get().ms_since_boot() >= timeout_ms)
return BAN::Error::from_errno(ETIMEDOUT);
Processor::pause();
expected = true;
}
return {};
}
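// the IRQ handler above publishes an interrupt by flipping m_has_got_irq from false to
// true; block_until_irq() consumes it with the opposite compare-exchange, re-arming
// `expected` on every iteration since a failed compare_exchange (presumably, as with
// std::atomic) overwrites it with the observed value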
uint8_t ATABus::io_read(uint16_t port)
@ -173,30 +192,22 @@ namespace Kernel
for (uint32_t i = 0; i < 4; i++)
io_read(ATA_PORT_ALT_STATUS);
const uint64_t start_ms = SystemTimer::get().ms_since_boot();
const uint64_t timeout_ms = start_ms + s_ata_timeout_ms;
uint64_t timeout = SystemTimer::get().ms_since_boot() + s_ata_timeout_ms;
for (;;)
uint8_t status;
while ((status = io_read(ATA_PORT_STATUS)) & ATA_STATUS_BSY)
if (SystemTimer::get().ms_since_boot() >= timeout)
return BAN::Error::from_errno(ETIMEDOUT);
while (wait_drq && !(status & ATA_STATUS_DRQ))
{
const uint8_t status = io_read(ATA_PORT_ALT_STATUS);
if (status & ATA_STATUS_BSY)
goto drive_not_ready;
if (!wait_drq || (status & ATA_STATUS_DRQ))
break;
if (SystemTimer::get().ms_since_boot() >= timeout)
return BAN::Error::from_errno(ETIMEDOUT);
if (status & ATA_STATUS_ERR)
return error();
if (status & ATA_STATUS_DF)
return BAN::Error::from_errno(EIO);
drive_not_ready:
const uint64_t current_ms = SystemTimer::get().ms_since_boot();
if (current_ms >= timeout_ms)
return BAN::Error::from_errno(ETIMEDOUT);
// NOTE: poll for 5 milliseconds, then just block
// until timeout or irq
if (current_ms < start_ms + 5)
continue;
m_thread_blocker.block_with_timeout_ms(timeout_ms - current_ms, nullptr);
status = io_read(ATA_PORT_STATUS);
}
return {};
@ -238,7 +249,7 @@ namespace Kernel
for (uint32_t sector = 0; sector < sector_count; sector++)
{
TRY(wait(true));
TRY(block_until_irq());
read_buffer(ATA_PORT_DATA, (uint16_t*)buffer.data() + sector * device.words_per_sector(), device.words_per_sector());
}
@ -258,12 +269,12 @@ namespace Kernel
for (uint32_t sector = 0; sector < sector_count; sector++)
{
TRY(wait(true));
write_buffer(ATA_PORT_DATA, (uint16_t*)buffer.data() + sector * device.words_per_sector(), device.words_per_sector());
TRY(block_until_irq());
}
TRY(wait(false));
io_write(ATA_PORT_COMMAND, ATA_COMMAND_CACHE_FLUSH);
TRY(block_until_irq());
return {};
}
@ -299,10 +310,9 @@ namespace Kernel
io_lba2 = (cylinder >> 8) & 0xFF;
}
TRY(wait(false));
io_write(ATA_PORT_DRIVE_SELECT, io_select);
select_delay();
io_write(ATA_PORT_CONTROL, 0);
io_write(ATA_PORT_SECTOR_COUNT, sector_count);
io_write(ATA_PORT_LBA0, io_lba0);

View File

@ -79,7 +79,9 @@ namespace Kernel
for (auto& pixel : m_cursor_data)
pixel = color.rgb;
m_framebuffer_device->fill(color.rgb);
for (uint32_t y = 0; y < m_framebuffer_device->height(); y++)
for (uint32_t x = 0; x < m_framebuffer_device->width(); x++)
m_framebuffer_device->set_pixel(x, y, color.rgb);
m_framebuffer_device->sync_pixels_full();
if (m_cursor_shown)

View File

@ -44,8 +44,7 @@ namespace Kernel
PageTable::kernel(),
KERNEL_OFFSET, static_cast<vaddr_t>(-1),
16 * PAGE_SIZE,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, false
PageTable::Flags::ReadWrite | PageTable::Flags::Present, true
));
auto pts_master = TRY(BAN::RefPtr<PseudoTerminalMaster>::create(BAN::move(pts_master_buffer), mode, uid, gid));
DevFileSystem::get().remove_from_cache(pts_master);

View File

@ -161,7 +161,7 @@ namespace Kernel
static bool initialized = false;
ASSERT(!initialized);
auto* thread = MUST(Thread::create_kernel(&TTY::keyboard_task, nullptr));
auto* thread = MUST(Thread::create_kernel(&TTY::keyboard_task, nullptr, nullptr));
MUST(Processor::scheduler().add_thread(thread));
DevFileSystem::get().add_inode("tty", MUST(DevTTY::create(0666, 0, 0)));

View File

@ -69,10 +69,10 @@ namespace Kernel
s_default_sse_storage_initialized = true;
}
BAN::ErrorOr<Thread*> Thread::create_kernel(entry_t entry, void* data)
BAN::ErrorOr<Thread*> Thread::create_kernel(entry_t entry, void* data, Process* process)
{
// Create the thread object
Thread* thread = new Thread(s_next_tid++, nullptr);
Thread* thread = new Thread(s_next_tid++, process);
if (thread == nullptr)
return BAN::Error::from_errno(ENOMEM);
BAN::ScopeGuard thread_deleter([thread] { delete thread; });
@ -84,7 +84,7 @@ namespace Kernel
~(uintptr_t)0,
kernel_stack_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, true
true
));
// Initialize stack for returning
@ -124,7 +124,7 @@ namespace Kernel
0x200000, USERSPACE_END,
kernel_stack_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, true
true
));
thread->m_userspace_stack = TRY(VirtualRange::create_to_vaddr_range(
@ -132,7 +132,7 @@ namespace Kernel
0x200000, USERSPACE_END,
userspace_stack_size,
PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, true
true
));
thread_deleter.disable();
@ -285,9 +285,6 @@ namespace Kernel
// auxv
needed_size += auxv.size() * sizeof(LibELF::AuxiliaryVector);
if (auto rem = needed_size % alignof(char*))
needed_size += alignof(char*) - rem;
if (needed_size > m_userspace_stack->size())
return BAN::Error::from_errno(ENOBUFS);
@ -361,6 +358,8 @@ namespace Kernel
stack_push_str(envp[i]);
}
ASSERT(vaddr == userspace_stack_top());
setup_exec(entry, userspace_stack_top() - needed_size);
return {};

View File

@ -1,5 +1,4 @@
#include <kernel/Processor.h>
#include <kernel/SchedulerQueueNode.h>
#include <kernel/ThreadBlocker.h>
#include <kernel/Timer/Timer.h>
@ -23,60 +22,71 @@ namespace Kernel
void ThreadBlocker::unblock()
{
decltype(m_block_chain) temp_block_chain;
size_t temp_block_chain_length { 0 };
SchedulerQueue::Node* block_chain;
{
SpinLockGuard _(m_lock);
for (size_t i = 0; i < m_block_chain_length; i++)
temp_block_chain[i] = m_block_chain[i];
temp_block_chain_length = m_block_chain_length;
m_block_chain_length = 0;
block_chain = m_block_chain;
m_block_chain = nullptr;
}
for (size_t i = 0; i < temp_block_chain_length; i++)
Processor::scheduler().unblock_thread(temp_block_chain[i]);
for (auto* node = block_chain; node;)
{
ASSERT(node->blocked);
auto* next = node->block_chain_next;
node->blocker = nullptr;
node->block_chain_next = nullptr;
node->block_chain_prev = nullptr;
Processor::scheduler().unblock_thread(node);
node = next;
}
}
void ThreadBlocker::add_thread_to_block_queue(SchedulerQueue::Node* node)
{
ASSERT(node->blocker_lock.current_processor_has_lock());
SpinLockGuard _(m_lock);
ASSERT(m_block_chain_length < sizeof(m_block_chain) / sizeof(m_block_chain[0]));
ASSERT(node);
ASSERT(node->blocked);
ASSERT(node->blocker == nullptr);
ASSERT(node->block_chain_prev == nullptr);
ASSERT(node->block_chain_next == nullptr);
for (size_t i = 0 ; i < m_block_chain_length; i++)
ASSERT(m_block_chain[i] != node);
m_block_chain[m_block_chain_length++] = node;
SpinLockGuard _(m_lock);
node->blocker = this;
node->block_chain_prev = nullptr;
node->block_chain_next = m_block_chain;
if (m_block_chain)
m_block_chain->block_chain_prev = node;
m_block_chain = node;
}
void ThreadBlocker::remove_blocked_thread(SchedulerQueue::Node* node)
{
ASSERT(node->blocker_lock.current_processor_has_lock());
SpinLockGuard _(m_lock);
ASSERT(node);
ASSERT(node->blocked);
ASSERT(node->blocker == this);
for (size_t i = 0 ; i < m_block_chain_length; i++)
if (node == m_block_chain)
{
if (m_block_chain[i] != node)
continue;
for (size_t j = i + 1; j < m_block_chain_length; j++)
m_block_chain[j - 1] = m_block_chain[j];
m_block_chain_length--;
ASSERT(node->block_chain_prev == nullptr);
m_block_chain = node->block_chain_next;
if (m_block_chain)
m_block_chain->block_chain_prev = nullptr;
}
else
{
ASSERT(node->block_chain_prev);
node->block_chain_prev->block_chain_next = node->block_chain_next;
if (node->block_chain_next)
node->block_chain_next->block_chain_prev = node->block_chain_prev;
}
node->blocker = nullptr;
node->block_chain_next = nullptr;
node->block_chain_prev = nullptr;
}
}

View File

@ -215,8 +215,9 @@ namespace Kernel
m_changed_ports |= 1u << port_id;
}
m_port_updater = TRY(Thread::create_kernel([](void* data) { reinterpret_cast<USBHubDriver*>(data)->port_updater_task(); }, this));
TRY(Processor::scheduler().add_thread(m_port_updater));
m_port_updater = Process::create_kernel([](void* data) { reinterpret_cast<USBHubDriver*>(data)->port_updater_task(); }, this);
if (m_port_updater == nullptr)
return BAN::Error::from_errno(ENOMEM);
return {};
}

View File

@ -135,8 +135,9 @@ namespace Kernel
while (operational.usbsts & XHCI::USBSTS::HCHalted)
continue;
m_port_updater = TRY(Thread::create_kernel([](void* data) { reinterpret_cast<XHCIController*>(data)->port_updater_task(); }, this));
TRY(Processor::scheduler().add_thread(m_port_updater));
m_port_updater = Process::create_kernel([](void* data) { reinterpret_cast<XHCIController*>(data)->port_updater_task(); }, this);
if (m_port_updater == nullptr)
return BAN::Error::from_errno(ENOMEM);
return {};
}

View File

@ -199,8 +199,7 @@ extern "C" void kernel_main(uint32_t boot_magic, uint32_t boot_info)
Processor::wait_until_processors_ready();
MUST(Processor::scheduler().initialize());
auto* init_thread = MUST(Thread::create_kernel(init2, nullptr));
MUST(Processor::scheduler().add_thread(init_thread));
Process::create_kernel(init2, nullptr);
Processor::yield();
ASSERT_NOT_REACHED();
@ -259,9 +258,6 @@ static void init2(void*)
VirtualFileSystem::initialize(cmdline.root);
dprintln("VFS initialized");
// FIXME: release memory used by boot modules; if they were used,
// their contents have already been loaded by this point
TTY::initialize_devices();
auto console_path = MUST(BAN::String::formatted("/dev/{}", cmdline.console));

View File

@ -8,5 +8,4 @@ CONFIGURE_OPTIONS=(
'--disable-nls'
'--disable-posix-spawn'
'--enable-year2038'
'CFLAGS=-std=c17'
)

View File

@ -22,7 +22,6 @@ set(CMAKE_SYSTEM_NAME banan-os)
set(CMAKE_SYSTEM_PROCESSOR ${BANAN_ARCH})
set(CMAKE_SYSROOT ${BANAN_SYSROOT})
set(CMAKE_STAGING_PREFIX ${BANAN_SYSROOT}/usr)
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)

View File

@ -908,16 +908,12 @@ void setbuf(FILE* file, char* buffer)
int setvbuf(FILE* file, char* buffer, int type, size_t size)
{
ScopeLock _(file);
if (file->fd == -1)
{
errno = EBADF;
return -1;
}
(void)fflush(file);
if (size == 0)
type = _IONBF;

View File

@ -4,7 +4,6 @@ set(USERSPACE_PROGRAMS
cat
cat-mmap
chmod
chown
cp
dd
dhcp-client
@ -12,7 +11,6 @@ set(USERSPACE_PROGRAMS
DynamicLoader
echo
env
false
getopt
http-server
id
@ -38,8 +36,6 @@ set(USERSPACE_PROGRAMS
sudo
sync
tee
test
true
TaskBar
Terminal
touch

View File

@ -1,9 +0,0 @@
set(SOURCES
main.cpp
)
add_executable(chown ${SOURCES})
banan_link_library(chown ban)
banan_link_library(chown libc)
install(TARGETS chown OPTIONAL)

View File

@ -1,109 +0,0 @@
#include <ctype.h>
#include <errno.h>
#include <grp.h>
#include <pwd.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>
void usage(const char* argv0, int ret)
{
FILE* out = (ret == 0) ? stdout : stderr;
fprintf(out, "usage: %s [OWNER][:[GROUP]] FILE...\n", argv0);
fprintf(out, " Change the owner and/or group of each FILE.\n");
exit(ret);
}
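// e.g. `chown alice FILE` changes only the owner, `chown :wheel FILE` only the group,
// `chown alice:wheel FILE` both, and `chown alice: FILE` sets the group to alice's login
// group (user and group names here are purely illustrative)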
[[noreturn]] void print_error_and_exit(const char* format, ...)
{
va_list args;
va_start(args, format);
vfprintf(stderr, format, args);
va_end(args);
exit(1);
__builtin_unreachable();
}
const passwd* get_user(const char* string)
{
bool is_numeric = true;
for (size_t i = 0; string[i] && is_numeric; i++)
if (!isdigit(string[i]))
is_numeric = false;
if (is_numeric)
return getpwuid(atoll(string));
return getpwnam(string);
}
const group* get_group(const char* string)
{
bool is_numeric = true;
for (size_t i = 0; string[i] && is_numeric; i++)
if (!isdigit(string[i]))
is_numeric = false;
if (is_numeric)
return getgrgid(atoll(string));
return getgrnam(string);
}
int main(int argc, char** argv)
{
if (argc <= 2)
usage(argv[0], 1);
uid_t uid = -1;
gid_t gid = -1;
const char* owner_string = argv[1];
const char* colon = strchr(owner_string, ':');
if (colon == owner_string)
{
const auto* group = get_group(owner_string + 1);
if (group == nullptr)
print_error_and_exit("could not find group %s\n", owner_string + 1);
gid = group->gr_gid;
}
else if (colon == nullptr)
{
const auto* user = get_user(owner_string);
if (user == nullptr)
print_error_and_exit("could not find user %s\n", owner_string);
uid = user->pw_uid;
}
else
{
char* user_name = strndup(owner_string, colon - owner_string);
if (user_name == nullptr)
print_error_and_exit("strndup: %s\n", strerror(errno));
const auto* user = get_user(user_name);
if (user == nullptr)
print_error_and_exit("could not find user %s\n", user_name);
free(user_name);
uid = user->pw_uid;
if (colon[1] == '\0')
gid = user->pw_gid;
else
{
const auto* group = get_group(colon + 1);
if (group == nullptr)
print_error_and_exit("could not find group %s\n", colon + 1);
gid = group->gr_gid;
}
}
int ret = 0;
for (int i = 2; i < argc; i++)
{
if (chown(argv[i], uid, gid) == -1)
{
perror("chown");
ret = 1;
}
}
return ret;
}

View File

@ -1,9 +0,0 @@
set(SOURCES
main.cpp
)
add_executable(false ${SOURCES})
banan_link_library(false ban)
banan_link_library(false libc)
install(TARGETS false OPTIONAL)

View File

@ -1,6 +0,0 @@
#include <stdlib.h>
int main()
{
return EXIT_FAILURE;
}

View File

@ -1,9 +0,0 @@
set(SOURCES
main.cpp
)
add_executable(true ${SOURCES})
banan_link_library(true ban)
banan_link_library(true libc)
install(TARGETS true OPTIONAL)

View File

@ -1,6 +0,0 @@
#include <stdlib.h>
int main()
{
return EXIT_SUCCESS;
}