Compare commits
11 Commits
264eff3ad0 ... 21f05eb118

Author | SHA1 | Date
---|---|---
Bananymous | 21f05eb118 |
Bananymous | d94f6388b7 |
Bananymous | 1971813336 |
Bananymous | 3c88d2aad3 |
Bananymous | 5c39903323 |
Bananymous | 6d59a2b45d |
Bananymous | 09c24088a2 |
Bananymous | efdc4817bb |
Bananymous | 0c97abb053 |
Bananymous | 1759d247d9 |
Bananymous | 21dc64dc21 |
@@ -52,11 +52,14 @@ namespace BAN
ErrorOr<void> reserve(size_type);

void remove(const Key&);
void remove(iterator it);
void clear();

T& operator[](const Key&);
const T& operator[](const Key&) const;

iterator find(const Key& key);
const_iterator find(const Key& key) const;
bool contains(const Key&) const;

bool empty() const;
@@ -66,6 +69,8 @@ namespace BAN
ErrorOr<void> rebucket(size_type);
LinkedList<Entry>& get_bucket(const Key&);
const LinkedList<Entry>& get_bucket(const Key&) const;
Vector<LinkedList<Entry>>::iterator get_bucket_iterator(const Key&);
Vector<LinkedList<Entry>>::const_iterator get_bucket_iterator(const Key&) const;

private:
Vector<LinkedList<Entry>> m_buckets;
@@ -145,17 +150,16 @@ namespace BAN
template<typename Key, typename T, typename HASH>
void HashMap<Key, T, HASH>::remove(const Key& key)
{
if (empty()) return;
auto& bucket = get_bucket(key);
for (auto it = bucket.begin(); it != bucket.end(); it++)
auto it = find(key);
if (it != end())
remove(it);
}

template<typename Key, typename T, typename HASH>
void HashMap<Key, T, HASH>::remove(iterator it)
{
if (it->key == key)
{
bucket.remove(it);
it.outer_current()->remove(it.inner_current());
m_size--;
return;
}
}
}

template<typename Key, typename T, typename HASH>
@@ -187,15 +191,34 @@ namespace BAN
ASSERT(false);
}

template<typename Key, typename T, typename HASH>
typename HashMap<Key, T, HASH>::iterator HashMap<Key, T, HASH>::find(const Key& key)
{
if (empty())
return end();
auto bucket_it = get_bucket_iterator(key);
for (auto it = bucket_it->begin(); it != bucket_it->end(); it++)
if (it->key == key)
return iterator(m_buckets.end(), bucket_it, it);
return end();
}

template<typename Key, typename T, typename HASH>
typename HashMap<Key, T, HASH>::const_iterator HashMap<Key, T, HASH>::find(const Key& key) const
{
if (empty())
return end();
auto bucket_it = get_bucket_iterator(key);
for (auto it = bucket_it->begin(); it != bucket_it->end(); it++)
if (it->key == key)
return const_iterator(m_buckets.end(), bucket_it, it);
return end();
}

template<typename Key, typename T, typename HASH>
bool HashMap<Key, T, HASH>::contains(const Key& key) const
{
if (empty()) return false;
const auto& bucket = get_bucket(key);
for (const Entry& entry : bucket)
if (entry.key == key)
return true;
return false;
return find(key) != end();
}

template<typename Key, typename T, typename HASH>
@@ -236,17 +259,29 @@ namespace BAN
template<typename Key, typename T, typename HASH>
LinkedList<typename HashMap<Key, T, HASH>::Entry>& HashMap<Key, T, HASH>::get_bucket(const Key& key)
{
ASSERT(!m_buckets.empty());
auto index = HASH()(key) % m_buckets.size();
return m_buckets[index];
return *get_bucket_iterator(key);
}

template<typename Key, typename T, typename HASH>
const LinkedList<typename HashMap<Key, T, HASH>::Entry>& HashMap<Key, T, HASH>::get_bucket(const Key& key) const
{
return *get_bucket_iterator(key);
}

template<typename Key, typename T, typename HASH>
Vector<LinkedList<typename HashMap<Key, T, HASH>::Entry>>::iterator HashMap<Key, T, HASH>::get_bucket_iterator(const Key& key)
{
ASSERT(!m_buckets.empty());
auto index = HASH()(key) % m_buckets.size();
return m_buckets[index];
return next(m_buckets.begin(), index);
}

template<typename Key, typename T, typename HASH>
Vector<LinkedList<typename HashMap<Key, T, HASH>::Entry>>::const_iterator HashMap<Key, T, HASH>::get_bucket_iterator(const Key& key) const
{
ASSERT(!m_buckets.empty());
auto index = HASH()(key) % m_buckets.size();
return next(m_buckets.begin(), index);
}

}
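The find() overloads above build an iterator from three parts: the end of the bucket vector, the bucket the key hashes into, and the position inside that bucket, while get_bucket_iterator() selects the bucket as hash(key) % bucket count. A minimal sketch of that bucket-selection idea, using standard containers and hypothetical names rather than BAN's real classes:

    #include <functional>
    #include <list>
    #include <vector>

    // Hypothetical illustration only, not BAN code: pick a bucket the same way
    // get_bucket_iterator() does, by indexing with hash(key) % bucket count.
    template<typename Key, typename T, typename Hash = std::hash<Key>>
    struct TinyHashMap
    {
        struct Entry { Key key; T value; };

        std::vector<std::list<Entry>> buckets { 16 };

        std::list<Entry>& bucket_for(const Key& key)
        {
            // buckets is never empty here, mirroring the ASSERT in the diff
            return buckets[Hash()(key) % buckets.size()];
        }
    };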
@@ -284,6 +284,14 @@ namespace BAN
}
}

IteratorDoubleGeneral(const OuterIterator& outer_end, const OuterIterator& outer_current, const InnerIterator& inner_current)
: m_outer_end(outer_end)
, m_outer_current(outer_current)
, m_inner_current(inner_current)
{
find_valid_or_end();
}

void find_valid_or_end()
{
while (m_inner_current == m_outer_current->end())
@@ -295,6 +303,9 @@ namespace BAN
}
}

OuterIterator outer_current() { return m_outer_current; }
InnerIterator inner_current() { return m_inner_current; }

private:
OuterIterator m_outer_end;
OuterIterator m_outer_current;
@@ -25,7 +25,7 @@ namespace LibELF
BAN::Vector<uint8_t> buffer;
TRY(buffer.resize(inode->size()));

TRY(inode->read(0, { buffer.data(), inode->size() }));
TRY(inode->read(0, buffer.data(), inode->size()));

ELF* elf_ptr = new ELF(BAN::move(buffer));
if (elf_ptr == nullptr)
@@ -1,4 +1,5 @@
#include <BAN/ScopeGuard.h>
#include <kernel/CriticalScope.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Lock/LockGuard.h>
#include <LibELF/LoadableELF.h>
@@ -281,10 +282,6 @@ namespace LibELF
elf->reserve_address_space();

ASSERT(&PageTable::current() == &m_page_table);
LockGuard _(m_page_table);
ASSERT(m_page_table.is_page_free(0));

for (const auto& program_header : m_program_headers)
{
switch (program_header.p_type)
@@ -42,6 +42,7 @@ set(KERNEL_SOURCES
kernel/Input/PS2/Mouse.cpp
kernel/InterruptController.cpp
kernel/kernel.cpp
kernel/Lock/SpinLock.cpp
kernel/Memory/DMARegion.cpp
kernel/Memory/FileBackedRegion.cpp
kernel/Memory/Heap.cpp
@@ -68,8 +69,6 @@ set(KERNEL_SOURCES
kernel/Random.cpp
kernel/Scheduler.cpp
kernel/Semaphore.cpp
kernel/Lock/Mutex.cpp
kernel/Lock/SpinLock.cpp
kernel/SSP.cpp
kernel/Storage/ATA/AHCI/Controller.cpp
kernel/Storage/ATA/AHCI/Device.cpp
@@ -1,7 +1,7 @@
#include <kernel/Arch.h>
#include <kernel/CPUID.h>
#include <kernel/InterruptController.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Lock/SpinLock.h>
#include <kernel/Memory/kmalloc.h>
#include <kernel/Memory/PageTable.h>
@@ -17,13 +17,13 @@ extern uint8_t g_userspace_end[];
namespace Kernel
{

SpinLock PageTable::s_fast_page_lock;

static PageTable* s_kernel = nullptr;
static PageTable* s_current = nullptr;
static bool s_has_nxe = false;
static bool s_has_pge = false;

SpinLock PageTable::s_fast_page_lock;

// PML4 entry for kernel memory
static paddr_t s_global_pml4e = 0;
@@ -252,7 +252,7 @@ namespace Kernel
BAN::ErrorOr<PageTable*> PageTable::create_userspace()
{
LockGuard _(s_kernel->m_lock);
SpinLockGuard _(s_kernel->m_lock);
PageTable* page_table = new PageTable;
if (page_table == nullptr)
return BAN::Error::from_errno(ENOMEM);
@@ -333,7 +333,7 @@ namespace Kernel
uint64_t pde = (uc_vaddr >> 21) & 0x1FF;
uint64_t pte = (uc_vaddr >> 12) & 0x1FF;

LockGuard _(m_lock);
SpinLockGuard _(m_lock);

if (is_page_free(vaddr))
{
@@ -355,7 +355,7 @@ namespace Kernel
vaddr_t s_page = vaddr / PAGE_SIZE;
vaddr_t e_page = BAN::Math::div_round_up<vaddr_t>(vaddr + size, PAGE_SIZE);

LockGuard _(m_lock);
SpinLockGuard _(m_lock);
for (vaddr_t page = s_page; page < e_page; page++)
unmap_page(page * PAGE_SIZE);
}
@@ -394,7 +394,7 @@ namespace Kernel
// NOTE: we add present here, since it has to be available in higher level structures
flags_t uwr_flags = (flags & (Flags::UserSupervisor | Flags::ReadWrite)) | Flags::Present;

LockGuard _(m_lock);
SpinLockGuard _(m_lock);

uint64_t* pml4 = (uint64_t*)P2V(m_highest_paging_struct);
if ((pml4[pml4e] & uwr_flags) != uwr_flags)
@@ -439,7 +439,7 @@ namespace Kernel
size_t page_count = range_page_count(vaddr, size);

LockGuard _(m_lock);
SpinLockGuard _(m_lock);
for (size_t page = 0; page < page_count; page++)
map_page_at(paddr + page * PAGE_SIZE, vaddr + page * PAGE_SIZE, flags);
}
@@ -456,7 +456,7 @@ namespace Kernel
uint64_t pde = (uc_vaddr >> 21) & 0x1FF;
uint64_t pte = (uc_vaddr >> 12) & 0x1FF;

LockGuard _(m_lock);
SpinLockGuard _(m_lock);

uint64_t* pml4 = (uint64_t*)P2V(m_highest_paging_struct);
if (!(pml4[pml4e] & Flags::Present))
@@ -490,7 +490,7 @@ namespace Kernel
bool PageTable::reserve_page(vaddr_t vaddr, bool only_free)
{
LockGuard _(m_lock);
SpinLockGuard _(m_lock);
ASSERT(vaddr % PAGE_SIZE == 0);
if (only_free && !is_page_free(vaddr))
return false;
@@ -504,7 +504,7 @@ namespace Kernel
bytes += PAGE_SIZE - rem;
ASSERT(vaddr % PAGE_SIZE == 0);

LockGuard _(m_lock);
SpinLockGuard _(m_lock);
if (only_free && !is_range_free(vaddr, bytes))
return false;
for (size_t offset = 0; offset < bytes; offset += PAGE_SIZE)
@@ -536,7 +536,7 @@ namespace Kernel
const uint16_t e_pde = (uc_vaddr_end >> 21) & 0x1FF;
const uint16_t e_pte = (uc_vaddr_end >> 12) & 0x1FF;

LockGuard _(m_lock);
SpinLockGuard _(m_lock);

// Try to find free page that can be mapped without
// allocations (page table with unused entries)
@@ -609,7 +609,7 @@ namespace Kernel
ASSERT(is_canonical(first_address));
ASSERT(is_canonical(last_address));

LockGuard _(m_lock);
SpinLockGuard _(m_lock);

for (vaddr_t vaddr = first_address; vaddr < last_address;)
{
@@ -650,7 +650,7 @@ namespace Kernel
vaddr_t s_page = vaddr / PAGE_SIZE;
vaddr_t e_page = BAN::Math::div_round_up<vaddr_t>(vaddr + size, PAGE_SIZE);

LockGuard _(m_lock);
SpinLockGuard _(m_lock);
for (vaddr_t page = s_page; page < e_page; page++)
if (!is_page_free(page * PAGE_SIZE))
return false;
@@ -673,7 +673,7 @@ namespace Kernel
void PageTable::debug_dump()
{
LockGuard _(m_lock);
SpinLockGuard _(m_lock);

flags_t flags = 0;
vaddr_t start = 0;
@@ -1,3 +1,4 @@
#include <BAN/Atomic.h>
#include <kernel/Panic.h>

#define ATEXIT_MAX_FUNCS 128
@@ -113,18 +114,18 @@ void __cxa_finalize(void *f)
namespace __cxxabiv1
{

/* The ABI requires a 64-bit type. */
__extension__ typedef int __guard __attribute__((mode(__DI__)));
using __guard = uint64_t;

int __cxa_guard_acquire (__guard* g)
{
return !*(int*)g;
auto& atomic = *reinterpret_cast<BAN::Atomic<__guard>*>(g);
return atomic == 0;
}

void __cxa_guard_release (__guard* g)
{
*(int*)g = 1;
auto& atomic = *reinterpret_cast<BAN::Atomic<__guard>*>(g);
atomic = 1;
}

void __cxa_guard_abort (__guard*)
@@ -2,7 +2,6 @@
#include <BAN/Vector.h>
#include <kernel/InterruptController.h>
#include <kernel/Lock/SpinLock.h>
#include <kernel/Memory/Types.h>

namespace Kernel
@@ -59,7 +58,6 @@ namespace Kernel
BAN::Vector<IOAPIC> m_io_apics;
uint8_t m_irq_overrides[0x100] {};
uint8_t m_reserved_gsis[0x100 / 8] {};
SpinLock m_lock;
};

}
@@ -0,0 +1,30 @@
#pragma once

#include <BAN/NoCopyMove.h>

#include <stddef.h>

namespace Kernel
{

class CriticalScope
{
BAN_NON_COPYABLE(CriticalScope);
BAN_NON_MOVABLE(CriticalScope);

public:
CriticalScope()
{
asm volatile("pushf; cli; pop %0" : "=r"(m_flags) :: "memory");
}

~CriticalScope()
{
asm volatile("push %0; popf" :: "rm"(m_flags) : "memory", "cc");
}

private:
size_t m_flags;
};

}
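The new CriticalScope header above is a small RAII guard: the constructor saves RFLAGS and executes cli, and the destructor pushes the saved flags back, so the previous interrupt state is restored even on early returns. A usage sketch; the queue and function here are hypothetical, only CriticalScope itself comes from the diff:

    #include <BAN/CircularQueue.h>
    #include <kernel/CriticalScope.h>

    // Hypothetical example: guard a queue that an interrupt handler also touches.
    static BAN::CircularQueue<int, 32> s_events;

    static void push_event(int event)
    {
        Kernel::CriticalScope _;   // pushf; cli — old flags saved in the guard
        if (!s_events.full())
            s_events.push(event);
    }                              // destructor: popf restores the previous IF bit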
@@ -5,29 +5,26 @@
#define dprintln(...) \
do { \
Debug::s_debug_lock.lock(); \
Kernel::SpinLockGuard _(Debug::s_debug_lock); \
Debug::print_prefix(__FILE__, __LINE__); \
BAN::Formatter::print(Debug::putchar, __VA_ARGS__); \
BAN::Formatter::print(Debug::putchar, "\r\n"); \
Debug::s_debug_lock.unlock(); \
} while(false)

#define dwarnln(...) \
do { \
Debug::s_debug_lock.lock(); \
Kernel::SpinLockGuard _(Debug::s_debug_lock); \
BAN::Formatter::print(Debug::putchar, "\e[33m"); \
dprintln(__VA_ARGS__); \
BAN::Formatter::print(Debug::putchar, "\e[m"); \
Debug::s_debug_lock.unlock(); \
} while(false)

#define derrorln(...) \
do { \
Debug::s_debug_lock.lock(); \
Kernel::SpinLockGuard _(Debug::s_debug_lock); \
BAN::Formatter::print(Debug::putchar, "\e[31m"); \
dprintln(__VA_ARGS__); \
BAN::Formatter::print(Debug::putchar, "\e[m"); \
Debug::s_debug_lock.unlock(); \
} while(false)

#define dprintln_if(cond, ...) \
@@ -3,6 +3,7 @@
#include <BAN/Vector.h>
#include <kernel/Device/Device.h>
#include <kernel/FS/TmpFS/FileSystem.h>
#include <kernel/Lock/Mutex.h>
#include <kernel/Semaphore.h>

namespace Kernel
@@ -9,6 +9,7 @@
#include <kernel/API/DirectoryEntry.h>
#include <kernel/Credentials.h>
#include <kernel/Debug.h>
#include <kernel/Lock/Mutex.h>

#include <sys/socket.h>
@@ -157,7 +158,7 @@ namespace Kernel
virtual BAN::ErrorOr<long> ioctl_impl(int request, void* arg) { return BAN::Error::from_errno(ENOTSUP); }

protected:
mutable Mutex m_mutex;
mutable PriorityMutex m_mutex;

private:
BAN::WeakPtr<SharedFileData> m_shared_region;
@@ -20,8 +20,6 @@ namespace Kernel::Input
KeyboardLayout();

private:
SpinLock m_lock;

BAN::Array<Key, 0xFF> m_keycode_to_key_normal;
BAN::Array<Key, 0xFF> m_keycode_to_key_shift;
BAN::Array<Key, 0xFF> m_keycode_to_key_altgr;
@@ -5,6 +5,7 @@
#include <kernel/Device/Device.h>
#include <kernel/Input/PS2/Config.h>
#include <kernel/InterruptController.h>
#include <kernel/Lock/Mutex.h>

namespace Kernel::Input
{
@@ -60,7 +61,6 @@ namespace Kernel::Input
private:
BAN::RefPtr<PS2Device> m_devices[2];
Mutex m_mutex;
RecursiveSpinLock m_cmd_lock;

BAN::CircularQueue<Command, 128> m_command_queue;
uint64_t m_command_send_time { 0 };
@@ -45,7 +45,6 @@ namespace Kernel::Input
PS2Keymap m_keymap;

Semaphore m_semaphore;
SpinLock m_event_lock;

protected:
virtual BAN::ErrorOr<size_t> read_impl(off_t, BAN::ByteSpan) override;
@@ -38,7 +38,6 @@ namespace Kernel::Input
BAN::CircularQueue<MouseEvent, 128> m_event_queue;

SpinLock m_event_lock;
Semaphore m_semaphore;

protected:
@@ -51,6 +51,11 @@ namespace Kernel
bool m_using_apic { false };
};

bool interrupts_enabled();
inline bool interrupts_enabled()
{
uintptr_t flags;
asm volatile("pushf; pop %0" : "=r"(flags) :: "memory");
return flags & (1 << 9);
}

}
@@ -2,6 +2,7 @@
#include <BAN/Atomic.h>
#include <BAN/NoCopyMove.h>
#include <kernel/Scheduler.h>

#include <sys/types.h>
@@ -16,9 +17,41 @@ namespace Kernel
public:
Mutex() = default;

void lock();
bool try_lock();
void unlock();
void lock()
{
auto tid = Scheduler::current_tid();
if (tid == m_locker)
ASSERT_GT(m_lock_depth, 0);
else
{
while (!m_locker.compare_exchange(-1, tid))
Scheduler::get().reschedule();
ASSERT_EQ(m_lock_depth, 0);
}
m_lock_depth++;
}

bool try_lock()
{
auto tid = Scheduler::current_tid();
if (tid == m_locker)
ASSERT_GT(m_lock_depth, 0);
else
{
if (!m_locker.compare_exchange(-1, tid))
return false;
ASSERT_EQ(m_lock_depth, 0);
}
m_lock_depth++;
}

void unlock()
{
ASSERT_EQ(m_locker.load(), Scheduler::current_tid());
ASSERT_GT(m_lock_depth, 0);
if (--m_lock_depth == 0)
m_locker = -1;
}

pid_t locker() const { return m_locker; }
bool is_locked() const { return m_locker != -1; }
@@ -37,9 +70,53 @@ namespace Kernel
public:
PriorityMutex() = default;

void lock();
bool try_lock();
void unlock();
void lock()
{
auto tid = Scheduler::current_tid();
if (tid == m_locker)
ASSERT_GT(m_lock_depth, 0);
else
{
bool has_priority = tid ? !Thread::current().is_userspace() : true;
if (has_priority)
m_queue_length++;
while (!(has_priority || m_queue_length == 0) || !m_locker.compare_exchange(-1, tid))
Scheduler::get().reschedule();
ASSERT_EQ(m_lock_depth, 0);
}
m_lock_depth++;
}

bool try_lock()
{
auto tid = Scheduler::current_tid();
if (tid == m_locker)
ASSERT_GT(m_lock_depth, 0);
else
{
bool has_priority = tid ? !Thread::current().is_userspace() : true;
if (!(has_priority || m_queue_length == 0) || !m_locker.compare_exchange(-1, tid))
return false;
if (has_priority)
m_queue_length++;
ASSERT_EQ(m_lock_depth, 0);
}
m_lock_depth++;
}

void unlock()
{
auto tid = Scheduler::current_tid();
ASSERT_EQ(m_locker.load(), tid);
ASSERT_GT(m_lock_depth, 0);
if (--m_lock_depth == 0)
{
bool has_priority = tid ? !Thread::current().is_userspace() : true;
if (has_priority)
m_queue_length--;
m_locker = -1;
}
}

pid_t locker() const { return m_locker; }
bool is_locked() const { return m_locker != -1; }
@@ -48,7 +125,7 @@ namespace Kernel
private:
BAN::Atomic<pid_t> m_locker { -1 };
uint32_t m_lock_depth { 0 };
BAN::Atomic<uint32_t> m_queue_depth { 0 };
BAN::Atomic<uint32_t> m_queue_length { 0 };
};

}
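Both mutexes above count lock depth, so the owning thread may re-lock without deadlocking and ownership is released only when the depth drops back to zero. A short hypothetical usage sketch of that recursive behaviour (the functions themselves are illustrative, not from the commits):

    #include <kernel/Lock/Mutex.h>

    // Hypothetical illustration: re-locking from the same thread is allowed.
    static Kernel::Mutex s_mutex;

    static void inner()
    {
        s_mutex.lock();     // same thread: depth goes 1 -> 2
        s_mutex.unlock();   // depth back to 1, mutex still owned
    }

    static void outer()
    {
        s_mutex.lock();     // depth 0 -> 1
        inner();
        s_mutex.unlock();   // depth 1 -> 0, m_locker reset to -1
    }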
@@ -8,6 +8,8 @@
namespace Kernel
{

using InterruptState = bool;

class SpinLock
{
BAN_NON_COPYABLE(SpinLock);
@@ -16,17 +18,11 @@ namespace Kernel
public:
SpinLock() = default;

void lock();
bool try_lock();
void unlock();

pid_t locker() const { return m_locker; }
bool is_locked() const { return m_locker != -1; }
uint32_t lock_depth() const { return is_locked(); }
InterruptState lock();
void unlock(InterruptState state);

private:
BAN::Atomic<pid_t> m_locker { -1 };
uintptr_t m_flags { 0 };
};

class RecursiveSpinLock
@@ -37,18 +33,35 @@ namespace Kernel
public:
RecursiveSpinLock() = default;

void lock();
bool try_lock();
void unlock();

pid_t locker() const { return m_locker; }
bool is_locked() const { return m_locker != -1; }
uint32_t lock_depth() const { return m_lock_depth; }
InterruptState lock();
void unlock(InterruptState state);

private:
BAN::Atomic<pid_t> m_locker { -1 };
uint32_t m_lock_depth { 0 };
uintptr_t m_flags { 0 };
};

template<typename Lock>
class SpinLockGuard
{
BAN_NON_COPYABLE(SpinLockGuard);
BAN_NON_MOVABLE(SpinLockGuard);

public:
SpinLockGuard(Lock& lock)
: m_lock(lock)
{
m_state = m_lock.lock();
}

~SpinLockGuard()
{
m_lock.unlock(m_state);
}

private:
Lock& m_lock;
InterruptState m_state;
};

}
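With this header, SpinLock::lock() now returns the previous InterruptState and unlock() takes it back, and SpinLockGuard carries that state across the critical section. A sketch of both styles; only the lock types and their members come from the diff, the surrounding functions are hypothetical:

    #include <kernel/Lock/SpinLock.h>

    static Kernel::SpinLock s_lock;

    static void manual_locking()
    {
        auto state = s_lock.lock();   // disables interrupts, returns old state
        // ... critical section ...
        s_lock.unlock(state);         // re-enables interrupts only if they were on
    }

    static void guarded_locking()
    {
        Kernel::SpinLockGuard _(s_lock);   // RAII form used throughout the diff
        // ... critical section ...
    }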
@@ -2,6 +2,7 @@
#include <BAN/Errors.h>
#include <BAN/Traits.h>
#include <kernel/CriticalScope.h>
#include <kernel/Lock/SpinLock.h>
#include <kernel/Memory/Types.h>
@@ -42,30 +43,27 @@ namespace Kernel
static PageTable& kernel();
static PageTable& current();

public:
static constexpr vaddr_t fast_page() { return KERNEL_OFFSET; }

template<with_fast_page_callback F>
static void with_fast_page(paddr_t paddr, F callback)
{
s_fast_page_lock.lock();
SpinLockGuard _(s_fast_page_lock);
map_fast_page(paddr);
callback();
unmap_fast_page();
s_fast_page_lock.unlock();
}

template<with_fast_page_callback_error F>
static BAN::ErrorOr<void> with_fast_page(paddr_t paddr, F callback)
{
s_fast_page_lock.lock();
SpinLockGuard _(s_fast_page_lock);
map_fast_page(paddr);
auto ret = callback();
unmap_fast_page();
s_fast_page_lock.unlock();
return ret;
}

static constexpr vaddr_t fast_page() { return KERNEL_OFFSET; }

// FIXME: implement sized checks, return span, etc
static void* fast_page_as_ptr(size_t offset = 0)
{
@@ -113,8 +111,8 @@ namespace Kernel
void load();

void lock() const { m_lock.lock(); }
void unlock() const { m_lock.unlock(); }
InterruptState lock() const { return m_lock.lock(); }
void unlock(InterruptState state) const { m_lock.unlock(state); }

void debug_dump();
@@ -18,8 +18,6 @@ namespace Kernel
static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr(PageTable&, vaddr_t, size_t, PageTable::flags_t flags, bool preallocate_pages);
// Create virtual range to virtual address range
static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr_range(PageTable&, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t, PageTable::flags_t flags, bool preallocate_pages);
// Create virtual range in kernel memory with kmalloc
static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_kmalloc(size_t);
~VirtualRange();

BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> clone(PageTable&);
@@ -35,14 +33,13 @@ namespace Kernel
void copy_from(size_t offset, const uint8_t* buffer, size_t bytes);

private:
VirtualRange(PageTable&, bool preallocated, bool kmalloc);
VirtualRange(PageTable&, bool preallocated);

void set_zero();

private:
PageTable& m_page_table;
const bool m_preallocated;
const bool m_kmalloc;
vaddr_t m_vaddr { 0 };
size_t m_size { 0 };
PageTable::flags_t m_flags { 0 };
@@ -51,7 +51,6 @@ namespace Kernel
};

private:
SpinLock m_pending_lock;
SpinLock m_table_lock;

BAN::HashMap<BAN::IPv4Address, BAN::MACAddress> m_arp_table;
@@ -67,8 +67,6 @@ namespace Kernel
bool m_has_eerprom { false };

private:
SpinLock m_lock;

BAN::UniqPtr<DMARegion> m_rx_buffer_region;
BAN::UniqPtr<DMARegion> m_tx_buffer_region;
BAN::UniqPtr<DMARegion> m_rx_descriptor_region;
@@ -67,8 +67,7 @@ namespace Kernel
};

private:
RecursiveSpinLock m_packet_lock;
RecursiveSpinLock m_socket_lock;
RecursiveSpinLock m_bound_socket_lock;

BAN::UniqPtr<ARPTable> m_arp_table;
Process* m_process { nullptr };
@@ -1,6 +1,7 @@
#pragma once

#include <BAN/Endianness.h>
#include <kernel/Lock/Mutex.h>
#include <kernel/Memory/VirtualRange.h>
#include <kernel/Networking/NetworkInterface.h>
#include <kernel/Networking/NetworkSocket.h>
@@ -119,6 +120,7 @@ namespace Kernel
uint64_t m_time_wait_start_ms { 0 };

Mutex m_lock;
Semaphore m_semaphore;

BAN::Atomic<bool> m_should_ack { false };
@@ -2,6 +2,7 @@
#include <BAN/CircularQueue.h>
#include <BAN/Endianness.h>
#include <kernel/Lock/SpinLock.h>
#include <kernel/Memory/VirtualRange.h>
#include <kernel/Networking/NetworkInterface.h>
#include <kernel/Networking/NetworkSocket.h>
@@ -4,6 +4,7 @@
#include <BAN/WeakPtr.h>
#include <kernel/FS/Socket.h>
#include <kernel/FS/TmpFS/Inode.h>
#include <kernel/Lock/SpinLock.h>

namespace Kernel
{
@@ -65,6 +66,7 @@ namespace Kernel
BAN::CircularQueue<size_t, 128> m_packet_sizes;
size_t m_packet_size_total { 0 };
BAN::UniqPtr<VirtualRange> m_packet_buffer;
SpinLock m_packet_lock;
Semaphore m_packet_semaphore;

friend class BAN::RefPtr<UnixDomainSocket>;
@@ -20,11 +20,8 @@ namespace Kernel
private:
static PIC* create();

private:
SpinLock m_lock;
uint16_t m_reserved_irqs { 0 };
friend class InterruptController;
uint16_t m_reserved_irqs { 0 };
};

}
@@ -7,6 +7,7 @@
#include <BAN/Vector.h>
#include <kernel/Credentials.h>
#include <kernel/FS/Inode.h>
#include <kernel/Lock/Mutex.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/MemoryRegion.h>
#include <kernel/OpenFileDescriptorSet.h>
@@ -50,9 +51,6 @@ namespace Kernel
void register_to_scheduler();
void exit(int status, int signal);

static void for_each_process(const BAN::Function<BAN::Iteration(Process&)>& callback);
static void for_each_process_in_session(pid_t sid, const BAN::Function<BAN::Iteration(Process&)>& callback);

void add_thread(Thread*);
void on_thread_exit(Thread&);
@@ -221,8 +219,7 @@ namespace Kernel
const pid_t m_pid;
const pid_t m_parent;

mutable Mutex m_big_mutex;
SpinLock m_signal_lock;
mutable Mutex m_process_lock;

BAN::String m_working_directory;
BAN::Vector<Thread*> m_threads;
@@ -7,21 +7,6 @@
namespace Kernel
{

class SchedulerLock
{
public:
void lock();
void unlock();
void unlock_all();
pid_t locker() const;

private:
BAN::Atomic<pid_t> m_locker { -1 };
uint32_t m_lock_depth { 0 };

friend class Scheduler;
};

class Scheduler
{
public:
@@ -34,8 +19,6 @@ namespace Kernel
void reschedule();
void reschedule_if_idling();

void reschedule_current_no_save();

void set_current_thread_sleeping(uint64_t wake_time);

void block_current_thread(Semaphore*, uint64_t wake_time);
@@ -46,8 +29,8 @@ namespace Kernel
Thread& current_thread();
static pid_t current_tid();

BAN::ErrorOr<void> add_thread(Thread*);

[[noreturn]] void execute_current_thread();
[[noreturn]] void _execute_current_thread();
[[noreturn]] void delete_current_process_and_thread();

private:
@@ -60,8 +43,7 @@ namespace Kernel
void remove_and_advance_current_thread();
void advance_current_thread();

[[noreturn]] void execute_current_thread();
[[noreturn]] void _execute_current_thread();
BAN::ErrorOr<void> add_thread(Thread*);

private:
struct SchedulerThread
@@ -75,13 +57,13 @@ namespace Kernel
Semaphore* semaphore;
};

SchedulerLock m_lock;

Thread* m_idle_thread { nullptr };
BAN::LinkedList<SchedulerThread> m_active_threads;
BAN::LinkedList<SchedulerThread> m_sleeping_threads;

BAN::LinkedList<SchedulerThread>::iterator m_current_thread;

friend class Process;
};

}
@@ -2,6 +2,7 @@
#include <BAN/Vector.h>
#include <kernel/Device/Device.h>
#include <kernel/Lock/Mutex.h>
#include <kernel/Storage/DiskCache.h>
#include <kernel/Storage/Partition.h>
@@ -59,7 +59,6 @@ namespace Kernel
bool initialize();

private:
SpinLock m_lock;
BAN::String m_name;
Serial m_serial;
BAN::CircularQueue<uint8_t, 128> m_input;
@@ -3,6 +3,7 @@
#include <BAN/Array.h>
#include <kernel/Device/Device.h>
#include <kernel/Input/KeyEvent.h>
#include <kernel/Lock/SpinLock.h>
#include <kernel/Terminal/TerminalDriver.h>
#include <kernel/Terminal/termios.h>
#include <kernel/Semaphore.h>
@@ -83,6 +84,8 @@ namespace Kernel
Semaphore semaphore;
};
Buffer m_output;

RecursiveSpinLock m_write_lock;
};

}
@@ -69,8 +69,6 @@ namespace Kernel
private:
BAN::String m_name;

RecursiveSpinLock m_write_lock;

State m_state { State::Normal };
AnsiState m_ansi_state { };
UTF8State m_utf8_state { };
@@ -113,8 +113,6 @@ namespace Kernel
Process* m_process { nullptr };
bool m_is_userspace { false };

mutable RecursiveSpinLock m_lock;

uintptr_t* m_return_rsp { nullptr };
uintptr_t* m_return_rip { nullptr };
@@ -30,8 +30,6 @@ namespace Kernel
uint64_t read_main_counter() const;

private:
mutable SpinLock m_lock;

bool m_is_64bit { false };

uint64_t m_last_ticks { 0 };
@@ -4,7 +4,6 @@
#include <kernel/CPUID.h>
#include <kernel/Debug.h>
#include <kernel/IDT.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/PageTable.h>
#include <kernel/MMIO.h>
@@ -224,7 +223,7 @@ namespace Kernel
void APIC::enable_irq(uint8_t irq)
{
LockGuard _(m_lock);
CriticalScope _;

uint32_t gsi = m_irq_overrides[irq];
@@ -269,7 +268,7 @@ namespace Kernel
BAN::ErrorOr<void> APIC::reserve_irq(uint8_t irq)
{
LockGuard _(m_lock);
CriticalScope _;

uint32_t gsi = m_irq_overrides[irq];
@@ -302,7 +301,7 @@ namespace Kernel
BAN::Optional<uint8_t> APIC::get_free_irq()
{
LockGuard _(m_lock);
CriticalScope _;
for (int irq = 0; irq <= 0xFF; irq++)
{
uint32_t gsi = m_irq_overrides[irq];
@@ -1,5 +1,6 @@
#include <kernel/Debug.h>
#include <kernel/InterruptController.h>
#include <kernel/Lock/SpinLock.h>
#include <kernel/Memory/PageTable.h>
#include <kernel/Terminal/Serial.h>
#include <kernel/Terminal/TTY.h>
@@ -20,7 +20,7 @@ namespace Kernel
BAN::ErrorOr<size_t> DebugDevice::write_impl(off_t, BAN::ConstByteSpan buffer)
{
auto ms_since_boot = SystemTimer::get().ms_since_boot();
Debug::s_debug_lock.lock();
SpinLockGuard _(Debug::s_debug_lock);
BAN::Formatter::print(Debug::putchar, "[{5}.{3}] {}: ",
ms_since_boot / 1000,
ms_since_boot % 1000,
@@ -28,7 +28,6 @@ namespace Kernel
);
for (size_t i = 0; i < buffer.size(); i++)
Debug::putchar(buffer[i]);
Debug::s_debug_lock.unlock();
return buffer.size();
}
@@ -56,12 +56,9 @@ namespace Kernel
sync_process->add_thread(MUST(Thread::create_kernel(
[](void*)
{
// NOTE: we lock the device lock here and unlock
// it only while semaphore is blocking
s_instance->m_device_lock.lock();

while (true)
{
LockGuard _(s_instance->m_device_lock);
while (!s_instance->m_should_sync)
{
LockFreeGuard _(s_instance->m_device_lock);
@@ -42,6 +42,7 @@ namespace Kernel
BAN::ErrorOr<size_t> Pipe::read_impl(off_t, BAN::ByteSpan buffer)
{
LockGuard _(m_mutex);
while (m_buffer.empty())
{
if (m_writing_count == 0)
@@ -65,6 +66,8 @@ namespace Kernel
BAN::ErrorOr<size_t> Pipe::write_impl(off_t, BAN::ConstByteSpan buffer)
{
LockGuard _(m_mutex);

size_t old_size = m_buffer.size();

TRY(m_buffer.resize(old_size + buffer.size()));
@@ -1,5 +1,6 @@
#include <kernel/FS/ProcFS/FileSystem.h>
#include <kernel/FS/ProcFS/Inode.h>
#include <kernel/Lock/LockGuard.h>

namespace Kernel
{
@@ -44,8 +44,6 @@ namespace Kernel
BAN::ErrorOr<void> VirtualFileSystem::mount(const Credentials& credentials, BAN::StringView block_device_path, BAN::StringView target)
{
LockGuard _(m_mutex);

auto block_device_file = TRY(file_from_absolute_path(credentials, block_device_path, true));
if (!block_device_file.inode->is_device())
return BAN::Error::from_errno(ENOTBLK);
@@ -1,7 +1,7 @@
#include <BAN/HashMap.h>
#include <kernel/CriticalScope.h>
#include <kernel/FS/VirtualFileSystem.h>
#include <kernel/Input/KeyboardLayout.h>
#include <kernel/Lock/LockGuard.h>

#include <ctype.h>
@@ -74,7 +74,6 @@ namespace Kernel::Input
Key KeyboardLayout::get_key_from_event(KeyEvent event)
{
LockGuard _(m_lock);
if (event.shift())
return m_keycode_to_key_shift[event.keycode];
if (event.ralt())
@@ -257,7 +256,7 @@ namespace Kernel::Input
}
}

LockGuard _(m_lock);
CriticalScope _;

for (size_t i = 0; i < new_layout->m_keycode_to_key_normal.size(); i++)
if (new_layout->m_keycode_to_key_normal[i] != Key::None)
@@ -101,7 +101,8 @@ namespace Kernel::Input
bool PS2Controller::append_command_queue(PS2Device* device, uint8_t command, uint8_t response_size)
{
LockGuard _(m_cmd_lock);
// NOTE: command queue push/pop must be done without interrupts
CriticalScope _;
if (m_command_queue.size() + 1 >= m_command_queue.capacity())
{
dprintln("PS/2 command queue full");
@@ -120,7 +121,8 @@ namespace Kernel::Input
bool PS2Controller::append_command_queue(PS2Device* device, uint8_t command, uint8_t data, uint8_t response_size)
{
LockGuard _(m_cmd_lock);
// NOTE: command queue push/pop must be done without interrupts
CriticalScope _;
if (m_command_queue.size() + 1 >= m_command_queue.capacity())
{
dprintln("PS/2 command queue full");
@@ -141,9 +143,6 @@ namespace Kernel::Input
{
ASSERT(interrupts_enabled());

// NOTE: CircularQueue reads don't need locking, as long as
// we can guarantee that read element is not popped

if (m_command_queue.empty())
return;
auto& command = m_command_queue.front();
@@ -153,8 +152,6 @@ namespace Kernel::Input
{
dwarnln_if(DEBUG_PS2, "Command timedout");
m_devices[command.device_index]->command_timedout(command.out_data, command.out_count);

LockGuard _(m_cmd_lock);
m_command_queue.pop();
}
return;
@@ -1,4 +1,5 @@
#include <BAN/ScopeGuard.h>
#include <kernel/CriticalScope.h>
#include <kernel/FS/DevFS/FileSystem.h>
#include <kernel/Input/KeyboardLayout.h>
#include <kernel/Input/PS2/Config.h>
@@ -164,7 +165,6 @@ namespace Kernel::Input
event.modifier = m_modifiers | (released ? 0 : KeyEvent::Modifier::Pressed);
event.keycode = keycode.value();

LockGuard _(m_event_lock);
if (m_event_queue.full())
{
dwarnln("PS/2 event queue full");
@@ -197,7 +197,7 @@ namespace Kernel::Input
if (m_event_queue.empty())
TRY(Thread::current().block_or_eintr_indefinite(m_semaphore));

LockGuard _(m_event_lock);
CriticalScope _;
if (m_event_queue.empty())
continue;
@@ -1,4 +1,5 @@
#include <BAN/ScopeGuard.h>
#include <kernel/CriticalScope.h>
#include <kernel/FS/DevFS/FileSystem.h>
#include <kernel/Input/PS2/Config.h>
#include <kernel/Input/PS2/Mouse.h>
@@ -157,7 +158,6 @@ namespace Kernel::Input
event.scroll_event.scroll = rel_z;
}

LockGuard _(m_event_lock);
for (int i = 0; i < event_count; i++)
{
if (m_event_queue.full())
@@ -181,7 +181,7 @@ namespace Kernel::Input
if (m_event_queue.empty())
TRY(Thread::current().block_or_eintr_indefinite(m_semaphore));

LockGuard _(m_event_lock);
CriticalScope _;
if (m_event_queue.empty())
continue;
@@ -67,11 +67,4 @@ namespace Kernel
dwarnln("could not enter acpi mode");
}

bool interrupts_enabled()
{
uintptr_t flags;
asm volatile("pushf; pop %0" : "=r"(flags) :: "memory");
return flags & (1 << 9);
}

}
@@ -1,68 +0,0 @@
#include <kernel/Lock/Mutex.h>
#include <kernel/Scheduler.h>

namespace Kernel
{

void Mutex::lock()
{
auto tid = Scheduler::current_tid();
if (tid != m_locker)
while (!m_locker.compare_exchange(-1, tid))
Scheduler::get().reschedule();
m_lock_depth++;
}

bool Mutex::try_lock()
{
auto tid = Scheduler::current_tid();
if (tid != m_locker)
if (!m_locker.compare_exchange(-1, tid))
return false;
m_lock_depth++;
return true;
}

void Mutex::unlock()
{
ASSERT_EQ(m_locker.load(), Scheduler::current_tid());
if (--m_lock_depth == 0)
m_locker = -1;
}

void PriorityMutex::lock()
{
const auto tid = Scheduler::current_tid();
const bool has_priority = tid ? !Thread::current().is_userspace() : true;
if (has_priority)
m_queue_depth++;
if (tid != m_locker)
while ((!has_priority && m_queue_depth > 0) || !m_locker.compare_exchange(-1, tid))
asm volatile("pause");
m_lock_depth++;
}

bool PriorityMutex::try_lock()
{
const auto tid = Scheduler::current_tid();
const bool has_priority = tid ? !Thread::current().is_userspace() : true;
if (tid != m_locker)
while ((!has_priority && m_queue_depth > 0) || !m_locker.compare_exchange(-1, tid))
return false;
if (has_priority)
m_queue_depth++;
m_lock_depth++;
return true;
}

void PriorityMutex::unlock()
{
const auto tid = Scheduler::current_tid();
const bool has_priority = tid ? !Thread::current().is_userspace() : true;
if (has_priority)
m_queue_depth--;
if (--m_lock_depth)
m_locker = -1;
}

}
@@ -1,80 +1,64 @@
#include <kernel/InterruptController.h>
#include <kernel/Lock/SpinLock.h>
#include <kernel/Scheduler.h>

// FIXME: try to move these to header

namespace Kernel
{

static inline uintptr_t get_flags_and_disable_interrupts()
{
uintptr_t flags;
asm volatile("pushf; cli; pop %0" : "=r"(flags) :: "memory");
return flags;
}

static inline void restore_flags(uintptr_t flags)
{
asm volatile("push %0; popf" :: "rm"(flags) : "memory", "cc");
}

void SpinLock::lock()
{
const auto tid = Scheduler::current_tid();
ASSERT_NEQ(m_locker.load(), tid);
while (!m_locker.compare_exchange(-1, tid))
__builtin_ia32_pause();
m_flags = get_flags_and_disable_interrupts();
}

bool SpinLock::try_lock()
{
const auto tid = Scheduler::current_tid();
ASSERT_NEQ(m_locker.load(), tid);
if (!m_locker.compare_exchange(-1, tid))
return false;
m_flags = get_flags_and_disable_interrupts();
return true;
}

void SpinLock::unlock()
{
ASSERT_EQ(m_locker.load(), Scheduler::current_tid());
restore_flags(m_flags);
m_locker = -1;
}

void RecursiveSpinLock::lock()
InterruptState SpinLock::lock()
{
auto tid = Scheduler::current_tid();
if (m_locker != tid)
{
while (!m_locker.compare_exchange(-1, tid))
__builtin_ia32_pause();
m_flags = get_flags_and_disable_interrupts();
}
m_lock_depth++;
}
ASSERT_NEQ(m_locker.load(), tid);

InterruptState state = interrupts_enabled();
DISABLE_INTERRUPTS();

bool RecursiveSpinLock::try_lock()
{
auto tid = Scheduler::current_tid();
if (m_locker != tid)
{
if (!m_locker.compare_exchange(-1, tid))
return false;
m_flags = get_flags_and_disable_interrupts();
}
m_lock_depth++;
return true;
ASSERT_NOT_REACHED();

return state;
}

void RecursiveSpinLock::unlock()
void SpinLock::unlock(InterruptState state)
{
ASSERT_EQ(m_locker.load(), Scheduler::current_tid());
m_locker.store(-1);
if (state)
ENABLE_INTERRUPTS();
}

InterruptState RecursiveSpinLock::lock()
{
auto tid = Scheduler::current_tid();

InterruptState state = interrupts_enabled();
DISABLE_INTERRUPTS();

if (tid == m_locker)
ASSERT_GT(m_lock_depth, 0);
else
{
if (!m_locker.compare_exchange(-1, tid))
ASSERT_NOT_REACHED();
ASSERT_EQ(m_lock_depth, 0);
}

m_lock_depth++;

return state;
}

void RecursiveSpinLock::unlock(InterruptState state)
{
auto tid = Scheduler::current_tid();
ASSERT_EQ(m_locker.load(), tid);
ASSERT_GT(m_lock_depth, 0);
if (--m_lock_depth == 0)
{
restore_flags(m_flags);
m_locker = -1;
}
if (state)
ENABLE_INTERRUPTS();
}

}
Binary file not shown.
@@ -1,3 +1,4 @@
#include <kernel/CriticalScope.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/FileBackedRegion.h>
#include <kernel/Memory/Heap.h>
@@ -82,9 +83,12 @@ namespace Kernel
if (pages[page_index] == 0)
return;

{
CriticalScope _;
PageTable::with_fast_page(pages[page_index], [&] {
memcpy(page_buffer, PageTable::fast_page_as_ptr(), PAGE_SIZE);
});
}

if (auto ret = inode->write(page_index * PAGE_SIZE, BAN::ConstByteSpan::from(page_buffer)); ret.is_error())
dwarnln("{}", ret.error());
@@ -1,5 +1,4 @@
#include <kernel/BootInfo.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/PageTable.h>
@@ -67,7 +66,7 @@ namespace Kernel
paddr_t Heap::take_free_page()
{
LockGuard _(m_lock);
SpinLockGuard _(m_lock);
for (auto& range : m_physical_ranges)
if (range.free_pages() >= 1)
return range.reserve_page();
@@ -76,7 +75,7 @@ namespace Kernel
void Heap::release_page(paddr_t paddr)
{
LockGuard _(m_lock);
SpinLockGuard _(m_lock);
for (auto& range : m_physical_ranges)
if (range.contains(paddr))
return range.release_page(paddr);
@@ -85,7 +84,7 @@ namespace Kernel
paddr_t Heap::take_free_contiguous_pages(size_t pages)
{
LockGuard _(m_lock);
SpinLockGuard _(m_lock);
for (auto& range : m_physical_ranges)
if (range.free_pages() >= pages)
if (paddr_t paddr = range.reserve_contiguous_pages(pages))
@@ -95,7 +94,7 @@ namespace Kernel
void Heap::release_contiguous_pages(paddr_t paddr, size_t pages)
{
LockGuard _(m_lock);
SpinLockGuard _(m_lock);
for (auto& range : m_physical_ranges)
if (range.contains(paddr))
return range.release_contiguous_pages(paddr, pages);
@@ -104,7 +103,7 @@ namespace Kernel
size_t Heap::used_pages() const
{
LockGuard _(m_lock);
SpinLockGuard _(m_lock);
size_t result = 0;
for (const auto& range : m_physical_ranges)
result += range.used_pages();
@@ -113,7 +112,7 @@ namespace Kernel
size_t Heap::free_pages() const
{
LockGuard _(m_lock);
SpinLockGuard _(m_lock);
size_t result = 0;
for (const auto& range : m_physical_ranges)
result += range.free_pages();
@@ -1,3 +1,4 @@
#include <kernel/CriticalScope.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/MemoryBackedRegion.h>
@@ -56,10 +57,9 @@ namespace Kernel
m_page_table.map_page_at(paddr, vaddr, m_flags);

// Zero out the new page
if (&PageTable::current() == &m_page_table)
memset((void*)vaddr, 0x00, PAGE_SIZE);
else
PageTable::with_fast_page(paddr, [] { memset(PageTable::fast_page_as_ptr(), 0x00, PAGE_SIZE); });
PageTable::with_fast_page(paddr, [&] {
memset(PageTable::fast_page_as_ptr(), 0x00, PAGE_SIZE);
});

return true;
}
@@ -94,13 +94,9 @@ namespace Kernel
TRY(allocate_page_containing(write_vaddr));

if (&PageTable::current() == &m_page_table)
memcpy((void*)write_vaddr, (void*)(buffer + written), bytes);
else
PageTable::with_fast_page(
m_page_table.physical_address_of(write_vaddr & PAGE_ADDR_MASK),
[&] { memcpy(PageTable::fast_page_as_ptr(page_offset), (void*)(buffer + written), bytes); }
);
PageTable::with_fast_page(m_page_table.physical_address_of(write_vaddr & PAGE_ADDR_MASK), [&] {
memcpy(PageTable::fast_page_as_ptr(page_offset), (void*)(buffer + written), bytes);
});

written += bytes;
}
@@ -1,3 +1,4 @@
#include <kernel/CriticalScope.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/VirtualRange.h>
@@ -11,7 +12,7 @@ namespace Kernel
ASSERT(vaddr % PAGE_SIZE == 0);
ASSERT(vaddr > 0);

VirtualRange* result_ptr = new VirtualRange(page_table, preallocate_pages, false);
VirtualRange* result_ptr = new VirtualRange(page_table, preallocate_pages);
if (result_ptr == nullptr)
return BAN::Error::from_errno(ENOMEM);
@@ -67,31 +68,14 @@ namespace Kernel
}
ASSERT(vaddr + size <= vaddr_end);

LockGuard _(page_table);
SpinLockGuard _(page_table);
page_table.unmap_range(vaddr, size); // We have to unmap here to allow reservation in create_to_vaddr()
return create_to_vaddr(page_table, vaddr, size, flags, preallocate_pages);
}

BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_kmalloc(size_t size)
{
auto* result_ptr = new VirtualRange(PageTable::kernel(), false, true);
if (!result_ptr)
return BAN::Error::from_errno(ENOMEM);

auto result = BAN::UniqPtr<VirtualRange>::adopt(result_ptr);
result->m_size = size;
result->m_flags = PageTable::Flags::ReadWrite | PageTable::Flags::Present;
result->m_vaddr = (vaddr_t)kmalloc(size);
if (result->m_vaddr == 0)
return BAN::Error::from_errno(ENOMEM);
result->set_zero();
return result;
}

VirtualRange::VirtualRange(PageTable& page_table, bool preallocated, bool kmalloc)
VirtualRange::VirtualRange(PageTable& page_table, bool preallocated)
: m_page_table(page_table)
, m_preallocated(preallocated)
, m_kmalloc(kmalloc)
{ }

VirtualRange::~VirtualRange()
@@ -99,10 +83,6 @@ namespace Kernel
if (m_vaddr == 0)
return;

if (m_kmalloc)
kfree((void*)m_vaddr);
else
{
for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
{
paddr_t paddr = m_page_table.physical_address_of(vaddr() + offset);
@@ -111,7 +91,6 @@ namespace Kernel
}
m_page_table.unmap_range(vaddr(), size());
}
}

BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::clone(PageTable& page_table)
{
@@ -120,7 +99,7 @@ namespace Kernel
auto result = TRY(create_to_vaddr(page_table, vaddr(), size(), flags(), m_preallocated));

LockGuard _(m_page_table);
SpinLockGuard _(m_page_table);
for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
{
if (!m_preallocated && m_page_table.physical_address_of(vaddr() + offset))
@@ -141,7 +120,6 @@ namespace Kernel
BAN::ErrorOr<void> VirtualRange::allocate_page_for_demand_paging(vaddr_t address)
{
ASSERT(!m_kmalloc);
ASSERT(!m_preallocated);
ASSERT(contains(address));
ASSERT(&PageTable::current() == &m_page_table);
@@ -161,9 +139,7 @@ namespace Kernel
void VirtualRange::set_zero()
{
PageTable& page_table = PageTable::current();

if (m_kmalloc || &page_table == &m_page_table)
if (&PageTable::current() == &m_page_table || &PageTable::kernel() == &m_page_table)
{
memset((void*)vaddr(), 0, size());
return;
@@ -187,7 +163,7 @@ namespace Kernel
ASSERT_LTE(offset, size());
ASSERT_LTE(offset, size() - bytes);

if (m_kmalloc || &PageTable::current() == &m_page_table)
if (&PageTable::current() == &m_page_table || &PageTable::kernel() == &m_page_table)
{
memcpy((void*)(vaddr() + offset), buffer, bytes);
return;
@@ -1,13 +1,10 @@
#include <BAN/Errors.h>
#include <kernel/CriticalScope.h>
#include <kernel/kprint.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/kmalloc.h>

#include <kernel/Thread.h>

using Kernel::LockGuard;
using Kernel::SpinLock;

#define MB (1 << 20)

extern uint8_t g_kernel_end[];
@@ -84,8 +81,6 @@ struct kmalloc_info
};
static kmalloc_info s_kmalloc_info;

static SpinLock s_kmalloc_lock;

template<size_t SIZE>
struct kmalloc_fixed_node
{
@@ -149,8 +144,6 @@ void kmalloc_initialize()
void kmalloc_dump_info()
{
LockGuard _(s_kmalloc_lock);

kprintln("kmalloc: 0x{8H}->0x{8H}", s_kmalloc_info.base, s_kmalloc_info.end);
kprintln(" used: 0x{8H}", s_kmalloc_info.used);
kprintln(" free: 0x{8H}", s_kmalloc_info.free);
@@ -162,18 +155,14 @@ void kmalloc_dump_info()
static bool is_corrupted()
{
LockGuard _(s_kmalloc_lock);
auto& info = s_kmalloc_info;
auto* temp = info.first();
for (; temp->end() <= info.end; temp = temp->after())
continue;
for (; temp->end() <= info.end; temp = temp->after());
return (uintptr_t)temp != info.end;
}

[[maybe_unused]] static void debug_dump()
{
LockGuard _(s_kmalloc_lock);

auto& info = s_kmalloc_info;

uint32_t used = 0;
@@ -194,8 +183,6 @@ static void* kmalloc_fixed()
{
auto& info = s_kmalloc_fixed_info;

LockGuard _(s_kmalloc_lock);

if (!info.free_list_head)
return nullptr;
@@ -238,8 +225,6 @@ static void* kmalloc_impl(size_t size, size_t align)
auto& info = s_kmalloc_info;

LockGuard _(s_kmalloc_lock);

for (auto* node = info.first(); node->end() <= info.end; node = node->after())
{
if (node->used())
@@ -319,6 +304,8 @@ void* kmalloc(size_t size, size_t align, bool force_identity_map)
align = s_kmalloc_min_align;
ASSERT(align <= PAGE_SIZE);

Kernel::CriticalScope critical;

if (size == 0 || size >= info.size)
goto no_memory;
@@ -351,7 +338,7 @@ void kfree(void* address)
uintptr_t address_uint = (uintptr_t)address;
ASSERT(address_uint % s_kmalloc_min_align == 0);

LockGuard _(s_kmalloc_lock);
Kernel::CriticalScope critical;

if (s_kmalloc_fixed_info.base <= address_uint && address_uint < s_kmalloc_fixed_info.end)
{
@@ -412,9 +399,10 @@ void kfree(void* address)
BAN::Optional<Kernel::paddr_t> kmalloc_paddr_of(Kernel::vaddr_t vaddr)
{
using Kernel::vaddr_t;
LockGuard _(s_kmalloc_lock);
using namespace Kernel;

if ((vaddr_t)s_kmalloc_storage <= vaddr && vaddr < (vaddr_t)s_kmalloc_storage + sizeof(s_kmalloc_storage))
return V2P(vaddr);

return {};
}
@ -1,4 +1,3 @@
#include <kernel/Lock/LockGuard.h>
#include <kernel/Networking/ARPTable.h>
#include <kernel/Scheduler.h>
#include <kernel/Timer/Timer.h>

@ -52,9 +51,10 @@ namespace Kernel
ipv4_address = interface.get_gateway();

{
LockGuard _(m_table_lock);
if (m_arp_table.contains(ipv4_address))
return m_arp_table[ipv4_address];
SpinLockGuard _(m_table_lock);
auto it = m_arp_table.find(ipv4_address);
if (it != m_arp_table.end())
return it->value;
}

ARPPacket arp_request;

@ -74,9 +74,10 @@ namespace Kernel
while (SystemTimer::get().ms_since_boot() < timeout)
{
{
LockGuard _(m_table_lock);
if (m_arp_table.contains(ipv4_address))
return m_arp_table[ipv4_address];
SpinLockGuard _(m_table_lock);
auto it = m_arp_table.find(ipv4_address);
if (it != m_arp_table.end())
return it->value;
}
Scheduler::get().reschedule();
}

@ -114,13 +115,15 @@ namespace Kernel
}
case ARPOperation::Reply:
{
LockGuard _(m_table_lock);
if (m_arp_table.contains(packet.spa))
SpinLockGuard _(m_table_lock);
auto it = m_arp_table.find(packet.spa);

if (it != m_arp_table.end())
{
if (m_arp_table[packet.spa] != packet.sha)
if (it->value != packet.sha)
{
dprintln("Update IPv4 {} MAC to {}", packet.spa, packet.sha);
m_arp_table[packet.spa] = packet.sha;
it->value = packet.sha;
}
}
else

@ -145,7 +148,7 @@ namespace Kernel
BAN::Optional<PendingArpPacket> pending;

{
LockGuard _(m_pending_lock);
CriticalScope _;
if (!m_pending_packets.empty())
{
pending = m_pending_packets.front();

@ -168,12 +171,12 @@ namespace Kernel
{
auto& arp_packet = buffer.as<const ARPPacket>();

LockGuard _(m_pending_lock);
if (m_pending_packets.full())
{
dprintln("arp packet queue full");
return;
}

m_pending_packets.push({ .interface = interface, .packet = arp_packet });
m_pending_semaphore.unblock();
}

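The ARP table changes above replace a contains() check followed by operator[] with a single find() that returns an iterator, matching the new HashMap::find()/remove(iterator) API from the header diff. A minimal sketch of the pattern, assuming a BAN::HashMap-like container whose iterators expose ->key and ->value (the helper name and include path are illustrative, not part of this change set):

#include <BAN/HashMap.h>

// Hypothetical helper: update an existing entry with one lookup instead of
// contains() + operator[], which would hash the key and walk the bucket twice.
template<typename Key, typename T>
bool update_if_present(BAN::HashMap<Key, T>& map, const Key& key, const T& value)
{
	auto it = map.find(key);   // single hash + bucket walk
	if (it == map.end())
		return false;          // caller decides whether to insert instead
	it->value = value;         // reuse the iterator; no second lookup
	return true;
}
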
@ -261,7 +261,7 @@ namespace Kernel
{
ASSERT_LTE(buffer.size() + sizeof(EthernetHeader), E1000_TX_BUFFER_SIZE);

LockGuard _(m_lock);
CriticalScope _;

size_t tx_current = read32(REG_TDT) % E1000_TX_DESCRIPTOR_COUNT;

@ -70,12 +70,14 @@ namespace Kernel

void IPv4Layer::unbind_socket(BAN::RefPtr<NetworkSocket> socket, uint16_t port)
{
LockGuard _(m_socket_lock);
if (m_bound_sockets.contains(port))
{
ASSERT(m_bound_sockets[port].valid());
ASSERT(m_bound_sockets[port].lock() == socket);
m_bound_sockets.remove(port);
SpinLockGuard _(m_bound_socket_lock);
auto it = m_bound_sockets.find(port);
if (it != m_bound_sockets.end())
{
ASSERT(it->value.lock() == socket);
m_bound_sockets.remove(it);
}
}
NetworkManager::get().TmpFileSystem::remove_from_cache(socket);
}

@ -88,11 +90,11 @@ namespace Kernel
return BAN::Error::from_errno(EAFNOSUPPORT);
auto& sockaddr_in = *reinterpret_cast<const struct sockaddr_in*>(address);

LockGuard _(m_socket_lock);
SpinLockGuard _(m_bound_socket_lock);

uint16_t port = NetworkSocket::PORT_NONE;
for (uint32_t i = 0; i < 100 && port == NetworkSocket::PORT_NONE; i++)
if (uint32_t temp = 0xC000 | (Random::get_u32() & 0x3FFF); !m_bound_sockets.contains(temp) || !m_bound_sockets[temp].valid())
if (uint32_t temp = 0xC000 | (Random::get_u32() & 0x3FFF); !m_bound_sockets.contains(temp))
port = temp;
for (uint32_t temp = 0xC000; temp < 0xFFFF && port == NetworkSocket::PORT_NONE; temp++)
if (!m_bound_sockets.contains(temp))

@ -124,17 +126,11 @@ namespace Kernel
auto& sockaddr_in = *reinterpret_cast<const struct sockaddr_in*>(address);
uint16_t port = BAN::host_to_network_endian(sockaddr_in.sin_port);

LockGuard _(m_socket_lock);
SpinLockGuard _(m_bound_socket_lock);

if (!m_bound_sockets.contains(port))
TRY(m_bound_sockets.insert(port, TRY(socket->get_weak_ptr())));
else
{
auto& bound = m_bound_sockets[port];
if (bound.valid())
if (m_bound_sockets.contains(port))
return BAN::Error::from_errno(EADDRINUSE);
bound = TRY(socket->get_weak_ptr());
}
TRY(m_bound_sockets.insert(port, TRY(socket->get_weak_ptr())));

// FIXME: actually determine proper interface
auto interface = NetworkManager::get().interfaces().front();

@ -256,13 +252,14 @@ namespace Kernel
BAN::RefPtr<Kernel::NetworkSocket> bound_socket;

{
LockGuard _(m_socket_lock);
if (!m_bound_sockets.contains(dst_port))
SpinLockGuard _(m_bound_socket_lock);
auto it = m_bound_sockets.find(dst_port);
if (it == m_bound_sockets.end())
{
dprintln_if(DEBUG_IPV4, "no one is listening on port {}", dst_port);
return {};
}
bound_socket = m_bound_sockets[dst_port].lock();
bound_socket = it->value.lock();
}

if (!bound_socket)

@ -293,7 +290,7 @@ namespace Kernel
BAN::Optional<PendingIPv4Packet> pending;

{
LockGuard _(m_packet_lock);
CriticalScope _;
if (!m_pending_packets.empty())
{
pending = m_pending_packets.front();

@ -313,7 +310,7 @@ namespace Kernel
if (auto ret = handle_ipv4_packet(pending->interface, BAN::ByteSpan(buffer_start, ipv4_packet_size)); ret.is_error())
dwarnln("{}", ret.error());

LockGuard _(m_packet_lock);
CriticalScope _;
m_pending_total_size -= ipv4_packet_size;
if (m_pending_total_size)
memmove(buffer_start, buffer_start + ipv4_packet_size, m_pending_total_size);

@ -322,8 +319,6 @@ namespace Kernel

void IPv4Layer::add_ipv4_packet(NetworkInterface& interface, BAN::ConstByteSpan buffer)
{
LockGuard _(m_packet_lock);

if (m_pending_packets.full())
{
dwarnln("IPv4 packet queue full");

@ -68,6 +68,8 @@ namespace Kernel

void TCPSocket::on_close_impl()
{
LockGuard _(m_mutex);

if (!is_bound())
return;

@ -101,6 +103,8 @@ namespace Kernel
if (address_len > (socklen_t)sizeof(sockaddr_storage))
address_len = sizeof(sockaddr_storage);

LockGuard _(m_mutex);

ASSERT(!m_connection_info.has_value());

switch (m_state)

@ -191,8 +195,6 @@ namespace Kernel

void TCPSocket::add_protocol_header(BAN::ByteSpan packet, uint16_t dst_port, PseudoHeader pseudo_header)
{
LockGuard _(m_mutex);

auto& header = packet.as<TCPHeader>();
memset(&header, 0, sizeof(TCPHeader));
memset(header.options, TCPOption::End, m_tcp_options_bytes);

@ -210,6 +212,7 @@ namespace Kernel
{
case State::Closed:
{
LockGuard _(m_mutex);
header.syn = 1;
add_tcp_header_option<0, TCPOption::MaximumSeqmentSize>(header, m_interface->payload_mtu() - m_network_layer.header_size());
add_tcp_header_option<4, TCPOption::WindowScale>(header, 0);

@ -230,6 +233,7 @@ namespace Kernel
break;
case State::CloseWait:
{
LockGuard _(m_mutex);
header.ack = 1;
header.fin = 1;
m_state = State::LastAck;

@ -238,6 +242,7 @@ namespace Kernel
}
case State::FinWait1:
{
LockGuard _(m_mutex);
header.ack = 1;
header.fin = 1;
m_state = State::FinWait2;

@ -245,6 +250,7 @@ namespace Kernel
}
case State::FinWait2:
{
LockGuard _(m_mutex);
header.ack = 1;
m_state = State::TimeWait;
m_time_wait_start_ms = SystemTimer::get().ms_since_boot();

@ -297,10 +303,6 @@ namespace Kernel

auto payload = buffer.slice(header.data_offset * sizeof(uint32_t));

// FIXME: Internet layer packet receive thread should not be able to be
// blocked by inode's mutex
LockGuard _(m_mutex);

switch (m_state)
{
case State::Closed:

@ -310,6 +312,7 @@ namespace Kernel
if (!header.ack || !header.syn)
break;

LockGuard _(m_mutex);

if (header.ack_number != m_send_window.current_seq)
{

@ -342,6 +345,8 @@ namespace Kernel
if (!header.ack)
break;

LockGuard _(m_mutex);

if (header.fin)
{
if (m_recv_window.start_seq + m_recv_window.data_size != header.seq_number)

@ -1,4 +1,3 @@
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Networking/UDPSocket.h>
#include <kernel/Thread.h>

@ -46,7 +45,7 @@ namespace Kernel
//auto& header = packet.as<const UDPHeader>();
auto payload = packet.slice(sizeof(UDPHeader));

LockGuard _(m_packet_lock);
SpinLockGuard _(m_packet_lock);

if (m_packets.full())
{

@ -88,12 +87,12 @@ namespace Kernel
}
ASSERT(m_port != PORT_NONE);

LockGuard _(m_packet_lock);

auto state = m_packet_lock.lock();
while (m_packets.empty())
{
LockFreeGuard free(m_packet_lock);
m_packet_lock.unlock(state);
TRY(Thread::current().block_or_eintr_indefinite(m_packet_semaphore));
state = m_packet_lock.lock();
}

auto packet_info = m_packets.front();

@ -115,6 +114,8 @@ namespace Kernel

m_packet_total_size -= packet_info.packet_size;

m_packet_lock.unlock(state);

if (address && address_len)
{
if (*address_len > (socklen_t)sizeof(sockaddr_storage))

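The UDPSocket receive path above switches from a scoped LockGuard to explicit m_packet_lock.lock()/unlock(state) calls so the lock, together with the saved state it returns, can be dropped while the thread blocks on the packet semaphore. A minimal sketch of that wait loop, assuming a spinlock whose lock() returns a state token that must be passed back to unlock(); the helper name and signatures are illustrative:

// Hypothetical wait helper mirroring the loop above: release the spinlock
// (restoring the saved state) before blocking, re-acquire it before re-checking.
template<typename SpinLock, typename Predicate, typename Block>
auto lock_and_wait(SpinLock& lock, Predicate ready, Block block)
{
	auto state = lock.lock();          // returns the previous state to restore
	while (!ready())
	{
		lock.unlock(state);            // drop the lock while we sleep
		block();                       // e.g. block_or_eintr_indefinite(semaphore)
		state = lock.lock();           // re-acquire before testing again
	}
	return state;                      // caller continues with the lock held
}
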
@ -51,9 +51,10 @@ namespace Kernel
|
|||
{
|
||||
if (is_bound() && !is_bound_to_unused())
|
||||
{
|
||||
LockGuard _(s_bound_socket_lock);
|
||||
if (s_bound_sockets.contains(m_bound_path))
|
||||
s_bound_sockets.remove(m_bound_path);
|
||||
SpinLockGuard _(s_bound_socket_lock);
|
||||
auto it = s_bound_sockets.find(m_bound_path);
|
||||
if (it != s_bound_sockets.end())
|
||||
s_bound_sockets.remove(it);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -71,7 +72,7 @@ namespace Kernel
|
|||
BAN::RefPtr<UnixDomainSocket> pending;
|
||||
|
||||
{
|
||||
LockGuard _(connection_info.pending_lock);
|
||||
SpinLockGuard _(connection_info.pending_lock);
|
||||
pending = connection_info.pending_connections.front();
|
||||
connection_info.pending_connections.pop();
|
||||
connection_info.pending_semaphore.unblock();
|
||||
|
@ -120,10 +121,11 @@ namespace Kernel
|
|||
BAN::RefPtr<UnixDomainSocket> target;
|
||||
|
||||
{
|
||||
LockGuard _(s_bound_socket_lock);
|
||||
if (!s_bound_sockets.contains(file.canonical_path))
|
||||
SpinLockGuard _(s_bound_socket_lock);
|
||||
auto it = s_bound_sockets.find(file.canonical_path);
|
||||
if (it == s_bound_sockets.end())
|
||||
return BAN::Error::from_errno(ECONNREFUSED);
|
||||
target = s_bound_sockets[file.canonical_path].lock();
|
||||
target = it->value.lock();
|
||||
if (!target)
|
||||
return BAN::Error::from_errno(ECONNREFUSED);
|
||||
}
|
||||
|
@ -150,7 +152,7 @@ namespace Kernel
|
|||
{
|
||||
auto& target_info = target->m_info.get<ConnectionInfo>();
|
||||
{
|
||||
LockGuard _(target_info.pending_lock);
|
||||
SpinLockGuard _(target_info.pending_lock);
|
||||
if (target_info.pending_connections.size() < target_info.pending_connections.capacity())
|
||||
{
|
||||
MUST(target_info.pending_connections.push(this));
|
||||
|
@ -205,7 +207,7 @@ namespace Kernel
|
|||
O_RDWR
|
||||
));
|
||||
|
||||
LockGuard _(s_bound_socket_lock);
|
||||
SpinLockGuard _(s_bound_socket_lock);
|
||||
ASSERT(!s_bound_sockets.contains(file.canonical_path));
|
||||
TRY(s_bound_sockets.emplace(file.canonical_path, TRY(get_weak_ptr())));
|
||||
m_bound_path = BAN::move(file.canonical_path);
|
||||
|
@ -229,12 +231,12 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<void> UnixDomainSocket::add_packet(BAN::ConstByteSpan packet)
|
||||
{
|
||||
LockGuard _(m_mutex);
|
||||
|
||||
auto state = m_packet_lock.lock();
|
||||
while (m_packet_sizes.full() || m_packet_size_total + packet.size() > s_packet_buffer_size)
|
||||
{
|
||||
LockFreeGuard _(m_mutex);
|
||||
m_packet_lock.unlock(state);
|
||||
TRY(Thread::current().block_or_eintr_indefinite(m_packet_semaphore));
|
||||
state = m_packet_lock.lock();
|
||||
}
|
||||
|
||||
uint8_t* packet_buffer = reinterpret_cast<uint8_t*>(m_packet_buffer->vaddr() + m_packet_size_total);
|
||||
|
@ -245,6 +247,7 @@ namespace Kernel
|
|||
m_packet_sizes.push(packet.size());
|
||||
|
||||
m_packet_semaphore.unblock();
|
||||
m_packet_lock.unlock(state);
|
||||
return {};
|
||||
}
|
||||
|
||||
|
@ -318,10 +321,11 @@ namespace Kernel
|
|||
canonical_path = BAN::move(file.canonical_path);
|
||||
}
|
||||
|
||||
LockGuard _(s_bound_socket_lock);
|
||||
if (!s_bound_sockets.contains(canonical_path))
|
||||
SpinLockGuard _(s_bound_socket_lock);
|
||||
auto it = s_bound_sockets.find(canonical_path);
|
||||
if (it == s_bound_sockets.end())
|
||||
return BAN::Error::from_errno(EDESTADDRREQ);
|
||||
auto target = s_bound_sockets[canonical_path].lock();
|
||||
auto target = it->value.lock();
|
||||
if (!target)
|
||||
return BAN::Error::from_errno(EDESTADDRREQ);
|
||||
TRY(target->add_packet(message));
|
||||
|
@ -338,10 +342,12 @@ namespace Kernel
|
|||
return BAN::Error::from_errno(ENOTCONN);
|
||||
}
|
||||
|
||||
auto state = m_packet_lock.lock();
|
||||
while (m_packet_size_total == 0)
|
||||
{
|
||||
LockFreeGuard _(m_mutex);
|
||||
m_packet_lock.unlock(state);
|
||||
TRY(Thread::current().block_or_eintr_indefinite(m_packet_semaphore));
|
||||
state = m_packet_lock.lock();
|
||||
}
|
||||
|
||||
uint8_t* packet_buffer = reinterpret_cast<uint8_t*>(m_packet_buffer->vaddr());
|
||||
|
@ -360,6 +366,7 @@ namespace Kernel
|
|||
m_packet_size_total -= nread;
|
||||
|
||||
m_packet_semaphore.unblock();
|
||||
m_packet_lock.unlock(state);
|
||||
|
||||
return nread;
|
||||
}
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
#include <kernel/CriticalScope.h>
|
||||
#include <kernel/IDT.h>
|
||||
#include <kernel/IO.h>
|
||||
#include <kernel/Lock/LockGuard.h>
|
||||
#include <kernel/PIC.h>
|
||||
|
||||
#include <string.h>
|
||||
|
@ -79,7 +79,7 @@ namespace Kernel
|
|||
|
||||
void PIC::enable_irq(uint8_t irq)
|
||||
{
|
||||
LockGuard _(m_lock);
|
||||
CriticalScope _;
|
||||
ASSERT(irq < 16);
|
||||
ASSERT(m_reserved_irqs & (1 << irq));
|
||||
|
||||
|
@ -99,7 +99,7 @@ namespace Kernel
|
|||
dwarnln("PIC only supports 16 irqs");
|
||||
return BAN::Error::from_errno(EFAULT);
|
||||
}
|
||||
LockGuard _(m_lock);
|
||||
CriticalScope _;
|
||||
if (m_reserved_irqs & (1 << irq))
|
||||
{
|
||||
dwarnln("irq {} is already reserved", irq);
|
||||
|
@ -111,7 +111,7 @@ namespace Kernel
|
|||
|
||||
BAN::Optional<uint8_t> PIC::get_free_irq()
|
||||
{
|
||||
LockGuard _(m_lock);
|
||||
CriticalScope _;
|
||||
for (int irq = 0; irq < 16; irq++)
|
||||
{
|
||||
if (m_reserved_irqs & (1 << irq))
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
#include <BAN/ScopeGuard.h>
|
||||
#include <BAN/StringView.h>
|
||||
#include <kernel/CriticalScope.h>
|
||||
#include <kernel/FS/DevFS/FileSystem.h>
|
||||
#include <kernel/FS/ProcFS/FileSystem.h>
|
||||
#include <kernel/FS/VirtualFileSystem.h>
|
||||
|
@ -31,9 +32,9 @@ namespace Kernel
|
|||
static BAN::Vector<Process*> s_processes;
|
||||
static RecursiveSpinLock s_process_lock;
|
||||
|
||||
void Process::for_each_process(const BAN::Function<BAN::Iteration(Process&)>& callback)
|
||||
static void for_each_process(const BAN::Function<BAN::Iteration(Process&)>& callback)
|
||||
{
|
||||
LockGuard _(s_process_lock);
|
||||
SpinLockGuard _(s_process_lock);
|
||||
|
||||
for (auto* process : s_processes)
|
||||
{
|
||||
|
@ -44,9 +45,9 @@ namespace Kernel
|
|||
}
|
||||
}
|
||||
|
||||
void Process::for_each_process_in_session(pid_t sid, const BAN::Function<BAN::Iteration(Process&)>& callback)
|
||||
static void for_each_process_in_session(pid_t sid, const BAN::Function<BAN::Iteration(Process&)>& callback)
|
||||
{
|
||||
LockGuard _(s_process_lock);
|
||||
SpinLockGuard _(s_process_lock);
|
||||
|
||||
for (auto* process : s_processes)
|
||||
{
|
||||
|
@ -65,7 +66,7 @@ namespace Kernel
|
|||
|
||||
pid_t pid;
|
||||
{
|
||||
LockGuard _(s_process_lock);
|
||||
CriticalScope _;
|
||||
pid = s_next_id;
|
||||
if (sid == 0 && pgrp == 0)
|
||||
{
|
||||
|
@ -88,9 +89,10 @@ namespace Kernel
|
|||
|
||||
void Process::register_to_scheduler()
|
||||
{
|
||||
s_process_lock.lock();
|
||||
{
|
||||
SpinLockGuard _(s_process_lock);
|
||||
MUST(s_processes.push_back(this));
|
||||
s_process_lock.unlock();
|
||||
}
|
||||
for (auto* thread : m_threads)
|
||||
MUST(Scheduler::get().add_thread(thread));
|
||||
}
|
||||
|
@ -192,14 +194,14 @@ namespace Kernel
|
|||
|
||||
void Process::add_thread(Thread* thread)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
MUST(m_threads.push_back(thread));
|
||||
}
|
||||
|
||||
void Process::cleanup_function()
|
||||
{
|
||||
{
|
||||
LockGuard _(s_process_lock);
|
||||
SpinLockGuard _(s_process_lock);
|
||||
for (size_t i = 0; i < s_processes.size(); i++)
|
||||
if (s_processes[i] == this)
|
||||
s_processes.remove(i);
|
||||
|
@ -213,8 +215,7 @@ namespace Kernel
|
|||
while (m_exit_status.waiting > 0)
|
||||
Scheduler::get().reschedule();
|
||||
|
||||
// This mutex will no longer be freed
|
||||
m_big_mutex.lock();
|
||||
m_process_lock.lock();
|
||||
|
||||
m_open_file_descriptors.close_all();
|
||||
|
||||
|
@ -235,7 +236,7 @@ namespace Kernel
|
|||
m_threads.clear();
|
||||
|
||||
thread.setup_process_cleanup();
|
||||
Scheduler::get().reschedule_current_no_save();
|
||||
Scheduler::get().execute_current_thread();
|
||||
ASSERT_NOT_REACHED();
|
||||
}
|
||||
|
||||
|
@ -253,7 +254,7 @@ namespace Kernel
|
|||
|
||||
void Process::exit(int status, int signal)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
m_exit_status.exit_code = __WGENEXITCODE(status, signal);
|
||||
for (auto* thread : m_threads)
|
||||
if (thread != &Thread::current())
|
||||
|
@ -274,7 +275,7 @@ namespace Kernel
|
|||
meminfo.phys_pages = 0;
|
||||
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
for (auto* thread : m_threads)
|
||||
{
|
||||
meminfo.virt_pages += thread->virtual_page_count();
|
||||
|
@ -323,13 +324,13 @@ namespace Kernel
|
|||
|
||||
size_t Process::proc_cmdline(off_t offset, BAN::ByteSpan buffer) const
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
return read_from_vec_of_str(m_cmdline, offset, buffer);
|
||||
}
|
||||
|
||||
size_t Process::proc_environ(off_t offset, BAN::ByteSpan buffer) const
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
return read_from_vec_of_str(m_environ, offset, buffer);
|
||||
}
|
||||
|
||||
|
@ -342,7 +343,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_gettermios(::termios* termios)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
TRY(validate_pointer_access(termios, sizeof(::termios)));
|
||||
|
||||
|
@ -361,7 +362,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_settermios(const ::termios* termios)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
TRY(validate_pointer_access(termios, sizeof(::termios)));
|
||||
|
||||
|
@ -400,7 +401,7 @@ namespace Kernel
|
|||
{
|
||||
auto page_table = BAN::UniqPtr<PageTable>::adopt(TRY(PageTable::create_userspace()));
|
||||
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
BAN::String working_directory;
|
||||
TRY(working_directory.append(m_working_directory));
|
||||
|
@ -440,7 +441,7 @@ namespace Kernel
|
|||
{
|
||||
// NOTE: We scope everything for automatic deletion
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
TRY(validate_string_access(path));
|
||||
auto loadable_elf = TRY(load_elf_for_exec(m_credentials, path, m_working_directory, page_table()));
|
||||
|
@ -539,7 +540,7 @@ namespace Kernel
|
|||
m_has_called_exec = true;
|
||||
|
||||
m_threads.front()->setup_exec();
|
||||
Scheduler::get().reschedule_current_no_save();
|
||||
Scheduler::get().execute_current_thread();
|
||||
ASSERT_NOT_REACHED();
|
||||
}
|
||||
|
||||
|
@ -576,7 +577,7 @@ namespace Kernel
|
|||
BAN::ErrorOr<long> Process::sys_wait(pid_t pid, int* stat_loc, int options)
|
||||
{
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_pointer_access(stat_loc, sizeof(int)));
|
||||
}
|
||||
|
||||
|
@ -609,7 +610,7 @@ namespace Kernel
|
|||
BAN::ErrorOr<long> Process::sys_nanosleep(const timespec* rqtp, timespec* rmtp)
|
||||
{
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_pointer_access(rqtp, sizeof(timespec)));
|
||||
if (rmtp)
|
||||
TRY(validate_pointer_access(rmtp, sizeof(timespec)));
|
||||
|
@ -651,7 +652,7 @@ namespace Kernel
|
|||
return BAN::Error::from_errno(ENOTSUP);
|
||||
}
|
||||
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
auto absolute_path = TRY(absolute_path_of(path));
|
||||
|
||||
|
@ -680,7 +681,7 @@ namespace Kernel
|
|||
{
|
||||
ASSERT(&Process::current() == this);
|
||||
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
if (Thread::current().stack().contains(address))
|
||||
{
|
||||
|
@ -708,13 +709,13 @@ namespace Kernel
|
|||
BAN::ErrorOr<long> Process::open_inode(BAN::RefPtr<Inode> inode, int flags)
|
||||
{
|
||||
ASSERT(inode);
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
return TRY(m_open_file_descriptors.open(inode, flags));
|
||||
}
|
||||
|
||||
BAN::ErrorOr<long> Process::open_file(BAN::StringView path, int flags, mode_t mode)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
BAN::String absolute_path = TRY(absolute_path_of(path));
|
||||
|
||||
|
@ -747,14 +748,14 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_open(const char* path, int flags, mode_t mode)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_string_access(path));
|
||||
return open_file(path, flags, mode);
|
||||
}
|
||||
|
||||
BAN::ErrorOr<long> Process::sys_openat(int fd, const char* path, int flags, mode_t mode)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
TRY(validate_string_access(path));
|
||||
|
||||
|
@ -770,28 +771,28 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_close(int fd)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(m_open_file_descriptors.close(fd));
|
||||
return 0;
|
||||
}
|
||||
|
||||
BAN::ErrorOr<long> Process::sys_read(int fd, void* buffer, size_t count)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_pointer_access(buffer, count));
|
||||
return TRY(m_open_file_descriptors.read(fd, BAN::ByteSpan((uint8_t*)buffer, count)));
|
||||
}
|
||||
|
||||
BAN::ErrorOr<long> Process::sys_write(int fd, const void* buffer, size_t count)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_pointer_access(buffer, count));
|
||||
return TRY(m_open_file_descriptors.write(fd, BAN::ByteSpan((uint8_t*)buffer, count)));
|
||||
}
|
||||
|
||||
BAN::ErrorOr<long> Process::sys_create(const char* path, mode_t mode)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_string_access(path));
|
||||
TRY(create_file_or_dir(path, mode));
|
||||
return 0;
|
||||
|
@ -799,7 +800,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_create_dir(const char* path, mode_t mode)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_string_access(path));
|
||||
BAN::StringView path_sv(path);
|
||||
if (!path_sv.empty() && path_sv.back() == '/')
|
||||
|
@ -810,7 +811,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_unlink(const char* path)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_string_access(path));
|
||||
|
||||
auto absolute_path = TRY(absolute_path_of(path));
|
||||
|
@ -843,7 +844,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_readlink(const char* path, char* buffer, size_t bufsize)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_string_access(path));
|
||||
TRY(validate_pointer_access(buffer, bufsize));
|
||||
|
||||
|
@ -854,7 +855,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_readlinkat(int fd, const char* path, char* buffer, size_t bufsize)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_string_access(path));
|
||||
TRY(validate_pointer_access(buffer, bufsize));
|
||||
|
||||
|
@ -871,7 +872,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_pread(int fd, void* buffer, size_t count, off_t offset)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_pointer_access(buffer, count));
|
||||
auto inode = TRY(m_open_file_descriptors.inode_of(fd));
|
||||
return TRY(inode->read(offset, { (uint8_t*)buffer, count }));
|
||||
|
@ -882,7 +883,7 @@ namespace Kernel
|
|||
if (mode & S_IFMASK)
|
||||
return BAN::Error::from_errno(EINVAL);
|
||||
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_string_access(path));
|
||||
|
||||
auto absolute_path = TRY(absolute_path_of(path));
|
||||
|
@ -894,7 +895,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_chown(const char* path, uid_t uid, gid_t gid)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_string_access(path));
|
||||
|
||||
auto absolute_path = TRY(absolute_path_of(path));
|
||||
|
@ -906,7 +907,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_socket(int domain, int type, int protocol)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
return TRY(m_open_file_descriptors.socket(domain, type, protocol));
|
||||
}
|
||||
|
||||
|
@ -917,7 +918,7 @@ namespace Kernel
|
|||
if (!address && address_len)
|
||||
return BAN::Error::from_errno(EINVAL);
|
||||
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
if (address)
|
||||
{
|
||||
TRY(validate_pointer_access(address_len, sizeof(*address_len)));
|
||||
|
@ -933,7 +934,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_bind(int socket, const sockaddr* address, socklen_t address_len)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_pointer_access(address, address_len));
|
||||
|
||||
auto inode = TRY(m_open_file_descriptors.inode_of(socket));
|
||||
|
@ -946,7 +947,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_connect(int socket, const sockaddr* address, socklen_t address_len)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_pointer_access(address, address_len));
|
||||
|
||||
auto inode = TRY(m_open_file_descriptors.inode_of(socket));
|
||||
|
@ -959,7 +960,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_listen(int socket, int backlog)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
auto inode = TRY(m_open_file_descriptors.inode_of(socket));
|
||||
if (!inode->mode().ifsock())
|
||||
|
@ -971,7 +972,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_sendto(const sys_sendto_t* arguments)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_pointer_access(arguments, sizeof(sys_sendto_t)));
|
||||
TRY(validate_pointer_access(arguments->message, arguments->length));
|
||||
TRY(validate_pointer_access(arguments->dest_addr, arguments->dest_len));
|
||||
|
@ -991,7 +992,7 @@ namespace Kernel
|
|||
if (!arguments->address && arguments->address_len)
|
||||
return BAN::Error::from_errno(EINVAL);
|
||||
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_pointer_access(arguments, sizeof(sys_recvfrom_t)));
|
||||
TRY(validate_pointer_access(arguments->buffer, arguments->length));
|
||||
if (arguments->address)
|
||||
|
@ -1010,14 +1011,14 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_ioctl(int fildes, int request, void* arg)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
auto inode = TRY(m_open_file_descriptors.inode_of(fildes));
|
||||
return TRY(inode->ioctl(request, arg));
|
||||
}
|
||||
|
||||
BAN::ErrorOr<long> Process::sys_pselect(sys_pselect_t* arguments)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
TRY(validate_pointer_access(arguments, sizeof(sys_pselect_t)));
|
||||
if (arguments->readfds)
|
||||
|
@ -1086,7 +1087,7 @@ namespace Kernel
|
|||
if (set_bits > 0)
|
||||
break;
|
||||
|
||||
LockFreeGuard free(m_big_mutex);
|
||||
LockFreeGuard free(m_process_lock);
|
||||
SystemTimer::get().sleep(1);
|
||||
}
|
||||
|
||||
|
@ -1112,7 +1113,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_pipe(int fildes[2])
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_pointer_access(fildes, sizeof(int) * 2));
|
||||
TRY(m_open_file_descriptors.pipe(fildes));
|
||||
return 0;
|
||||
|
@ -1120,32 +1121,32 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_dup(int fildes)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
return TRY(m_open_file_descriptors.dup(fildes));
|
||||
}
|
||||
|
||||
BAN::ErrorOr<long> Process::sys_dup2(int fildes, int fildes2)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
return TRY(m_open_file_descriptors.dup2(fildes, fildes2));
|
||||
}
|
||||
|
||||
BAN::ErrorOr<long> Process::sys_fcntl(int fildes, int cmd, int extra)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
return TRY(m_open_file_descriptors.fcntl(fildes, cmd, extra));
|
||||
}
|
||||
|
||||
BAN::ErrorOr<long> Process::sys_seek(int fd, off_t offset, int whence)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(m_open_file_descriptors.seek(fd, offset, whence));
|
||||
return 0;
|
||||
}
|
||||
|
||||
BAN::ErrorOr<long> Process::sys_tell(int fd)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
return TRY(m_open_file_descriptors.tell(fd));
|
||||
}
|
||||
|
||||
|
@ -1153,7 +1154,7 @@ namespace Kernel
|
|||
{
|
||||
BAN::String absolute_source, absolute_target;
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(absolute_source.append(TRY(absolute_path_of(source))));
|
||||
TRY(absolute_target.append(TRY(absolute_path_of(target))));
|
||||
}
|
||||
|
@ -1163,7 +1164,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_fstat(int fd, struct stat* buf)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_pointer_access(buf, sizeof(struct stat)));
|
||||
TRY(m_open_file_descriptors.fstat(fd, buf));
|
||||
return 0;
|
||||
|
@ -1171,7 +1172,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_fstatat(int fd, const char* path, struct stat* buf, int flag)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_pointer_access(buf, sizeof(struct stat)));
|
||||
TRY(m_open_file_descriptors.fstatat(fd, path, buf, flag));
|
||||
return 0;
|
||||
|
@ -1179,7 +1180,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_stat(const char* path, struct stat* buf, int flag)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_pointer_access(buf, sizeof(struct stat)));
|
||||
TRY(m_open_file_descriptors.stat(TRY(absolute_path_of(path)), buf, flag));
|
||||
return 0;
|
||||
|
@ -1232,7 +1233,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_readdir(int fd, DirectoryEntryList* list, size_t list_size)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_pointer_access(list, list_size));
|
||||
TRY(m_open_file_descriptors.read_dir_entries(fd, list, list_size));
|
||||
return 0;
|
||||
|
@ -1243,7 +1244,7 @@ namespace Kernel
|
|||
BAN::String absolute_path;
|
||||
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_string_access(path));
|
||||
absolute_path = TRY(absolute_path_of(path));
|
||||
}
|
||||
|
@ -1252,7 +1253,7 @@ namespace Kernel
|
|||
if (!file.inode->mode().ifdir())
|
||||
return BAN::Error::from_errno(ENOTDIR);
|
||||
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
m_working_directory = BAN::move(file.canonical_path);
|
||||
|
||||
return 0;
|
||||
|
@ -1260,7 +1261,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_getpwd(char* buffer, size_t size)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
TRY(validate_pointer_access(buffer, size));
|
||||
|
||||
|
@ -1276,7 +1277,7 @@ namespace Kernel
|
|||
BAN::ErrorOr<long> Process::sys_mmap(const sys_mmap_t* args)
|
||||
{
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_pointer_access(args, sizeof(sys_mmap_t)));
|
||||
}
|
||||
|
||||
|
@ -1317,7 +1318,7 @@ namespace Kernel
|
|||
region_type, page_flags
|
||||
));
|
||||
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(m_mapped_regions.push_back(BAN::move(region)));
|
||||
return m_mapped_regions.back()->vaddr();
|
||||
}
|
||||
|
@ -1325,7 +1326,7 @@ namespace Kernel
|
|||
if (args->addr != nullptr)
|
||||
return BAN::Error::from_errno(ENOTSUP);
|
||||
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
auto inode = TRY(m_open_file_descriptors.inode_of(args->fildes));
|
||||
|
||||
|
@ -1373,7 +1374,7 @@ namespace Kernel
|
|||
if (vaddr % PAGE_SIZE != 0)
|
||||
return BAN::Error::from_errno(EINVAL);
|
||||
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
// FIXME: We should only map partial regions
|
||||
for (size_t i = 0; i < m_mapped_regions.size(); i++)
|
||||
|
@ -1392,7 +1393,7 @@ namespace Kernel
|
|||
if (vaddr % PAGE_SIZE != 0)
|
||||
return BAN::Error::from_errno(EINVAL);
|
||||
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
for (auto& mapped_region : m_mapped_regions)
|
||||
if (mapped_region->overlaps(vaddr, len))
|
||||
|
@ -1403,7 +1404,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_tty_ctrl(int fildes, int command, int flags)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
auto inode = TRY(m_open_file_descriptors.inode_of(fildes));
|
||||
if (!inode->is_tty())
|
||||
|
@ -1416,7 +1417,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_termid(char* buffer)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
TRY(validate_string_access(buffer));
|
||||
|
||||
|
@ -1437,7 +1438,7 @@ namespace Kernel
|
|||
BAN::ErrorOr<long> Process::sys_clock_gettime(clockid_t clock_id, timespec* tp)
|
||||
{
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_pointer_access(tp, sizeof(timespec)));
|
||||
}
|
||||
|
||||
|
@ -1462,7 +1463,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_load_keymap(const char* path)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_string_access(path));
|
||||
|
||||
if (!m_credentials.is_superuser())
|
||||
|
@ -1479,11 +1480,11 @@ namespace Kernel
|
|||
return BAN::Error::from_errno(EINVAL);
|
||||
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
TRY(validate_pointer_access((void*)handler, sizeof(handler)));
|
||||
}
|
||||
|
||||
LockGuard _(m_signal_lock);
|
||||
CriticalScope _;
|
||||
m_signal_handlers[signal] = (vaddr_t)handler;
|
||||
return 0;
|
||||
}
|
||||
|
@ -1497,7 +1498,7 @@ namespace Kernel
|
|||
|
||||
if (pid == Process::current().pid())
|
||||
{
|
||||
LockGuard _(m_signal_lock);
|
||||
CriticalScope _;
|
||||
Process::current().m_signal_pending_mask |= 1 << signal;
|
||||
return 0;
|
||||
}
|
||||
|
@ -1511,7 +1512,7 @@ namespace Kernel
|
|||
found = true;
|
||||
if (signal)
|
||||
{
|
||||
LockGuard _(m_signal_lock);
|
||||
CriticalScope _;
|
||||
process.m_signal_pending_mask |= 1 << signal;
|
||||
// FIXME: This is super hacky
|
||||
Scheduler::get().unblock_thread(process.m_threads.front()->tid());
|
||||
|
@ -1529,7 +1530,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_tcsetpgrp(int fd, pid_t pgrp)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
if (!m_controlling_terminal)
|
||||
return BAN::Error::from_errno(ENOTTY);
|
||||
|
@ -1565,7 +1566,7 @@ namespace Kernel
|
|||
if (uid < 0 || uid >= 1'000'000'000)
|
||||
return BAN::Error::from_errno(EINVAL);
|
||||
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
// If the process has appropriate privileges, setuid() shall set the real user ID, effective user ID, and the saved
|
||||
// set-user-ID of the calling process to uid.
|
||||
|
@ -1595,7 +1596,7 @@ namespace Kernel
|
|||
if (gid < 0 || gid >= 1'000'000'000)
|
||||
return BAN::Error::from_errno(EINVAL);
|
||||
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
// If the process has appropriate privileges, setgid() shall set the real group ID, effective group ID, and the saved
|
||||
// set-group-ID of the calling process to gid.
|
||||
|
@ -1623,7 +1624,7 @@ namespace Kernel
|
|||
if (uid < 0 || uid >= 1'000'000'000)
|
||||
return BAN::Error::from_errno(EINVAL);
|
||||
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
// If uid is equal to the real user ID or the saved set-user-ID, or if the process has appropriate privileges, seteuid()
|
||||
// shall set the effective user ID of the calling process to uid; the real user ID and saved set-user-ID shall remain unchanged.
|
||||
|
@ -1642,7 +1643,7 @@ namespace Kernel
|
|||
if (gid < 0 || gid >= 1'000'000'000)
|
||||
return BAN::Error::from_errno(EINVAL);
|
||||
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
// If gid is equal to the real group ID or the saved set-group-ID, or if the process has appropriate privileges, setegid()
|
||||
// shall set the effective group ID of the calling process to gid; the real group ID, saved set-group-ID, and any
|
||||
|
@ -1670,7 +1671,7 @@ namespace Kernel
|
|||
// by the ruid and euid arguments. If ruid or euid is -1, the corresponding effective or real user ID of the current
|
||||
// process shall be left unchanged.
|
||||
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
// A process with appropriate privileges can set either ID to any value.
|
||||
if (!m_credentials.is_superuser())
|
||||
|
@ -1718,7 +1719,7 @@ namespace Kernel
|
|||
|
||||
// The real and effective group IDs may be set to different values in the same call.
|
||||
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
// Only a process with appropriate privileges can set the real group ID and the effective group ID to any valid value.
|
||||
if (!m_credentials.is_superuser())
|
||||
|
@ -1751,7 +1752,7 @@ namespace Kernel
|
|||
if (pgid < 0)
|
||||
return BAN::Error::from_errno(EINVAL);
|
||||
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
if (pid == 0)
|
||||
pid = m_pid;
|
||||
|
@ -1816,7 +1817,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<long> Process::sys_getpgid(pid_t pid)
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
if (pid == 0 || pid == m_pid)
|
||||
return m_pgrp;
|
||||
|
@ -1848,7 +1849,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<BAN::String> Process::absolute_path_of(BAN::StringView path) const
|
||||
{
|
||||
LockGuard _(m_big_mutex);
|
||||
LockGuard _(m_process_lock);
|
||||
|
||||
if (path.empty() || path == "."sv)
|
||||
return m_working_directory;
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
#include <kernel/Arch.h>
|
||||
#include <kernel/Attributes.h>
|
||||
#include <kernel/CriticalScope.h>
|
||||
#include <kernel/GDT.h>
|
||||
#include <kernel/InterruptController.h>
|
||||
#include <kernel/Lock/LockGuard.h>
|
||||
#include <kernel/Process.h>
|
||||
#include <kernel/Scheduler.h>
|
||||
#include <kernel/Timer/Timer.h>
|
||||
|
@ -32,45 +32,13 @@ namespace Kernel
|
|||
asm volatile("movq %0, %%rsp" :: "r"(s_temp_stack + sizeof(s_temp_stack)));
|
||||
}
|
||||
|
||||
void SchedulerLock::lock()
|
||||
{
|
||||
auto tid = Scheduler::current_tid();
|
||||
if (tid != m_locker)
|
||||
{
|
||||
while (!m_locker.compare_exchange(-1, tid))
|
||||
__builtin_ia32_pause();
|
||||
ASSERT_EQ(m_lock_depth, 0);
|
||||
}
|
||||
m_lock_depth++;
|
||||
}
|
||||
|
||||
void SchedulerLock::unlock()
|
||||
{
|
||||
ASSERT_EQ(m_locker.load(), Scheduler::current_tid());
|
||||
ASSERT_GT(m_lock_depth, 0);
|
||||
if (--m_lock_depth == 0)
|
||||
m_locker = -1;
|
||||
}
|
||||
|
||||
void SchedulerLock::unlock_all()
|
||||
{
|
||||
ASSERT_EQ(m_locker.load(), Scheduler::current_tid());
|
||||
ASSERT_GT(m_lock_depth, 0);
|
||||
m_lock_depth = 0;
|
||||
m_locker = -1;
|
||||
}
|
||||
|
||||
pid_t SchedulerLock::locker() const
|
||||
{
|
||||
return m_locker;
|
||||
}
|
||||
|
||||
BAN::ErrorOr<void> Scheduler::initialize()
|
||||
{
|
||||
ASSERT(s_instance == nullptr);
|
||||
s_instance = new Scheduler();
|
||||
ASSERT(s_instance);
|
||||
s_instance->m_idle_thread = TRY(Thread::create_kernel([](void*) { for (;;) asm volatile("hlt"); }, nullptr, nullptr));
|
||||
Scheduler* scheduler = new Scheduler();
|
||||
ASSERT(scheduler);
|
||||
scheduler->m_idle_thread = TRY(Thread::create_kernel([](void*) { for (;;) asm volatile("hlt"); }, nullptr, nullptr));
|
||||
s_instance = scheduler;
|
||||
return {};
|
||||
}
|
||||
|
||||
|
@ -85,7 +53,6 @@ namespace Kernel
|
|||
VERIFY_CLI();
|
||||
ASSERT(!m_active_threads.empty());
|
||||
m_current_thread = m_active_threads.begin();
|
||||
m_lock.lock();
|
||||
execute_current_thread();
|
||||
ASSERT_NOT_REACHED();
|
||||
}
|
||||
|
@ -97,7 +64,7 @@ namespace Kernel
|
|||
|
||||
pid_t Scheduler::current_tid()
|
||||
{
|
||||
if (s_instance == nullptr || s_instance->m_idle_thread == nullptr)
|
||||
if (s_instance == nullptr)
|
||||
return 0;
|
||||
return Scheduler::get().current_thread().tid();
|
||||
}
|
||||
|
@ -105,7 +72,6 @@ namespace Kernel
|
|||
void Scheduler::timer_reschedule()
|
||||
{
|
||||
VERIFY_CLI();
|
||||
m_lock.lock();
|
||||
|
||||
wake_threads();
|
||||
|
||||
|
@ -119,7 +85,6 @@ namespace Kernel
|
|||
void Scheduler::reschedule()
|
||||
{
|
||||
DISABLE_INTERRUPTS();
|
||||
m_lock.lock();
|
||||
|
||||
if (save_current_thread())
|
||||
{
|
||||
|
@ -134,30 +99,20 @@ namespace Kernel
|
|||
void Scheduler::reschedule_if_idling()
|
||||
{
|
||||
VERIFY_CLI();
|
||||
m_lock.lock();
|
||||
|
||||
if (m_active_threads.empty() || ¤t_thread() != m_idle_thread)
|
||||
return m_lock.unlock();
|
||||
return;
|
||||
|
||||
if (save_current_thread())
|
||||
return;
|
||||
m_current_thread = {};
|
||||
advance_current_thread();
|
||||
m_current_thread = m_active_threads.begin();
|
||||
execute_current_thread();
|
||||
ASSERT_NOT_REACHED();
|
||||
}
|
||||
|
||||
void Scheduler::reschedule_current_no_save()
|
||||
{
|
||||
VERIFY_CLI();
|
||||
m_lock.lock();
|
||||
execute_current_thread();
|
||||
}
|
||||
|
||||
void Scheduler::wake_threads()
|
||||
{
|
||||
VERIFY_CLI();
|
||||
ASSERT_EQ(m_lock.locker(), current_tid());
|
||||
|
||||
uint64_t current_time = SystemTimer::get().ms_since_boot();
|
||||
while (!m_sleeping_threads.empty() && m_sleeping_threads.front().wake_time <= current_time)
|
||||
|
@ -172,7 +127,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<void> Scheduler::add_thread(Thread* thread)
|
||||
{
|
||||
LockGuard _(m_lock);
|
||||
CriticalScope _;
|
||||
TRY(m_active_threads.emplace_back(thread));
|
||||
return {};
|
||||
}
|
||||
|
@ -180,20 +135,19 @@ namespace Kernel
|
|||
void Scheduler::advance_current_thread()
|
||||
{
|
||||
VERIFY_CLI();
|
||||
ASSERT_EQ(m_lock.locker(), current_tid());
|
||||
|
||||
if (m_active_threads.empty())
|
||||
{
|
||||
m_current_thread = {};
|
||||
else if (!m_current_thread || ++m_current_thread == m_active_threads.end())
|
||||
return;
|
||||
}
|
||||
if (!m_current_thread || ++m_current_thread == m_active_threads.end())
|
||||
m_current_thread = m_active_threads.begin();
|
||||
|
||||
m_lock.m_locker = current_tid();
|
||||
}
|
||||
|
||||
void Scheduler::remove_and_advance_current_thread()
|
||||
{
|
||||
VERIFY_CLI();
|
||||
ASSERT_EQ(m_lock.locker(), current_tid());
|
||||
|
||||
ASSERT(m_current_thread);
|
||||
|
||||
|
@ -208,8 +162,6 @@ namespace Kernel
|
|||
advance_current_thread();
|
||||
m_active_threads.remove(temp);
|
||||
}
|
||||
|
||||
m_lock.m_locker = current_tid();
|
||||
}
|
||||
|
||||
// NOTE: this is declared always inline, so we don't corrupt the stack
|
||||
|
@ -217,7 +169,6 @@ namespace Kernel
|
|||
ALWAYS_INLINE bool Scheduler::save_current_thread()
|
||||
{
|
||||
VERIFY_CLI();
|
||||
ASSERT_EQ(m_lock.locker(), current_tid());
|
||||
|
||||
uintptr_t rsp, rip;
|
||||
push_callee_saved();
|
||||
|
@ -240,7 +191,6 @@ namespace Kernel
|
|||
void Scheduler::delete_current_process_and_thread()
|
||||
{
|
||||
DISABLE_INTERRUPTS();
|
||||
m_lock.lock();
|
||||
|
||||
load_temp_stack();
|
||||
PageTable::kernel().load();
|
||||
|
@ -261,7 +211,6 @@ namespace Kernel
|
|||
void Scheduler::execute_current_thread()
|
||||
{
|
||||
VERIFY_CLI();
|
||||
ASSERT_EQ(m_lock.locker(), current_tid());
|
||||
|
||||
load_temp_stack();
|
||||
PageTable::kernel().load();
|
||||
|
@ -272,7 +221,6 @@ namespace Kernel
|
|||
NEVER_INLINE void Scheduler::_execute_current_thread()
|
||||
{
|
||||
VERIFY_CLI();
|
||||
ASSERT_EQ(m_lock.locker(), current_tid());
|
||||
|
||||
#if SCHEDULER_VERIFY_STACK
|
||||
vaddr_t rsp;
|
||||
|
@ -319,12 +267,10 @@ namespace Kernel
|
|||
{
|
||||
case Thread::State::NotStarted:
|
||||
current->set_started();
|
||||
m_lock.unlock_all();
|
||||
start_thread(current->rsp(), current->rip());
|
||||
case Thread::State::Executing:
|
||||
while (current->can_add_signal_to_execute())
|
||||
current->handle_signal();
|
||||
m_lock.unlock_all();
|
||||
continue_thread(current->rsp(), current->rip());
|
||||
case Thread::State::Terminated:
|
||||
ASSERT_NOT_REACHED();
|
||||
|
@ -336,7 +282,6 @@ namespace Kernel
|
|||
void Scheduler::set_current_thread_sleeping_impl(uint64_t wake_time)
|
||||
{
|
||||
VERIFY_CLI();
|
||||
ASSERT_EQ(m_lock.locker(), current_tid());
|
||||
|
||||
if (save_current_thread())
|
||||
{
|
||||
|
@ -357,7 +302,6 @@ namespace Kernel
|
|||
);
|
||||
|
||||
m_current_thread = {};
|
||||
m_lock.m_locker = current_tid();
|
||||
advance_current_thread();
|
||||
|
||||
execute_current_thread();
|
||||
|
@ -368,7 +312,6 @@ namespace Kernel
|
|||
{
|
||||
VERIFY_STI();
|
||||
DISABLE_INTERRUPTS();
|
||||
m_lock.lock();
|
||||
|
||||
ASSERT(m_current_thread);
|
||||
|
||||
|
@ -380,7 +323,6 @@ namespace Kernel
|
|||
{
|
||||
VERIFY_STI();
|
||||
DISABLE_INTERRUPTS();
|
||||
m_lock.lock();
|
||||
|
||||
ASSERT(m_current_thread);
|
||||
|
||||
|
@ -390,7 +332,7 @@ namespace Kernel
|
|||
|
||||
void Scheduler::unblock_threads(Semaphore* semaphore)
|
||||
{
|
||||
LockGuard _(m_lock);
|
||||
CriticalScope critical;
|
||||
|
||||
for (auto it = m_sleeping_threads.begin(); it != m_sleeping_threads.end();)
|
||||
{
|
||||
|
@ -411,7 +353,7 @@ namespace Kernel
|
|||
|
||||
void Scheduler::unblock_thread(pid_t tid)
|
||||
{
|
||||
LockGuard _(m_lock);
|
||||
CriticalScope _;
|
||||
|
||||
for (auto it = m_sleeping_threads.begin(); it != m_sleeping_threads.end(); it++)
|
||||
{
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
#include <kernel/CriticalScope.h>
|
||||
#include <kernel/Lock/LockGuard.h>
|
||||
#include <kernel/Memory/Heap.h>
|
||||
#include <kernel/Memory/PageTable.h>
|
||||
|
|
|
@ -224,12 +224,15 @@ namespace Kernel
|
|||
{
|
||||
ASSERT(buffer.size() >= sector_count * sector_size());
|
||||
|
||||
{
|
||||
LockGuard _(m_mutex);
|
||||
if (!m_disk_cache.has_value())
|
||||
return read_sectors_impl(lba, sector_count, buffer);
|
||||
}
|
||||
|
||||
for (uint64_t offset = 0; offset < sector_count; offset++)
|
||||
{
|
||||
LockGuard _(m_mutex);
|
||||
auto sector_buffer = buffer.slice(offset * sector_size(), sector_size());
|
||||
if (m_disk_cache->read_from_cache(lba + offset, sector_buffer))
|
||||
continue;
|
||||
|
@ -244,12 +247,15 @@ namespace Kernel
|
|||
{
|
||||
ASSERT(buffer.size() >= sector_count * sector_size());
|
||||
|
||||
{
|
||||
LockGuard _(m_mutex);
|
||||
if (!m_disk_cache.has_value())
|
||||
return write_sectors_impl(lba, sector_count, buffer);
|
||||
}
|
||||
|
||||
for (uint8_t offset = 0; offset < sector_count; offset++)
|
||||
{
|
||||
LockGuard _(m_mutex);
|
||||
auto sector_buffer = buffer.slice(offset * sector_size(), sector_size());
|
||||
if (m_disk_cache->write_to_cache(lba + offset, sector_buffer, true).is_error())
|
||||
TRY(write_sectors_impl(lba + offset, 1, sector_buffer));
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
#include <BAN/Array.h>
|
||||
#include <kernel/CriticalScope.h>
|
||||
#include <kernel/Device/DeviceNumbers.h>
|
||||
#include <kernel/FS/DevFS/FileSystem.h>
|
||||
#include <kernel/IDT.h>
|
||||
|
@ -234,7 +235,7 @@ namespace Kernel
|
|||
uint8_t buffer[128];
|
||||
|
||||
{
|
||||
LockGuard _(m_lock);
|
||||
CriticalScope _;
|
||||
if (m_input.empty())
|
||||
return;
|
||||
uint8_t* ptr = buffer;
|
||||
|
|
|
@ -310,17 +310,16 @@ namespace Kernel
|
|||
|
||||
void TTY::putchar(uint8_t ch)
|
||||
{
|
||||
LockGuard _(m_mutex);
|
||||
SpinLockGuard _(m_write_lock);
|
||||
if (m_tty_ctrl.draw_graphics)
|
||||
putchar_impl(ch);
|
||||
}
|
||||
|
||||
BAN::ErrorOr<size_t> TTY::read_impl(off_t, BAN::ByteSpan buffer)
|
||||
{
|
||||
LockGuard _(m_mutex);
|
||||
while (!m_output.flush)
|
||||
{
|
||||
LockFreeGuard free(m_mutex);
|
||||
LockFreeGuard _(m_mutex);
|
||||
TRY(Thread::current().block_or_eintr_indefinite(m_output.semaphore));
|
||||
}
|
||||
|
||||
|
@ -346,7 +345,7 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<size_t> TTY::write_impl(off_t, BAN::ConstByteSpan buffer)
|
||||
{
|
||||
LockGuard _(m_mutex);
|
||||
SpinLockGuard _(m_write_lock);
|
||||
for (size_t i = 0; i < buffer.size(); i++)
|
||||
putchar(buffer[i]);
|
||||
return buffer.size();
|
||||
|
@ -355,7 +354,7 @@ namespace Kernel
|
|||
void TTY::putchar_current(uint8_t ch)
|
||||
{
|
||||
ASSERT(s_tty);
|
||||
LockGuard _(s_tty->m_mutex);
|
||||
SpinLockGuard _(s_tty->m_write_lock);
|
||||
s_tty->putchar(ch);
|
||||
}
|
||||
|
||||
|
|
|
@ -57,7 +57,6 @@ namespace Kernel
|
|||
|
||||
void VirtualTTY::clear()
|
||||
{
|
||||
LockGuard _(m_write_lock);
|
||||
for (uint32_t i = 0; i < m_width * m_height; i++)
|
||||
m_buffer[i] = { .foreground = m_foreground, .background = m_background, .codepoint = ' ' };
|
||||
m_terminal_driver->clear(m_background);
|
||||
|
@ -65,7 +64,6 @@ namespace Kernel
|
|||
|
||||
void VirtualTTY::set_font(const Kernel::Font& font)
|
||||
{
|
||||
LockGuard _(m_write_lock);
|
||||
m_terminal_driver->set_font(font);
|
||||
|
||||
uint32_t new_width = m_terminal_driver->width();
|
||||
|
@ -308,8 +306,6 @@ namespace Kernel
|
|||
|
||||
void VirtualTTY::putchar_impl(uint8_t ch)
|
||||
{
|
||||
LockGuard _(m_write_lock);
|
||||
|
||||
uint32_t codepoint = ch;
|
||||
|
||||
switch (m_state)
|
||||
|
|
|
@ -3,7 +3,6 @@
|
|||
#include <kernel/GDT.h>
|
||||
#include <kernel/InterruptController.h>
|
||||
#include <kernel/InterruptStack.h>
|
||||
#include <kernel/Lock/LockGuard.h>
|
||||
#include <kernel/Memory/kmalloc.h>
|
||||
#include <kernel/Process.h>
|
||||
#include <kernel/Scheduler.h>
|
||||
|
@ -30,10 +29,10 @@ namespace Kernel
|
|||
|
||||
void Thread::terminate()
|
||||
{
|
||||
LockGuard _(m_lock);
|
||||
CriticalScope _;
|
||||
m_state = Thread::State::Terminated;
|
||||
if (this == &Thread::current())
|
||||
Scheduler::get().reschedule_current_no_save();
|
||||
Scheduler::get().execute_current_thread();
|
||||
}
|
||||
|
||||
static pid_t s_next_tid = 1;
|
||||
|
@ -47,7 +46,14 @@ namespace Kernel
|
|||
BAN::ScopeGuard thread_deleter([thread] { delete thread; });
|
||||
|
||||
// Initialize stack and registers
|
||||
thread->m_stack = TRY(VirtualRange::create_kmalloc(m_kernel_stack_size));
|
||||
thread->m_stack = TRY(VirtualRange::create_to_vaddr_range(
|
||||
PageTable::kernel(),
|
||||
KERNEL_OFFSET,
|
||||
~(uintptr_t)0,
|
||||
m_kernel_stack_size,
|
||||
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
|
||||
true
|
||||
));
|
||||
thread->m_rsp = thread->stack_base() + thread->stack_size();
|
||||
thread->m_rip = (uintptr_t)entry;
|
||||
|
||||
|
@ -131,8 +137,6 @@ namespace Kernel
|
|||
|
||||
BAN::ErrorOr<Thread*> Thread::clone(Process* new_process, uintptr_t rsp, uintptr_t rip)
|
||||
{
|
||||
LockGuard _(m_lock);
|
||||
|
||||
ASSERT(m_is_userspace);
|
||||
ASSERT(m_state == State::Executing);
|
||||
|
||||
|
@ -158,8 +162,6 @@ namespace Kernel
|
|||
|
||||
void Thread::setup_exec()
|
||||
{
|
||||
LockGuard _(m_lock);
|
||||
|
||||
ASSERT(is_userspace());
|
||||
m_state = State::NotStarted;
|
||||
static entry_t entry_trampoline(
|
||||
|
@ -176,12 +178,9 @@ namespace Kernel
|
|||
// Signal mask is inherited
|
||||
|
||||
// Setup stack for returning
|
||||
uintptr_t offset = m_rsp % PAGE_SIZE;
|
||||
if (offset == 0)
|
||||
offset = PAGE_SIZE;
|
||||
ASSERT_GTE(offset, 4 * sizeof(uintptr_t));
|
||||
PageTable::with_fast_page(process().page_table().physical_address_of((m_rsp - 4 * sizeof(uintptr_t)) & PAGE_ADDR_MASK), [&] {
|
||||
uintptr_t rsp = PageTable::fast_page() + offset;
|
||||
ASSERT_EQ(m_rsp % PAGE_SIZE, 0u);
|
||||
PageTable::with_fast_page(process().page_table().physical_address_of(m_rsp - PAGE_SIZE), [&] {
|
||||
uintptr_t rsp = PageTable::fast_page() + PAGE_SIZE;
|
||||
write_to_stack(rsp, nullptr); // alignment
|
||||
write_to_stack(rsp, this);
|
||||
write_to_stack(rsp, &Thread::on_exit);
|
||||
|
@ -192,8 +191,6 @@ namespace Kernel
|
|||
|
||||
void Thread::setup_process_cleanup()
|
||||
{
|
||||
LockGuard _(m_lock);
|
||||
|
||||
m_state = State::NotStarted;
|
||||
static entry_t entry(
|
||||
[](void* process)
|
||||
|
@ -209,12 +206,9 @@ namespace Kernel
|
|||
m_signal_pending_mask = 0;
|
||||
m_signal_block_mask = ~0ull;
|
||||
|
||||
uintptr_t offset = m_rsp % PAGE_SIZE;
|
||||
if (offset == 0)
|
||||
offset = PAGE_SIZE;
|
||||
ASSERT_GTE(offset, 4 * sizeof(uintptr_t));
|
||||
PageTable::with_fast_page(process().page_table().physical_address_of((m_rsp - 4 * sizeof(uintptr_t)) & PAGE_ADDR_MASK), [&] {
|
||||
uintptr_t rsp = PageTable::fast_page() + offset;
|
||||
ASSERT_EQ(m_rsp % PAGE_SIZE, 0u);
|
||||
PageTable::with_fast_page(process().page_table().physical_address_of(m_rsp - PAGE_SIZE), [&] {
|
||||
uintptr_t rsp = PageTable::fast_page() + PAGE_SIZE;
|
||||
write_to_stack(rsp, nullptr); // alignment
|
||||
write_to_stack(rsp, this);
|
||||
write_to_stack(rsp, &Thread::on_exit);
|
||||
|
@ -225,7 +219,6 @@ namespace Kernel
|
|||
|
||||
bool Thread::is_interrupted_by_signal()
|
||||
{
|
||||
LockGuard _(m_lock);
|
||||
while (can_add_signal_to_execute())
|
||||
handle_signal();
|
||||
return will_execute_signal();
|
||||
|
@ -233,7 +226,6 @@ namespace Kernel
|
|||
|
||||
bool Thread::can_add_signal_to_execute() const
|
||||
{
|
||||
LockGuard _(m_lock);
|
||||
if (!is_userspace() || m_state != State::Executing)
|
||||
return false;
|
||||
auto& interrupt_stack = *reinterpret_cast<InterruptStack*>(interrupt_stack_base() + interrupt_stack_size() - sizeof(InterruptStack));
|
||||
|
@ -245,7 +237,6 @@ namespace Kernel
|
|||
|
||||
bool Thread::will_execute_signal() const
|
||||
{
|
||||
LockGuard _(m_lock);
|
||||
if (!is_userspace() || m_state != State::Executing)
|
||||
return false;
|
||||
auto& interrupt_stack = *reinterpret_cast<InterruptStack*>(interrupt_stack_base() + interrupt_stack_size() - sizeof(InterruptStack));
|
||||
|
@ -254,7 +245,6 @@ namespace Kernel
|
|||
|
||||
void Thread::handle_signal(int signal)
|
||||
{
|
||||
LockGuard _(m_lock);
|
||||
ASSERT(!interrupts_enabled());
|
||||
ASSERT(&Thread::current() == this);
|
||||
ASSERT(is_userspace());
|
||||
|
@ -348,7 +338,6 @@ namespace Kernel
|
|||
|
||||
bool Thread::add_signal(int signal)
|
||||
{
|
||||
LockGuard _(m_lock);
|
||||
ASSERT(!interrupts_enabled());
|
||||
uint64_t mask = 1ull << signal;
|
||||
if (!(m_signal_block_mask & mask))
|
||||
|
@ -391,7 +380,6 @@ namespace Kernel
|
|||
|
||||
void Thread::validate_stack() const
|
||||
{
|
||||
LockGuard _(m_lock);
|
||||
if (stack_base() <= m_rsp && m_rsp <= stack_base() + stack_size())
|
||||
return;
|
||||
if (interrupt_stack_base() <= m_rsp && m_rsp <= interrupt_stack_base() + interrupt_stack_size())
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
#include <BAN/ScopeGuard.h>
|
||||
#include <kernel/Lock/LockGuard.h>
|
||||
#include <kernel/ACPI.h>
|
||||
#include <kernel/IDT.h>
|
||||
#include <kernel/InterruptController.h>
|
||||
|
@ -245,7 +244,7 @@ namespace Kernel
|
|||
if (m_is_64bit)
|
||||
return regs.main_counter.full;
|
||||
|
||||
LockGuard _(m_lock);
|
||||
CriticalScope _;
|
||||
uint32_t current_low = regs.main_counter.low;
|
||||
uint32_t wraps = m_32bit_wraps;
|
||||
if (current_low < (uint32_t)m_last_ticks)
|
||||
|
@ -257,10 +256,8 @@ namespace Kernel
|
|||
{
|
||||
auto& regs = registers();
|
||||
|
||||
{
|
||||
LockGuard _(m_lock);
|
||||
|
||||
uint64_t current_ticks;
|
||||
|
||||
if (m_is_64bit)
|
||||
current_ticks = regs.main_counter.full;
|
||||
else
|
||||
|
@ -270,8 +267,8 @@ namespace Kernel
|
|||
m_32bit_wraps++;
|
||||
current_ticks = ((uint64_t)m_32bit_wraps << 32) | current_low;
|
||||
}
|
||||
|
||||
m_last_ticks = current_ticks;
|
||||
}
|
||||
|
||||
Scheduler::get().timer_reschedule();
|
||||
}