Compare commits

..

2 Commits

Author SHA1 Message Date
c50dba900d General: Update README features and environment variables 2025-07-17 21:32:53 +03:00
502bb4f84a BuildSystem: Add support for building initrd image
This is nice for testing when there isn't an xHCI controller available or
my USB stack fails :)
2025-07-17 21:25:59 +03:00
690 changed files with 10307 additions and 35398 deletions

1
.gitignore vendored
View File

@@ -3,4 +3,3 @@
build/
base/
script/fakeroot-context
tools/update-image-perms

View File

@@ -35,7 +35,7 @@ namespace BAN
constexpr T& front();
Span<T> span() { return Span(m_data, size()); }
Span<const T> span() const { return Span(m_data, size()); }
const Span<T> span() const { return Span(m_data, size()); }
constexpr size_type size() const;

View File

@@ -9,35 +9,29 @@
#include <BAN/Formatter.h>
#include <stdio.h>
#define __debug_putchar [](int c) { putc_unlocked(c, stddbg); }
#define __debug_putchar [](int c) { putc(c, stddbg); }
#define dprintln(...) \
do { \
flockfile(stddbg); \
BAN::Formatter::print(__debug_putchar, __VA_ARGS__); \
BAN::Formatter::print(__debug_putchar,"\n"); \
fflush(stddbg); \
funlockfile(stddbg); \
} while (false)
#define dwarnln(...) \
do { \
flockfile(stddbg); \
BAN::Formatter::print(__debug_putchar, "\e[33m"); \
BAN::Formatter::print(__debug_putchar, __VA_ARGS__); \
BAN::Formatter::print(__debug_putchar, "\e[m\n"); \
fflush(stddbg); \
funlockfile(stddbg); \
} while(false)
#define derrorln(...) \
do { \
flockfile(stddbg); \
BAN::Formatter::print(__debug_putchar, "\e[31m"); \
BAN::Formatter::print(__debug_putchar, __VA_ARGS__); \
BAN::Formatter::print(__debug_putchar, "\e[m\n"); \
fflush(stddbg); \
funlockfile(stddbg); \
} while(false)
#define dprintln_if(cond, ...) \

View File

@@ -1,154 +1,49 @@
#pragma once
#include <BAN/HashSet.h>
#include <BAN/Hash.h>
#include <BAN/LinkedList.h>
#include <BAN/Vector.h>
namespace BAN
{
template<typename HashSetIt, typename HashMap, typename Entry>
class HashMapIterator
{
public:
HashMapIterator() = default;
Entry& operator*()
{
return const_cast<Entry&>(m_iterator.operator*());
}
const Entry& operator*() const
{
return m_iterator.operator*();
}
Entry* operator->()
{
return const_cast<Entry*>(m_iterator.operator->());
}
const Entry* operator->() const
{
return m_iterator.operator->();
}
HashMapIterator& operator++()
{
++m_iterator;
return *this;
}
HashMapIterator operator++(int)
{
auto temp = *this;
++(*this);
return temp;
}
bool operator==(HashMapIterator other) const
{
return m_iterator == other.m_iterator;
}
bool operator!=(HashMapIterator other) const
{
return m_iterator != other.m_iterator;
}
private:
explicit HashMapIterator(HashSetIt it)
: m_iterator(it)
{ }
private:
HashSetIt m_iterator;
friend HashMap;
};
namespace detail
{
template<typename T, typename Key, typename HASH, typename COMP>
concept HashMapFindable = requires(const Key& a, const T& b) { COMP()(a, b); HASH()(b); };
}
template<typename Key, typename T, typename HASH = BAN::hash<Key>, typename COMP = BAN::equal<Key>>
template<typename Key, typename T, typename HASH = BAN::hash<Key>>
class HashMap
{
public:
struct Entry
{
const Key key;
T value;
Entry() = delete;
Entry& operator=(const Entry&) = delete;
Entry& operator=(Entry&&) = delete;
Entry(const Entry& other)
: key(other.key)
, value(other.value)
{ }
Entry(Entry&& other)
: key(BAN::move(const_cast<Key&>(other.key)))
, value(BAN::move(other.value))
{ }
template<typename... Args>
Entry(const Key& key, Args&&... args) requires is_constructible_v<T, Args...>
: key(key)
, value(forward<Args>(args)...)
{}
template<typename... Args>
Entry(Key&& key, Args&&... args)
Entry(Key&& key, Args&&... args) requires is_constructible_v<T, Args...>
: key(BAN::move(key))
, value(BAN::forward<Args>(args)...)
{ }
};
, value(forward<Args>(args)...)
{}
struct EntryHash
{
constexpr bool operator()(const Key& a)
{
return HASH()(a);
}
constexpr bool operator()(const Entry& a)
{
return HASH()(a.key);
}
};
struct EntryComp
{
constexpr bool operator()(const Entry& a, const Key& b)
{
return COMP()(a.key, b);
}
constexpr bool operator()(const Entry& a, const Entry& b)
{
return COMP()(a.key, b.key);
}
Key key;
T value;
};
public:
using size_type = size_t;
using key_type = Key;
using value_type = T;
using iterator = HashMapIterator<typename HashSet<Entry, EntryHash, EntryComp>::iterator, HashMap, Entry>;
using const_iterator = HashMapIterator<typename HashSet<Entry, EntryHash, EntryComp>::const_iterator, HashMap, const Entry>;
using size_type = size_t;
using key_type = Key;
using value_type = T;
using iterator = IteratorDouble<Entry, Vector, LinkedList, HashMap>;
using const_iterator = ConstIteratorDouble<Entry, Vector, LinkedList, HashMap>;
public:
HashMap() = default;
~HashMap() { clear(); }
HashMap(const HashMap<Key, T, HASH>&);
HashMap(HashMap<Key, T, HASH>&&);
~HashMap();
HashMap(const HashMap& other) { *this = other; }
HashMap& operator=(const HashMap& other)
{
m_hash_set = other.m_hash_set;
return *this;
}
HashMap(HashMap&& other) { *this = BAN::move(other); }
HashMap& operator=(HashMap&& other)
{
m_hash_set = BAN::move(other.m_hash_set);
return *this;
}
iterator begin() { return iterator(m_hash_set.begin()); }
iterator end() { return iterator(m_hash_set.end()); }
const_iterator begin() const { return const_iterator(m_hash_set.begin()); }
const_iterator end() const { return const_iterator(m_hash_set.end()); }
HashMap<Key, T, HASH>& operator=(const HashMap<Key, T, HASH>&);
HashMap<Key, T, HASH>& operator=(HashMap<Key, T, HASH>&&);
ErrorOr<iterator> insert(const Key& key, const T& value) { return emplace(key, value); }
ErrorOr<iterator> insert(const Key& key, T&& value) { return emplace(key, move(value)); }
@@ -162,100 +57,263 @@ namespace BAN
template<typename... Args>
ErrorOr<iterator> emplace(const Key& key, Args&&... args) requires is_constructible_v<T, Args...>
{ return emplace(Key(key), BAN::forward<Args>(args)...); }
{ return emplace(Key(key), forward<Args>(args)...); }
template<typename... Args>
ErrorOr<iterator> emplace(Key&& key, Args&&... args) requires is_constructible_v<T, Args...>
{
ASSERT(!contains(key));
auto it = TRY(m_hash_set.insert(Entry { BAN::move(key), T(BAN::forward<Args>(args)...) }));
return iterator(it);
}
ErrorOr<iterator> emplace(Key&&, Args&&...) requires is_constructible_v<T, Args...>;
template<typename... Args>
ErrorOr<iterator> emplace_or_assign(const Key& key, Args&&... args) requires is_constructible_v<T, Args...>
{ return emplace_or_assign(Key(key), BAN::forward<Args>(args)...); }
{ return emplace_or_assign(Key(key), forward<Args>(args)...); }
template<typename... Args>
ErrorOr<iterator> emplace_or_assign(Key&& key, Args&&... args) requires is_constructible_v<T, Args...>
{
if (auto it = m_hash_set.find(key); it != m_hash_set.end())
{
const_cast<T&>(it->value) = T(BAN::forward<Args>(args)...);
return iterator(it);
}
ErrorOr<iterator> emplace_or_assign(Key&&, Args&&...) requires is_constructible_v<T, Args...>;
auto it = TRY(m_hash_set.insert(Entry { BAN::move(key), T(BAN::forward<Args>(args)...) }));
return iterator(it);
}
iterator begin() { return iterator(m_buckets.end(), m_buckets.begin()); }
iterator end() { return iterator(m_buckets.end(), m_buckets.end()); }
const_iterator begin() const { return const_iterator(m_buckets.end(), m_buckets.begin()); }
const_iterator end() const { return const_iterator(m_buckets.end(), m_buckets.end()); }
template<detail::HashMapFindable<Key, HASH, COMP> U>
void remove(const U& key)
{
if (auto it = find(key); it != end())
remove(it);
}
ErrorOr<void> reserve(size_type);
iterator remove(iterator it)
{
return iterator(m_hash_set.remove(it.m_iterator));
}
void remove(const Key&);
void remove(iterator it);
void clear();
template<detail::HashMapFindable<Key, HASH, COMP> U>
iterator find(const U& key)
{
return iterator(m_hash_set.find(key));
}
T& operator[](const Key&);
const T& operator[](const Key&) const;
template<detail::HashMapFindable<Key, HASH, COMP> U>
const_iterator find(const U& key) const
{
return const_iterator(m_hash_set.find(key));
}
iterator find(const Key& key);
const_iterator find(const Key& key) const;
bool contains(const Key&) const;
void clear()
{
m_hash_set.clear();
}
ErrorOr<void> reserve(size_type size)
{
return m_hash_set.reserve(size);
}
template<detail::HashMapFindable<Key, HASH, COMP> U>
T& operator[](const U& key)
{
return find(key)->value;
}
template<detail::HashMapFindable<Key, HASH, COMP> U>
const T& operator[](const U& key) const
{
return find(key)->value;
}
template<detail::HashMapFindable<Key, HASH, COMP> U>
bool contains(const U& key) const
{
return find(key) != end();
}
size_type capacity() const
{
return m_hash_set.capacity();
}
size_type size() const
{
return m_hash_set.size();
}
bool empty() const
{
return m_hash_set.empty();
}
bool empty() const;
size_type size() const;
private:
HashSet<Entry, EntryHash, EntryComp> m_hash_set;
ErrorOr<void> rebucket(size_type);
LinkedList<Entry>& get_bucket(const Key&);
const LinkedList<Entry>& get_bucket(const Key&) const;
Vector<LinkedList<Entry>>::iterator get_bucket_iterator(const Key&);
Vector<LinkedList<Entry>>::const_iterator get_bucket_iterator(const Key&) const;
private:
Vector<LinkedList<Entry>> m_buckets;
size_type m_size = 0;
friend iterator;
};
template<typename Key, typename T, typename HASH>
HashMap<Key, T, HASH>::HashMap(const HashMap<Key, T, HASH>& other)
{
*this = other;
}
template<typename Key, typename T, typename HASH>
HashMap<Key, T, HASH>::HashMap(HashMap<Key, T, HASH>&& other)
{
*this = move(other);
}
template<typename Key, typename T, typename HASH>
HashMap<Key, T, HASH>::~HashMap()
{
clear();
}
template<typename Key, typename T, typename HASH>
HashMap<Key, T, HASH>& HashMap<Key, T, HASH>::operator=(const HashMap<Key, T, HASH>& other)
{
clear();
m_buckets = other.m_buckets;
m_size = other.m_size;
return *this;
}
template<typename Key, typename T, typename HASH>
HashMap<Key, T, HASH>& HashMap<Key, T, HASH>::operator=(HashMap<Key, T, HASH>&& other)
{
clear();
m_buckets = move(other.m_buckets);
m_size = other.m_size;
other.m_size = 0;
return *this;
}
// Inserts a new entry constructed in place from `args`, keyed by `key`.
// The key must not already be present (enforced by ASSERT). Returns an
// iterator to the new entry, or propagates an allocation error via TRY.
template<typename Key, typename T, typename HASH>
template<typename... Args>
ErrorOr<typename HashMap<Key, T, HASH>::iterator> HashMap<Key, T, HASH>::emplace(Key&& key, Args&&... args) requires is_constructible_v<T, Args...>
{
ASSERT(!contains(key));
// Grow the bucket table first so the bucket iterator below is not
// invalidated by a rehash after insertion.
TRY(rebucket(m_size + 1));
auto bucket_it = get_bucket_iterator(key);
// `key` is consumed here; it is not used again after the move.
TRY(bucket_it->emplace_back(move(key), forward<Args>(args)...));
m_size++;
// The new entry is the last element of its bucket's linked list.
return iterator(m_buckets.end(), bucket_it, prev(bucket_it->end(), 1));
}
// Inserts a new entry for `key`, or overwrites the value of an existing
// entry with a freshly constructed T. Returns an iterator to the affected
// entry; allocation errors from the insert path are propagated.
template<typename Key, typename T, typename HASH>
template<typename... Args>
ErrorOr<typename HashMap<Key, T, HASH>::iterator> HashMap<Key, T, HASH>::emplace_or_assign(Key&& key, Args&&... args) requires is_constructible_v<T, Args...>
{
// With no buckets allocated yet, get_bucket_iterator() would assert;
// an empty map can only take the plain-insert path.
if (empty())
return emplace(move(key), forward<Args>(args)...);
auto bucket_it = get_bucket_iterator(key);
for (auto entry_it = bucket_it->begin(); entry_it != bucket_it->end(); entry_it++)
{
if (entry_it->key != key)
continue;
// Key already present: replace the value in place, no rehash needed.
entry_it->value = T(forward<Args>(args)...);
return iterator(m_buckets.end(), bucket_it, entry_it);
}
// Key not found in its bucket: fall back to a normal insert.
return emplace(move(key), forward<Args>(args)...);
}
template<typename Key, typename T, typename HASH>
ErrorOr<void> HashMap<Key, T, HASH>::reserve(size_type size)
{
TRY(rebucket(size));
return {};
}
template<typename Key, typename T, typename HASH>
void HashMap<Key, T, HASH>::remove(const Key& key)
{
auto it = find(key);
if (it != end())
remove(it);
}
// Removes the entry the iterator points at by unlinking it from its
// bucket's linked list. The iterator is invalidated; no value is returned.
template<typename Key, typename T, typename HASH>
void HashMap<Key, T, HASH>::remove(iterator it)
{
// outer_current() is the bucket, inner_current() the list node within it.
it.outer_current()->remove(it.inner_current());
m_size--;
}
template<typename Key, typename T, typename HASH>
void HashMap<Key, T, HASH>::clear()
{
m_buckets.clear();
m_size = 0;
}
template<typename Key, typename T, typename HASH>
T& HashMap<Key, T, HASH>::operator[](const Key& key)
{
ASSERT(!empty());
auto& bucket = get_bucket(key);
for (Entry& entry : bucket)
if (entry.key == key)
return entry.value;
ASSERT_NOT_REACHED();
}
template<typename Key, typename T, typename HASH>
const T& HashMap<Key, T, HASH>::operator[](const Key& key) const
{
ASSERT(!empty());
const auto& bucket = get_bucket(key);
for (const Entry& entry : bucket)
if (entry.key == key)
return entry.value;
ASSERT_NOT_REACHED();
}
template<typename Key, typename T, typename HASH>
typename HashMap<Key, T, HASH>::iterator HashMap<Key, T, HASH>::find(const Key& key)
{
if (empty())
return end();
auto bucket_it = get_bucket_iterator(key);
for (auto it = bucket_it->begin(); it != bucket_it->end(); it++)
if (it->key == key)
return iterator(m_buckets.end(), bucket_it, it);
return end();
}
template<typename Key, typename T, typename HASH>
typename HashMap<Key, T, HASH>::const_iterator HashMap<Key, T, HASH>::find(const Key& key) const
{
if (empty())
return end();
auto bucket_it = get_bucket_iterator(key);
for (auto it = bucket_it->begin(); it != bucket_it->end(); it++)
if (it->key == key)
return const_iterator(m_buckets.end(), bucket_it, it);
return end();
}
template<typename Key, typename T, typename HASH>
bool HashMap<Key, T, HASH>::contains(const Key& key) const
{
return find(key) != end();
}
template<typename Key, typename T, typename HASH>
bool HashMap<Key, T, HASH>::empty() const
{
return m_size == 0;
}
template<typename Key, typename T, typename HASH>
typename HashMap<Key, T, HASH>::size_type HashMap<Key, T, HASH>::size() const
{
return m_size;
}
// Ensures at least `bucket_count` buckets exist, rehashing every entry
// into a new bucket table. Grow-only: never shrinks an existing table.
template<typename Key, typename T, typename HASH>
ErrorOr<void> HashMap<Key, T, HASH>::rebucket(size_type bucket_count)
{
if (m_buckets.size() >= bucket_count)
return {};
// At least double the table so repeated single insertions rehash
// O(log n) times rather than every call.
size_type new_bucket_count = BAN::Math::max<size_type>(bucket_count, m_buckets.size() * 2);
Vector<LinkedList<Entry>> new_buckets;
TRY(new_buckets.resize(new_bucket_count));
for (auto& bucket : m_buckets)
{
for (auto it = bucket.begin(); it != bucket.end();)
{
size_type new_bucket_index = HASH()(it->key) % new_buckets.size();
// Splice the node into its new bucket without copying the entry;
// the call returns the next iterator in the old bucket.
it = bucket.move_element_to_other_linked_list(new_buckets[new_bucket_index], new_buckets[new_bucket_index].end(), it);
}
}
m_buckets = move(new_buckets);
return {};
}
template<typename Key, typename T, typename HASH>
LinkedList<typename HashMap<Key, T, HASH>::Entry>& HashMap<Key, T, HASH>::get_bucket(const Key& key)
{
return *get_bucket_iterator(key);
}
template<typename Key, typename T, typename HASH>
const LinkedList<typename HashMap<Key, T, HASH>::Entry>& HashMap<Key, T, HASH>::get_bucket(const Key& key) const
{
return *get_bucket_iterator(key);
}
template<typename Key, typename T, typename HASH>
Vector<LinkedList<typename HashMap<Key, T, HASH>::Entry>>::iterator HashMap<Key, T, HASH>::get_bucket_iterator(const Key& key)
{
ASSERT(!m_buckets.empty());
auto index = HASH()(key) % m_buckets.size();
return next(m_buckets.begin(), index);
}
template<typename Key, typename T, typename HASH>
Vector<LinkedList<typename HashMap<Key, T, HASH>::Entry>>::const_iterator HashMap<Key, T, HASH>::get_bucket_iterator(const Key& key) const
{
ASSERT(!m_buckets.empty());
auto index = HASH()(key) % m_buckets.size();
return next(m_buckets.begin(), index);
}
}

View File

@@ -2,358 +2,198 @@
#include <BAN/Errors.h>
#include <BAN/Hash.h>
#include <BAN/Iterators.h>
#include <BAN/LinkedList.h>
#include <BAN/Math.h>
#include <BAN/Move.h>
#include <BAN/New.h>
#include <BAN/Vector.h>
namespace BAN
{
template<typename HashSet, typename Bucket, typename T>
class HashSetIterator
{
public:
HashSetIterator() = default;
const T& operator*() const
{
ASSERT(m_bucket);
return *m_bucket->element();
}
const T* operator->() const
{
ASSERT(m_bucket);
return m_bucket->element();
}
HashSetIterator& operator++()
{
ASSERT(m_bucket);
m_bucket++;
skip_to_valid_bucket();
return *this;
}
HashSetIterator operator++(int)
{
auto temp = *this;
++(*this);
return temp;
}
bool operator==(HashSetIterator other) const
{
return m_bucket == other.m_bucket;
}
bool operator!=(HashSetIterator other) const
{
return m_bucket != other.m_bucket;
}
private:
explicit HashSetIterator(Bucket* bucket)
: m_bucket(bucket)
{
if (m_bucket != nullptr)
skip_to_valid_bucket();
}
void skip_to_valid_bucket()
{
while (m_bucket->state != Bucket::USED && !m_bucket->end)
m_bucket++;
if (m_bucket->end)
m_bucket = nullptr;
}
private:
Bucket* m_bucket { nullptr };
friend HashSet;
};
namespace detail
{
template<typename T, typename U, typename HASH, typename COMP>
concept HashSetFindable = requires(const U& a, const T& b) { COMP()(a, b); HASH()(b); };
}
template<typename T, typename HASH = BAN::hash<T>, typename COMP = BAN::equal<T>>
template<typename T, typename HASH = hash<T>>
class HashSet
{
private:
struct Bucket
{
static constexpr uint8_t UNUSED = 0;
static constexpr uint8_t USED = 1;
static constexpr uint8_t REMOVED = 2;
alignas(T) uint8_t storage[sizeof(T)];
hash_t hash;
uint8_t state : 2;
uint8_t chain_start : 1;
uint8_t end : 1;
T* element() { return reinterpret_cast<T*>(storage); }
const T* element() const { return reinterpret_cast<const T*>(storage); }
};
public:
using value_type = T;
using size_type = size_t;
using iterator = HashSetIterator<HashSet, Bucket, T>;
using const_iterator = HashSetIterator<HashSet, const Bucket, const T>;
using value_type = T;
using size_type = size_t;
using iterator = IteratorDouble<T, Vector, LinkedList, HashSet>;
using const_iterator = ConstIteratorDouble<T, Vector, LinkedList, HashSet>;
public:
HashSet() = default;
~HashSet() { clear(); }
HashSet(const HashSet&);
HashSet(HashSet&&);
HashSet(const HashSet& other) { *this = other; }
HashSet& operator=(const HashSet& other)
{
clear();
HashSet& operator=(const HashSet&);
HashSet& operator=(HashSet&&);
MUST(reserve(other.size()));
for (auto& bucket : other)
MUST(insert(bucket));
ErrorOr<void> insert(const T&);
ErrorOr<void> insert(T&&);
void remove(const T&);
void clear();
return *this;
}
ErrorOr<void> reserve(size_type);
HashSet(HashSet&& other) { *this = BAN::move(other); }
HashSet& operator=(HashSet&& other)
{
clear();
iterator begin() { return iterator(m_buckets.end(), m_buckets.begin()); }
iterator end() { return iterator(m_buckets.end(), m_buckets.end()); }
const_iterator begin() const { return const_iterator(m_buckets.end(), m_buckets.begin()); }
const_iterator end() const { return const_iterator(m_buckets.end(), m_buckets.end()); }
m_buckets = other.m_buckets;
m_capacity = other.m_capacity;
m_size = other.m_size;
m_removed = other.m_removed;
bool contains(const T&) const;
other.m_buckets = nullptr;
other.m_capacity = 0;
other.m_size = 0;
other.m_removed = 0;
return *this;
}
iterator begin() { return iterator(m_buckets); }
iterator end() { return iterator(nullptr); }
const_iterator begin() const { return const_iterator(m_buckets); }
const_iterator end() const { return const_iterator(nullptr); }
ErrorOr<iterator> insert(const T& value)
{
return insert(T(value));
}
ErrorOr<iterator> insert(T&& value)
{
if (should_rehash_with_size(m_size + 1))
TRY(rehash(m_size * 2));
return insert_impl(BAN::move(value), HASH()(value));
}
template<detail::HashSetFindable<T, HASH, COMP> U>
void remove(const U& value)
{
if (auto it = find(value); it != end())
remove(it);
}
iterator remove(iterator it)
{
auto& bucket = *it.m_bucket;
bucket.element()->~T();
bucket.state = Bucket::REMOVED;
m_size--;
m_removed++;
return iterator(&bucket);
}
template<detail::HashSetFindable<T, HASH, COMP> U>
iterator find(const U& value)
{
return iterator(const_cast<Bucket*>(find_impl(value).m_bucket));
}
template<detail::HashSetFindable<T, HASH, COMP> U>
const_iterator find(const U& value) const
{
return find_impl(value);
}
void clear()
{
if (m_buckets == nullptr)
return;
for (size_type i = 0; i < m_capacity; i++)
if (m_buckets[i].state == Bucket::USED)
m_buckets[i].element()->~T();
BAN::deallocator(m_buckets);
m_buckets = nullptr;
m_capacity = 0;
m_size = 0;
m_removed = 0;
}
ErrorOr<void> reserve(size_type size)
{
if (should_rehash_with_size(size))
TRY(rehash(size * 2));
return {};
}
template<detail::HashSetFindable<T, HASH, COMP> U>
bool contains(const U& value) const
{
return find(value) != end();
}
size_type capacity() const
{
return m_capacity;
}
size_type size() const
{
return m_size;
}
bool empty() const
{
return m_size == 0;
}
size_type size() const;
bool empty() const;
private:
ErrorOr<void> rehash(size_type new_capacity)
{
new_capacity = BAN::Math::max<size_t>(16, BAN::Math::max(new_capacity, m_size + 1));
new_capacity = BAN::Math::round_up_to_power_of_two(new_capacity);
void* new_buckets = BAN::allocator((new_capacity + 1) * sizeof(Bucket));
if (new_buckets == nullptr)
return BAN::Error::from_errno(ENOMEM);
memset(new_buckets, 0, (new_capacity + 1) * sizeof(Bucket));
Bucket* old_buckets = m_buckets;
const size_type old_capacity = m_capacity;
m_buckets = static_cast<Bucket*>(new_buckets);
m_capacity = new_capacity;
m_size = 0;
m_removed = 0;
for (size_type i = 0; i < old_capacity; i++)
{
auto& old_bucket = old_buckets[i];
if (old_bucket.state != Bucket::USED)
continue;
insert_impl(BAN::move(*old_bucket.element()), old_bucket.hash);
old_bucket.element()->~T();
}
m_buckets[m_capacity].end = true;
BAN::deallocator(old_buckets);
return {};
}
template<detail::HashSetFindable<T, HASH, COMP> U>
const_iterator find_impl(const U& value) const
{
if (m_capacity == 0)
return end();
bool first = true;
const hash_t orig_hash = HASH()(value);
for (auto hash = orig_hash;; hash = get_next_hash_in_chain(hash, orig_hash), first = false)
{
auto& bucket = m_buckets[hash & (m_capacity - 1)];
if (bucket.state == Bucket::USED && bucket.hash == orig_hash && COMP()(*bucket.element(), value))
return const_iterator(&bucket);
if (bucket.state == Bucket::UNUSED)
return end();
if (!first && bucket.chain_start)
return end();
}
}
iterator insert_impl(T&& value, hash_t orig_hash)
{
ASSERT(!should_rehash_with_size(m_size + 1));
Bucket* target = nullptr;
bool first = true;
for (auto hash = orig_hash;; hash = get_next_hash_in_chain(hash, orig_hash), first = false)
{
auto& bucket = m_buckets[hash & (m_capacity - 1)];
if (!first)
bucket.chain_start = false;
if (bucket.state == Bucket::USED)
{
if (bucket.hash != orig_hash || !COMP()(*bucket.element(), value))
continue;
target = &bucket;
break;
}
if (target == nullptr)
target = &bucket;
if (bucket.state == Bucket::UNUSED)
break;
}
switch (target->state)
{
case Bucket::USED:
target->element()->~T();
break;
case Bucket::REMOVED:
m_removed--;
[[fallthrough]];
case Bucket::UNUSED:
m_size++;
break;
}
target->chain_start = first && target->state == Bucket::UNUSED;
target->hash = orig_hash;
target->state = Bucket::USED;
new (target->element()) T(BAN::move(value));
return iterator(target);
}
bool should_rehash_with_size(size_type size) const
{
if (m_capacity < 16)
return true;
if (size + m_removed > m_capacity / 4 * 3)
return true;
return false;
}
hash_t get_next_hash_in_chain(hash_t prev_hash, hash_t orig_hash) const
{
// TODO: does this even provide better performance than `return prev_hash + 1`
// when using "good" hash functions
return prev_hash * 1103515245 + (orig_hash | 1);
}
ErrorOr<void> rebucket(size_type);
LinkedList<T>& get_bucket(const T&);
const LinkedList<T>& get_bucket(const T&) const;
private:
Bucket* m_buckets { nullptr };
size_type m_capacity { 0 };
size_type m_size { 0 };
size_type m_removed { 0 };
Vector<LinkedList<T>> m_buckets;
size_type m_size = 0;
};
template<typename T, typename HASH>
HashSet<T, HASH>::HashSet(const HashSet& other)
: m_buckets(other.m_buckets)
, m_size(other.m_size)
{
}
template<typename T, typename HASH>
HashSet<T, HASH>::HashSet(HashSet&& other)
: m_buckets(move(other.m_buckets))
, m_size(other.m_size)
{
other.clear();
}
template<typename T, typename HASH>
HashSet<T, HASH>& HashSet<T, HASH>::operator=(const HashSet& other)
{
clear();
m_buckets = other.m_buckets;
m_size = other.m_size;
return *this;
}
template<typename T, typename HASH>
HashSet<T, HASH>& HashSet<T, HASH>::operator=(HashSet&& other)
{
clear();
m_buckets = move(other.m_buckets);
m_size = other.m_size;
other.clear();
return *this;
}
// Copy-insert: makes a temporary copy of `key` and hands it to the
// rvalue overload. No-op if the key is already present (handled there).
template<typename T, typename HASH>
ErrorOr<void> HashSet<T, HASH>::insert(const T& key)
{
// T(key) is already a prvalue; wrapping it in move() was redundant and
// can only hinder copy elision. Pass the temporary directly.
return insert(T(key));
}
// Move-insert: adds `key` to its hash bucket unless an equal element is
// already stored. Returns success for duplicates without modifying the set;
// propagates allocation errors from rebucket()/push_back().
template<typename T, typename HASH>
ErrorOr<void> HashSet<T, HASH>::insert(T&& key)
{
// The empty() guard avoids get_bucket()'s non-empty-table assertion.
if (!empty() && get_bucket(key).contains(key))
return {};
// Grow the table first so get_bucket() below resolves against the
// final bucket layout.
TRY(rebucket(m_size + 1));
TRY(get_bucket(key).push_back(move(key)));
m_size++;
return {};
}
template<typename T, typename HASH>
void HashSet<T, HASH>::remove(const T& key)
{
if (empty()) return;
auto& bucket = get_bucket(key);
for (auto it = bucket.begin(); it != bucket.end(); it++)
{
if (*it == key)
{
bucket.remove(it);
m_size--;
break;
}
}
}
template<typename T, typename HASH>
void HashSet<T, HASH>::clear()
{
m_buckets.clear();
m_size = 0;
}
template<typename T, typename HASH>
ErrorOr<void> HashSet<T, HASH>::reserve(size_type size)
{
TRY(rebucket(size));
return {};
}
template<typename T, typename HASH>
bool HashSet<T, HASH>::contains(const T& key) const
{
if (empty()) return false;
return get_bucket(key).contains(key);
}
template<typename T, typename HASH>
typename HashSet<T, HASH>::size_type HashSet<T, HASH>::size() const
{
return m_size;
}
template<typename T, typename HASH>
bool HashSet<T, HASH>::empty() const
{
return m_size == 0;
}
// Ensures at least `bucket_count` buckets exist, rehashing every element
// into a new bucket table. Grow-only: never shrinks an existing table.
template<typename T, typename HASH>
ErrorOr<void> HashSet<T, HASH>::rebucket(size_type bucket_count)
{
if (m_buckets.size() >= bucket_count)
return {};
// At least double the table so repeated single insertions rehash
// O(log n) times rather than every call.
size_type new_bucket_count = Math::max<size_type>(bucket_count, m_buckets.size() * 2);
Vector<LinkedList<T>> new_buckets;
// Propagate the actual resize error instead of collapsing every failure
// to ENOMEM; this also matches HashMap::rebucket.
TRY(new_buckets.resize(new_bucket_count));
for (auto& bucket : m_buckets)
{
for (auto it = bucket.begin(); it != bucket.end();)
{
size_type new_bucket_index = HASH()(*it) % new_buckets.size();
// Splice the node into its new bucket without copying the element;
// the call returns the next iterator in the old bucket.
it = bucket.move_element_to_other_linked_list(new_buckets[new_bucket_index], new_buckets[new_bucket_index].end(), it);
}
}
m_buckets = move(new_buckets);
return {};
}
template<typename T, typename HASH>
LinkedList<T>& HashSet<T, HASH>::get_bucket(const T& key)
{
ASSERT(!m_buckets.empty());
size_type index = HASH()(key) % m_buckets.size();
return m_buckets[index];
}
template<typename T, typename HASH>
const LinkedList<T>& HashSet<T, HASH>::get_bucket(const T& key) const
{
ASSERT(!m_buckets.empty());
size_type index = HASH()(key) % m_buckets.size();
return m_buckets[index];
}
}

View File

@@ -1,89 +0,0 @@
#pragma once
#include <BAN/Iterators.h>
#include <BAN/Swap.h>
#include <BAN/Traits.h>
namespace BAN
{
namespace detail
{
// Sift the element at `index` up toward the root until the max-heap
// property holds. Uses the unsigned-wrap trick: at the root, index == 0,
// parent computes to SIZE_MAX/2 - ish, making `parent < index` false.
template<typename It, typename Comp>
void heapify_up(It begin, size_t index, Comp comp)
{
size_t parent = (index - 1) / 2;
while (parent < index)
{
// Stop once the element compares below its parent.
if (comp(*(begin + index), *(begin + parent)))
break;
swap(*(begin + parent), *(begin + index));
index = parent;
parent = (index - 1) / 2;
}
}
// Sift the element at `index` down into the heap of length `len`,
// always swapping with the larger (per !comp) of the two children.
template<typename It, typename Comp>
void heapify_down(It begin, size_t index, size_t len, Comp comp)
{
for (;;)
{
const size_t lchild = 2 * index + 1;
const size_t rchild = 2 * index + 2;
size_t child = 0;
if (lchild < len && !comp(*(begin + lchild), *(begin + index)))
{
// Both children beat the parent: pick the larger child so the
// heap property holds after the swap.
if (rchild < len && !comp(*(begin + rchild), *(begin + lchild)))
child = rchild;
else
child = lchild;
}
else if (rchild < len && !comp(*(begin + rchild), *(begin + index)))
child = rchild;
else
break;
swap(*(begin + child), *(begin + index));
index = child;
}
}
}
// Builds a max-heap (w.r.t. `comp`) over [begin, end) in O(n) by sifting
// down every internal node, starting from the last one.
template<typename It, typename Comp = less<it_value_type_t<It>>>
void make_heap(It begin, It end, Comp comp = {})
{
const size_t len = distance(begin, end);
if (len <= 1)
return;
size_t index = (len - 2) / 2;
// When index-- wraps past 0 to SIZE_MAX, `index < len` fails and the
// loop ends — deliberate unsigned-wrap termination.
while (index < len)
detail::heapify_down(begin, index--, len, comp);
}
// Pushes the element at end-1 into the heap [begin, end-1).
// NOTE(review): like std::push_heap this appears to assume a non-empty
// range (len - 1 underflows otherwise) — confirm callers guarantee it.
template<typename It, typename Comp = less<it_value_type_t<It>>>
void push_heap(It begin, It end, Comp comp = {})
{
const size_t len = distance(begin, end);
detail::heapify_up(begin, len - 1, comp);
}
// Moves the top element to end-1 and restores the heap over the rest.
template<typename It, typename Comp = less<it_value_type_t<It>>>
void pop_heap(It begin, It end, Comp comp = {})
{
const size_t len = distance(begin, end);
swap(*begin, *(begin + len - 1));
detail::heapify_down(begin, 0, len - 1, comp);
}
// Heap-sort: repeatedly pop the max to the back, yielding ascending order
// for the default `less` comparator.
template<typename It, typename Comp = less<it_value_type_t<It>>>
void sort_heap(It begin, It end, Comp comp = {})
{
while (begin != end)
pop_heap(begin, end--, comp);
}
}

View File

@@ -9,10 +9,6 @@ namespace BAN
struct IPv4Address
{
constexpr IPv4Address()
: IPv4Address(0)
{ }
constexpr IPv4Address(uint32_t u32_address)
{
raw = u32_address;

View File

@@ -1,46 +0,0 @@
#pragma once
// Preprocessor utilities: argument counting, token pasting, stringification,
// and bounded (0..9 argument) FOR_EACH-style expansion helpers.

// _ban_count_args(...) expands to the number of arguments (0-9) by shifting
// a descending counter list through a 10-slot selector.
#define _ban_count_args_impl(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, ...) _9
#define _ban_count_args(...) _ban_count_args_impl(__VA_ARGS__ __VA_OPT__(,) 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
// Two-step concat/stringify so macro arguments are expanded before ## / #.
#define _ban_concat_impl(a, b) a##b
#define _ban_concat(a, b) _ban_concat_impl(a, b)
#define _ban_stringify_impl(x) #x
#define _ban_stringify(x) _ban_stringify_impl(x)
// _ban_fe_N: apply f(index, arg) to each of N arguments, juxtaposed.
#define _ban_fe_0(f)
#define _ban_fe_1(f, _0) f(0, _0)
#define _ban_fe_2(f, _0, _1) f(0, _0) f(1, _1)
#define _ban_fe_3(f, _0, _1, _2) f(0, _0) f(1, _1) f(2, _2)
#define _ban_fe_4(f, _0, _1, _2, _3) f(0, _0) f(1, _1) f(2, _2) f(3, _3)
#define _ban_fe_5(f, _0, _1, _2, _3, _4) f(0, _0) f(1, _1) f(2, _2) f(3, _3) f(4, _4)
#define _ban_fe_6(f, _0, _1, _2, _3, _4, _5) f(0, _0) f(1, _1) f(2, _2) f(3, _3) f(4, _4) f(5, _5)
#define _ban_fe_7(f, _0, _1, _2, _3, _4, _5, _6) f(0, _0) f(1, _1) f(2, _2) f(3, _3) f(4, _4) f(5, _5) f(6, _6)
#define _ban_fe_8(f, _0, _1, _2, _3, _4, _5, _6, _7) f(0, _0) f(1, _1) f(2, _2) f(3, _3) f(4, _4) f(5, _5) f(6, _6) f(7, _7)
#define _ban_fe_9(f, _0, _1, _2, _3, _4, _5, _6, _7, _8) f(0, _0) f(1, _1) f(2, _2) f(3, _3) f(4, _4) f(5, _5) f(6, _6) f(7, _7) f(8, _8)
// Dispatch to the matching _ban_fe_N based on the argument count.
#define _ban_for_each(f, ...) _ban_concat(_ban_fe_, _ban_count_args(__VA_ARGS__))(f __VA_OPT__(,) __VA_ARGS__)
// Same as _ban_fe_N but expansions are separated by commas (e.g. for
// initializer lists or call arguments).
#define _ban_fe_comma_0(f)
#define _ban_fe_comma_1(f, _0) f(0, _0)
#define _ban_fe_comma_2(f, _0, _1) f(0, _0), f(1, _1)
#define _ban_fe_comma_3(f, _0, _1, _2) f(0, _0), f(1, _1), f(2, _2)
#define _ban_fe_comma_4(f, _0, _1, _2, _3) f(0, _0), f(1, _1), f(2, _2), f(3, _3)
#define _ban_fe_comma_5(f, _0, _1, _2, _3, _4) f(0, _0), f(1, _1), f(2, _2), f(3, _3), f(4, _4)
#define _ban_fe_comma_6(f, _0, _1, _2, _3, _4, _5) f(0, _0), f(1, _1), f(2, _2), f(3, _3), f(4, _4), f(5, _5)
#define _ban_fe_comma_7(f, _0, _1, _2, _3, _4, _5, _6) f(0, _0), f(1, _1), f(2, _2), f(3, _3), f(4, _4), f(5, _5), f(6, _6)
#define _ban_fe_comma_8(f, _0, _1, _2, _3, _4, _5, _6, _7) f(0, _0), f(1, _1), f(2, _2), f(3, _3), f(4, _4), f(5, _5), f(6, _6), f(7, _7)
#define _ban_fe_comma_9(f, _0, _1, _2, _3, _4, _5, _6, _7, _8) f(0, _0), f(1, _1), f(2, _2), f(3, _3), f(4, _4), f(5, _5), f(6, _6), f(7, _7), f(8, _8)
#define _ban_for_each_comma(f, ...) _ban_concat(_ban_fe_comma_, _ban_count_args(__VA_ARGS__))(f __VA_OPT__(,) __VA_ARGS__)
// _ban_get(n, ...) selects the n-th (0-based) argument, n in 0..9.
#define _ban_get_0(a0, ...) a0
#define _ban_get_1(a0, a1, ...) a1
#define _ban_get_2(a0, a1, a2, ...) a2
#define _ban_get_3(a0, a1, a2, a3, ...) a3
#define _ban_get_4(a0, a1, a2, a3, a4, ...) a4
#define _ban_get_5(a0, a1, a2, a3, a4, a5, ...) a5
#define _ban_get_6(a0, a1, a2, a3, a4, a5, a6, ...) a6
#define _ban_get_7(a0, a1, a2, a3, a4, a5, a6, a7, ...) a7
#define _ban_get_8(a0, a1, a2, a3, a4, a5, a6, a7, a8, ...) a8
#define _ban_get_9(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, ...) a9
#define _ban_get(n, ...) _ban_concat(_ban_get_, n)(__VA_ARGS__)

View File

@@ -36,11 +36,12 @@ namespace BAN::Math
template<integral T>
inline constexpr T gcd(T a, T b)
{
T t;
while (b)
{
T temp = b;
t = b;
b = a % b;
a = temp;
a = t;
}
return a;
}
@@ -65,36 +66,25 @@ namespace BAN::Math
return (x & (x - 1)) == 0;
}
template<integral T> requires(sizeof(T) <= 8)
inline constexpr T round_up_to_power_of_two(T x)
template<BAN::integral T>
static constexpr bool will_multiplication_overflow(T a, T b)
{
x--;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
if constexpr(sizeof(T) >= 2)
x |= x >> 8;
if constexpr(sizeof(T) >= 4)
x |= x >> 16;
if constexpr(sizeof(T) >= 8)
x |= x >> 32;
return x + 1;
if (a == 0 || b == 0)
return false;
if ((a > 0) == (b > 0))
return a > BAN::numeric_limits<T>::max() / b;
else
return a < BAN::numeric_limits<T>::min() / b;
}
template<integral T>
__attribute__((always_inline))
inline constexpr bool will_multiplication_overflow(T a, T b)
template<BAN::integral T>
static constexpr bool will_addition_overflow(T a, T b)
{
T dummy;
return __builtin_mul_overflow(a, b, &dummy);
}
template<integral T>
__attribute__((always_inline))
inline constexpr bool will_addition_overflow(T a, T b)
{
T dummy;
return __builtin_add_overflow(a, b, &dummy);
if (a > 0 && b > 0)
return a > BAN::numeric_limits<T>::max() - b;
if (a < 0 && b < 0)
return a < BAN::numeric_limits<T>::min() - b;
return false;
}
template<typename T>
@@ -108,19 +98,6 @@ namespace BAN::Math
return sizeof(T) * 8 - __builtin_clzll(x) - 1;
}
// This is ugly but my clangd does not like including
// intrinsic headers at all
#if !defined(__SSE__) || !defined(__SSE2__)
#pragma GCC push_options
#ifndef __SSE__
#pragma GCC target("sse")
#endif
#ifndef __SSE2__
#pragma GCC target("sse2")
#endif
#define BAN_MATH_POP_OPTIONS
#endif
template<floating_point T>
inline constexpr T floor(T x)
{
@@ -182,23 +159,7 @@ namespace BAN::Math
"jne 1b;"
: "+t"(a)
: "u"(b)
: "ax", "cc"
);
return a;
}
template<floating_point T>
inline constexpr T remainder(T a, T b)
{
asm(
"1:"
"fprem1;"
"fnstsw %%ax;"
"testb $4, %%ah;"
"jne 1b;"
: "+t"(a)
: "u"(b)
: "ax", "cc"
: "ax"
);
return a;
}
@@ -206,7 +167,7 @@ namespace BAN::Math
template<floating_point T>
static T modf(T x, T* iptr)
{
const T frac = BAN::Math::fmod<T>(x, (T)1.0);
const T frac = BAN::Math::fmod<T>(x, 1);
*iptr = x - frac;
return frac;
}
@@ -214,15 +175,15 @@ namespace BAN::Math
template<floating_point T>
inline constexpr T frexp(T num, int* exp)
{
if (num == (T)0.0)
if (num == 0.0)
{
*exp = 0;
return (T)0.0;
return 0.0;
}
T e;
asm("fxtract" : "+t"(num), "=u"(e));
*exp = (int)e + 1;
T _exp;
asm("fxtract" : "+t"(num), "=u"(_exp));
*exp = (int)_exp + 1;
return num / (T)2.0;
}
@@ -290,7 +251,6 @@ namespace BAN::Math
"fstp %%st(1);"
: "+t"(x)
);
return x;
}
@@ -303,9 +263,18 @@ namespace BAN::Math
template<floating_point T>
inline constexpr T pow(T x, T y)
{
if (x == (T)0.0)
return (T)0.0;
return exp2<T>(y * log2<T>(x));
asm(
"fyl2x;"
"fld1;"
"fld %%st(1);"
"fprem;"
"f2xm1;"
"faddp;"
"fscale;"
: "+t"(x), "+u"(y)
);
return x;
}
template<floating_point T>
@@ -341,27 +310,16 @@ namespace BAN::Math
template<floating_point T>
inline constexpr T sqrt(T x)
{
if constexpr(BAN::is_same_v<T, float>)
{
using v4sf = float __attribute__((vector_size(16)));
return __builtin_ia32_sqrtss((v4sf) { x, 0.0f, 0.0f, 0.0f })[0];
}
else if constexpr(BAN::is_same_v<T, double>)
{
using v2df = double __attribute__((vector_size(16)));
return __builtin_ia32_sqrtsd((v2df) { x, 0.0 })[0];
}
else if constexpr(BAN::is_same_v<T, long double>)
{
asm("fsqrt" : "+t"(x));
return x;
}
asm("fsqrt" : "+t"(x));
return x;
}
template<floating_point T>
inline constexpr T cbrt(T value)
{
return pow<T>(value, (T)1.0 / (T)3.0);
if (value == 0.0)
return 0.0;
return pow<T>(value, 1.0 / 3.0);
}
template<floating_point T>
@@ -388,21 +346,30 @@ namespace BAN::Math
inline constexpr T tan(T x)
{
T one, ret;
asm("fptan" : "=t"(one), "=u"(ret) : "0"(x));
asm(
"fptan"
: "=t"(one), "=u"(ret)
: "0"(x)
);
return ret;
}
template<floating_point T>
inline constexpr T atan2(T y, T x)
{
asm("fpatan" : "+t"(x) : "u"(y) : "st(1)");
asm(
"fpatan"
: "+t"(x)
: "u"(y)
: "st(1)"
);
return x;
}
template<floating_point T>
inline constexpr T atan(T x)
{
return atan2<T>(x, (T)1.0);
return atan2<T>(x, 1.0);
}
template<floating_point T>
@@ -411,10 +378,10 @@ namespace BAN::Math
if (x == (T)0.0)
return (T)0.0;
if (x == (T)1.0)
return +numbers::pi_v<T> / (T)2.0;
return numbers::pi_v<T> / (T)2.0;
if (x == (T)-1.0)
return -numbers::pi_v<T> / (T)2.0;
return (T)2.0 * atan<T>(x / ((T)1.0 + sqrt<T>((T)1.0 - x * x)));
return (T)2.0 * atan<T>(x / (T(1.0) + sqrt<T>((T)1.0 - x * x)));
}
template<floating_point T>
@@ -444,7 +411,7 @@ namespace BAN::Math
template<floating_point T>
inline constexpr T tanh(T x)
{
const T exp_px = exp<T>(+x);
const T exp_px = exp<T>(x);
const T exp_nx = exp<T>(-x);
return (exp_px - exp_nx) / (exp_px + exp_nx);
}
@@ -473,9 +440,4 @@ namespace BAN::Math
return sqrt<T>(x * x + y * y);
}
#ifdef BAN_MATH_POP_OPTIONS
#undef BAN_MATH_POP_OPTIONS
#pragma GCC pop_options
#endif
}

View File

@@ -9,12 +9,10 @@
namespace BAN
{
#if defined(__is_kernel)
static constexpr void*(*allocator)(size_t) = kmalloc;
static constexpr void*(*reallocator)(void*, size_t) = nullptr;
static constexpr void(*deallocator)(void*) = kfree;
static constexpr void*(&allocator)(size_t) = kmalloc;
static constexpr void(&deallocator)(void*) = kfree;
#else
static constexpr void*(*allocator)(size_t) = malloc;
static constexpr void*(*reallocator)(void*, size_t) = realloc;
static constexpr void(*deallocator)(void*) = free;
static constexpr void*(&allocator)(size_t) = malloc;
static constexpr void(&deallocator)(void*) = free;
#endif
}

View File

@@ -1,64 +0,0 @@
#pragma once
#include "BAN/Errors.h"
#include <BAN/Vector.h>
#include <BAN/Heap.h>
namespace BAN
{
template<typename T, typename Comp = less<T>>
class PriorityQueue
{
public:
PriorityQueue() = default;
PriorityQueue(Comp comp)
: m_comp(comp)
{ }
ErrorOr<void> push(const T& value)
{
TRY(m_data.push_back(value));
push_heap(m_data.begin(), m_data.end());
return {};
}
ErrorOr<void> push(T&& value)
{
TRY(m_data.push_back(move(value)));
push_heap(m_data.begin(), m_data.end());
return {};
}
template<typename... Args>
ErrorOr<void> emplace(Args&&... args) requires is_constructible_v<T, Args...>
{
TRY(m_data.emplace_back(forward<Args>(args)...));
push_heap(m_data.begin(), m_data.end());
return {};
}
void pop()
{
pop_heap(m_data.begin(), m_data.end());
m_data.pop_back();
}
BAN::ErrorOr<void> reserve(Vector<T>::size_type size)
{
return m_data.reserve(size);
}
T& top() { return m_data.front(); }
const T& top() const { return m_data.front(); }
bool empty() const { return m_data.empty(); }
Vector<T>::size_type size() const { return m_data.size(); }
Vector<T>::size_type capacity() const { return m_data.capacity(); }
private:
Comp m_comp;
Vector<T> m_data;
};
}

View File

@@ -2,9 +2,9 @@
#include <BAN/Atomic.h>
#include <BAN/Errors.h>
#include <BAN/Hash.h>
#include <BAN/Move.h>
#include <BAN/NoCopyMove.h>
#include <stdint.h>
namespace BAN
{
@@ -129,9 +129,14 @@ namespace BAN
return *this;
}
T* ptr() const { return m_pointer; }
T& operator*() const { ASSERT(!empty()); return *ptr(); }
T* operator->() const { ASSERT(!empty()); return ptr(); }
T* ptr() { ASSERT(!empty()); return m_pointer; }
const T* ptr() const { ASSERT(!empty()); return m_pointer; }
T& operator*() { return *ptr(); }
const T& operator*() const { return *ptr(); }
T* operator->() { return ptr(); }
const T* operator->() const { return ptr(); }
bool operator==(RefPtr other) const { return m_pointer == other.m_pointer; }
bool operator!=(RefPtr other) const { return m_pointer != other.m_pointer; }
@@ -153,13 +158,4 @@ namespace BAN
friend class RefPtr;
};
template<typename T>
struct hash<RefPtr<T>>
{
constexpr hash_t operator()(const RefPtr<T>& ptr) const
{
return hash<T*>()(ptr.ptr());
}
};
}

View File

@@ -1,6 +1,5 @@
#pragma once
#include <BAN/Heap.h>
#include <BAN/Math.h>
#include <BAN/Swap.h>
#include <BAN/Traits.h>
@@ -9,7 +8,7 @@
namespace BAN::sort
{
template<typename It, typename Comp = less<it_value_type_t<It>>>
template<typename It, typename Comp = less<typename It::value_type>>
void exchange_sort(It begin, It end, Comp comp = {})
{
for (It lhs = begin; lhs != end; ++lhs)
@@ -43,7 +42,7 @@ namespace BAN::sort
}
template<typename It, typename Comp = less<it_value_type_t<It>>>
template<typename It, typename Comp = less<typename It::value_type>>
void quick_sort(It begin, It end, Comp comp = {})
{
if (distance(begin, end) <= 1)
@@ -53,14 +52,14 @@ namespace BAN::sort
quick_sort(++mid, end, comp);
}
template<typename It, typename Comp = less<it_value_type_t<It>>>
template<typename It, typename Comp = less<typename It::value_type>>
void insertion_sort(It begin, It end, Comp comp = {})
{
if (distance(begin, end) <= 1)
return;
for (It it1 = next(begin, 1); it1 != end; ++it1)
{
auto x = move(*it1);
typename It::value_type x = move(*it1);
It it2 = it1;
for (; it2 != begin && comp(x, *prev(it2, 1)); --it2)
*it2 = move(*prev(it2, 1));
@@ -68,7 +67,83 @@ namespace BAN::sort
}
}
template<typename It, typename Comp = less<it_value_type_t<It>>>
namespace detail
{
template<typename It, typename Comp>
void push_heap(It begin, size_t hole_index, size_t top_index, typename It::value_type value, Comp comp)
{
size_t parent = (hole_index - 1) / 2;
while (hole_index > top_index && comp(*next(begin, parent), value))
{
*next(begin, hole_index) = move(*next(begin, parent));
hole_index = parent;
parent = (hole_index - 1) / 2;
}
*next(begin, hole_index) = move(value);
}
template<typename It, typename Comp>
void adjust_heap(It begin, size_t hole_index, size_t len, typename It::value_type value, Comp comp)
{
const size_t top_index = hole_index;
size_t child = hole_index;
while (child < (len - 1) / 2)
{
child = 2 * (child + 1);
if (comp(*next(begin, child), *next(begin, child - 1)))
child--;
*next(begin, hole_index) = move(*next(begin, child));
hole_index = child;
}
if (len % 2 == 0 && child == (len - 2) / 2)
{
child = 2 * (child + 1);
*next(begin, hole_index) = move(*next(begin, child - 1));
hole_index = child - 1;
}
push_heap(begin, hole_index, top_index, move(value), comp);
}
}
template<typename It, typename Comp = less<typename It::value_type>>
void make_heap(It begin, It end, Comp comp = {})
{
const size_t len = distance(begin, end);
if (len <= 1)
return;
size_t parent = (len - 2) / 2;
while (true)
{
detail::adjust_heap(begin, parent, len, move(*next(begin, parent)), comp);
if (parent == 0)
break;
parent--;
}
}
template<typename It, typename Comp = less<typename It::value_type>>
void sort_heap(It begin, It end, Comp comp = {})
{
const size_t len = distance(begin, end);
if (len <= 1)
return;
size_t last = len;
while (last > 1)
{
last--;
typename It::value_type x = move(*next(begin, last));
*next(begin, last) = move(*begin);
detail::adjust_heap(begin, 0, last, move(x), comp);
}
}
template<typename It, typename Comp = less<typename It::value_type>>
void heap_sort(It begin, It end, Comp comp = {})
{
make_heap(begin, end, comp);
@@ -92,7 +167,7 @@ namespace BAN::sort
}
template<typename It, typename Comp = less<it_value_type_t<It>>>
template<typename It, typename Comp = less<typename It::value_type>>
void intro_sort(It begin, It end, Comp comp = {})
{
const size_t len = distance(begin, end);
@@ -115,10 +190,10 @@ namespace BAN::sort
}
template<typename It, size_t radix = 256>
requires is_unsigned_v<it_value_type_t<It>> && (radix > 0 && (radix & (radix - 1)) == 0)
requires is_unsigned_v<typename It::value_type> && (radix > 0 && (radix & (radix - 1)) == 0)
BAN::ErrorOr<void> radix_sort(It begin, It end)
{
using value_type = it_value_type_t<It>;
using value_type = typename It::value_type;
const size_t len = distance(begin, end);
if (len <= 1)
@@ -156,7 +231,7 @@ namespace BAN::sort
return {};
}
template<typename It, typename Comp = less<it_value_type_t<It>>>
template<typename It, typename Comp = less<typename It::value_type>>
void sort(It begin, It end, Comp comp = {})
{
return intro_sort(begin, end, comp);

View File

@@ -69,6 +69,7 @@ namespace BAN
value_type* data() const
{
ASSERT(m_data);
return m_data;
}
@@ -83,6 +84,7 @@ namespace BAN
Span slice(size_type start, size_type length = ~size_type(0)) const
{
ASSERT(m_data);
ASSERT(start <= m_size);
if (length == ~size_type(0))
length = m_size - start;

View File

@@ -14,7 +14,6 @@ namespace BAN
{
public:
using size_type = size_t;
using value_type = char;
using iterator = IteratorSimple<char, String>;
using const_iterator = ConstIteratorSimple<char, String>;
static constexpr size_type sso_capacity = 15;
@@ -353,9 +352,10 @@ namespace BAN::Formatter
{
template<typename F>
void print_argument(F putc, const String& string, const ValueFormat& format)
void print_argument(F putc, const String& string, const ValueFormat&)
{
print_argument(putc, string.sv(), format);
for (String::size_type i = 0; i < string.size(); i++)
putc(string[i]);
}
}

View File

@@ -14,7 +14,6 @@ namespace BAN
{
public:
using size_type = size_t;
using value_type = char;
using const_iterator = ConstIteratorSimple<char, StringView>;
public:
@@ -247,12 +246,10 @@ namespace BAN::Formatter
{
template<typename F>
void print_argument(F putc, const StringView& sv, const ValueFormat& format)
void print_argument(F putc, const StringView& sv, const ValueFormat&)
{
for (StringView::size_type i = 0; i < sv.size(); i++)
putc(sv[i]);
for (int i = sv.size(); i < format.fill; i++)
putc(' ');
}
}

View File

@@ -61,9 +61,6 @@ namespace BAN
template<typename T> struct is_move_constructible { static constexpr bool value = is_constructible_v<T, T&&>; };
template<typename T> inline constexpr bool is_move_constructible_v = is_move_constructible<T>::value;
template<typename T> struct is_trivially_copyable { static constexpr bool value = __is_trivially_copyable(T); };
template<typename T> inline constexpr bool is_trivially_copyable_v = is_trivially_copyable<T>::value;
template<typename T> struct is_integral { static constexpr bool value = requires (T t, T* p, void (*f)(T)) { reinterpret_cast<T>(t); f(0); p + t; }; };
template<typename T> inline constexpr bool is_integral_v = is_integral<T>::value;
template<typename T> concept integral = is_integral_v<T>;
@@ -142,10 +139,6 @@ namespace BAN
template<typename T> using make_signed_t = typename make_signed<T>::type;
#undef __BAN_TRAITS_MAKE_SIGNED_CV
template<typename T> struct it_value_type { using value_type = T::value_type; };
template<typename T> struct it_value_type<T*> { using value_type = T; };
template<typename T> using it_value_type_t = typename it_value_type<T>::value_type;
template<typename T> struct less { constexpr bool operator()(const T& lhs, const T& rhs) const { return lhs < rhs; } };
template<typename T> struct equal { constexpr bool operator()(const T& lhs, const T& rhs) const { return lhs == rhs; } };
template<typename T> struct greater { constexpr bool operator()(const T& lhs, const T& rhs) const { return lhs > rhs; } };

View File

@@ -1,7 +1,6 @@
#pragma once
#include <BAN/Errors.h>
#include <BAN/Hash.h>
#include <BAN/NoCopyMove.h>
namespace BAN
@@ -54,12 +53,32 @@ namespace BAN
return *this;
}
T* ptr() const { return m_pointer; }
T& operator*() const { ASSERT(!empty()); return *ptr(); }
T* operator->() const { ASSERT(!empty()); return ptr(); }
T& operator*()
{
ASSERT(m_pointer);
return *m_pointer;
}
bool empty() const { return m_pointer == nullptr; }
explicit operator bool() const { return m_pointer; }
const T& operator*() const
{
ASSERT(m_pointer);
return *m_pointer;
}
T* operator->()
{
ASSERT(m_pointer);
return m_pointer;
}
const T* operator->() const
{
ASSERT(m_pointer);
return m_pointer;
}
T* ptr() { return m_pointer; }
const T* ptr() const { return m_pointer; }
void clear()
{
@@ -68,6 +87,8 @@ namespace BAN
m_pointer = nullptr;
}
operator bool() const { return m_pointer != nullptr; }
private:
T* m_pointer = nullptr;
@@ -75,13 +96,4 @@ namespace BAN
friend class UniqPtr;
};
template<typename T>
struct hash<UniqPtr<T>>
{
constexpr hash_t operator()(const UniqPtr<T>& ptr) const
{
return hash<T*>()(ptr.ptr());
}
};
}

View File

@@ -126,16 +126,14 @@ namespace BAN
Variant(Variant&& other)
: m_index(other.m_index)
{
if (other.has_value())
detail::move_construct<Ts...>(other.m_index, other.m_storage, m_storage);
detail::move_construct<Ts...>(other.m_index, other.m_storage, m_storage);
other.clear();
}
Variant(const Variant& other)
: m_index(other.m_index)
{
if (other.has_value())
detail::copy_construct<Ts...>(other.m_index, other.m_storage, m_storage);
detail::copy_construct<Ts...>(other.m_index, other.m_storage, m_storage);
}
template<typename T>
@@ -159,13 +157,12 @@ namespace BAN
Variant& operator=(Variant&& other)
{
if (m_index == other.m_index && m_index != invalid_index())
if (m_index == other.m_index)
detail::move_assign<Ts...>(m_index, other.m_storage, m_storage);
else
{
clear();
if (other.has_value())
detail::move_construct<Ts...>(other.m_index, other.m_storage, m_storage);
detail::move_construct<Ts...>(other.m_index, other.m_storage, m_storage);
m_index = other.m_index;
}
other.clear();
@@ -174,13 +171,12 @@ namespace BAN
Variant& operator=(const Variant& other)
{
if (m_index == other.m_index && m_index != invalid_index())
if (m_index == other.m_index)
detail::copy_assign<Ts...>(m_index, other.m_storage, m_storage);
else
{
clear();
if (other.has_value())
detail::copy_construct<Ts...>(other.m_index, other.m_storage, m_storage);
detail::copy_construct<Ts...>(other.m_index, other.m_storage, m_storage);
m_index = other.m_index;
}
return *this;

View File

@@ -56,7 +56,7 @@ namespace BAN
bool contains(const T&) const;
Span<T> span() { return Span(m_data, m_size); }
Span<const T> span() const { return Span(m_data, m_size); }
const Span<T> span() const { return Span(m_data, m_size); }
const T& operator[](size_type) const;
T& operator[](size_type);
@@ -381,46 +381,19 @@ namespace BAN
template<typename T>
ErrorOr<void> Vector<T>::ensure_capacity(size_type size)
{
static_assert(alignof(T) <= alignof(max_align_t), "over aligned types not supported");
if (m_capacity >= size)
return {};
const size_type new_cap = BAN::Math::max<size_type>(size, m_capacity * 2);
if constexpr (BAN::is_trivially_copyable_v<T>)
size_type new_cap = BAN::Math::max<size_type>(size, m_capacity * 2);
T* new_data = (T*)BAN::allocator(new_cap * sizeof(T));
if (new_data == nullptr)
return Error::from_errno(ENOMEM);
for (size_type i = 0; i < m_size; i++)
{
if constexpr (BAN::reallocator)
{
auto* new_data = static_cast<T*>(BAN::reallocator(m_data, new_cap * sizeof(T)));
if (new_data == nullptr)
return Error::from_errno(ENOMEM);
m_data = new_data;
}
else
{
auto* new_data = static_cast<T*>(BAN::allocator(new_cap * sizeof(T)));
if (new_data == nullptr)
return Error::from_errno(ENOMEM);
memcpy(new_data, m_data, m_size * sizeof(T));
BAN::deallocator(m_data);
m_data = new_data;
}
new (new_data + i) T(move(m_data[i]));
m_data[i].~T();
}
else
{
auto* new_data = static_cast<T*>(BAN::allocator(new_cap * sizeof(T)));
if (new_data == nullptr)
return Error::from_errno(ENOMEM);
for (size_type i = 0; i < m_size; i++)
{
new (new_data + i) T(move(m_data[i]));
m_data[i].~T();
}
BAN::deallocator(m_data);
m_data = new_data;
}
BAN::deallocator(m_data);
m_data = new_data;
m_capacity = new_cap;
return {};
}

Binary file not shown.

View File

@@ -5,12 +5,9 @@ set(KERNEL_SOURCES
kernel/ACPI/AML/Node.cpp
kernel/ACPI/AML/OpRegion.cpp
kernel/ACPI/BatterySystem.cpp
kernel/ACPI/EmbeddedController.cpp
kernel/APIC.cpp
kernel/Audio/AC97/Controller.cpp
kernel/Audio/Controller.cpp
kernel/Audio/HDAudio/AudioFunctionGroup.cpp
kernel/Audio/HDAudio/Controller.cpp
kernel/BootInfo.cpp
kernel/CPUID.cpp
kernel/Credentials.cpp
@@ -25,7 +22,6 @@ set(KERNEL_SOURCES
kernel/Epoll.cpp
kernel/Errors.cpp
kernel/FS/DevFS/FileSystem.cpp
kernel/FS/EventFD.cpp
kernel/FS/Ext2/FileSystem.cpp
kernel/FS/Ext2/Inode.cpp
kernel/FS/FAT/FileSystem.cpp
@@ -51,7 +47,6 @@ set(KERNEL_SOURCES
kernel/InterruptController.cpp
kernel/kernel.cpp
kernel/Lock/SpinLock.cpp
kernel/Memory/ByteRingBuffer.cpp
kernel/Memory/DMARegion.cpp
kernel/Memory/FileBackedRegion.cpp
kernel/Memory/Heap.cpp
@@ -112,7 +107,6 @@ set(KERNEL_SOURCES
kernel/USB/Controller.cpp
kernel/USB/Device.cpp
kernel/USB/HID/HIDDriver.cpp
kernel/USB/HID/Joystick.cpp
kernel/USB/HID/Keyboard.cpp
kernel/USB/HID/Mouse.cpp
kernel/USB/Hub/HubDriver.cpp
@@ -139,8 +133,6 @@ if("${BANAN_ARCH}" STREQUAL "x86_64")
arch/x86_64/Signal.S
arch/x86_64/Syscall.S
arch/x86_64/Thread.S
arch/x86_64/User.S
arch/x86_64/Yield.S
)
elseif("${BANAN_ARCH}" STREQUAL "i686")
set(KERNEL_SOURCES
@@ -151,8 +143,6 @@ elseif("${BANAN_ARCH}" STREQUAL "i686")
arch/i686/Signal.S
arch/i686/Syscall.S
arch/i686/Thread.S
arch/i686/User.S
arch/i686/Yield.S
)
else()
message(FATAL_ERROR "unsupported architecure ${BANAN_ARCH}")
@@ -168,13 +158,10 @@ set(BAN_SOURCES
set(KLIBC_SOURCES
klibc/ctype.cpp
klibc/string.cpp
klibc/arch/${BANAN_ARCH}/string.S
)
set(LIBDEFLATE_SOURCE
../userspace/libraries/LibDEFLATE/Compressor.cpp
../userspace/libraries/LibDEFLATE/Decompressor.cpp
../userspace/libraries/LibDEFLATE/HuffmanTree.cpp
# Ehhh don't do this but for now libc uses the same stuff kernel can use
# This won't work after libc starts using sse implemetations tho
../userspace/libraries/LibC/arch/${BANAN_ARCH}/string.S
)
set(LIBFONT_SOURCES
@@ -187,25 +174,18 @@ set(LIBINPUT_SOURCE
../userspace/libraries/LibInput/KeyEvent.cpp
)
set(LIBQR_SOURCE
../userspace/libraries/LibQR/QRCode.cpp
)
set(KERNEL_SOURCES
${KERNEL_SOURCES}
${BAN_SOURCES}
${KLIBC_SOURCES}
${LIBDEFLATE_SOURCE}
${LIBFONT_SOURCES}
${LIBINPUT_SOURCE}
${LIBQR_SOURCE}
)
add_executable(kernel ${KERNEL_SOURCES})
target_compile_definitions(kernel PRIVATE __is_kernel)
target_compile_definitions(kernel PRIVATE __arch=${BANAN_ARCH})
target_compile_definitions(kernel PRIVATE LIBDEFLATE_AVOID_STACK=1)
target_compile_options(kernel PRIVATE
-O2 -g
@@ -259,11 +239,9 @@ add_custom_command(
banan_include_headers(kernel ban)
banan_include_headers(kernel libc)
banan_include_headers(kernel libdeflate)
banan_include_headers(kernel libelf)
banan_include_headers(kernel libfont)
banan_include_headers(kernel libelf)
banan_include_headers(kernel libinput)
banan_include_headers(kernel libqr)
banan_install_headers(kernel)
set_target_properties(kernel PROPERTIES OUTPUT_NAME banan-os.kernel)

View File

@@ -16,18 +16,11 @@ extern uint8_t g_kernel_writable_end[];
extern uint8_t g_userspace_start[];
extern uint8_t g_userspace_end[];
extern uint64_t g_boot_fast_page_pt[];
namespace Kernel
{
SpinLock PageTable::s_fast_page_lock;
constexpr uint64_t s_page_flag_mask = 0x8000000000000FFF;
constexpr uint64_t s_page_addr_mask = ~s_page_flag_mask;
static bool s_is_initialized = false;
static PageTable* s_kernel = nullptr;
static bool s_has_nxe = false;
static bool s_has_pge = false;
@@ -35,28 +28,6 @@ namespace Kernel
static paddr_t s_global_pdpte = 0;
static uint64_t* s_fast_page_pt { nullptr };
static uint64_t* allocate_zeroed_page_aligned_page()
{
void* page = kmalloc(PAGE_SIZE, PAGE_SIZE, true);
ASSERT(page);
memset(page, 0, PAGE_SIZE);
return (uint64_t*)page;
}
template<typename T>
static paddr_t V2P(const T vaddr)
{
return (vaddr_t)vaddr - KERNEL_OFFSET + g_boot_info.kernel_paddr;
}
template<typename T>
static uint64_t* P2V(const T paddr)
{
return reinterpret_cast<uint64_t*>(reinterpret_cast<paddr_t>(paddr) - g_boot_info.kernel_paddr + KERNEL_OFFSET);
}
static inline PageTable::flags_t parse_flags(uint64_t entry)
{
using Flags = PageTable::Flags;
@@ -75,22 +46,31 @@ namespace Kernel
return result;
}
void PageTable::initialize_fast_page()
{
s_fast_page_pt = g_boot_fast_page_pt;
}
static void detect_cpu_features()
void PageTable::initialize_pre_heap()
{
if (CPUID::has_nxe())
s_has_nxe = true;
if (CPUID::has_pge())
s_has_pge = true;
if (CPUID::has_pat())
s_has_pat = true;
ASSERT(s_kernel == nullptr);
s_kernel = new PageTable();
ASSERT(s_kernel);
s_kernel->initialize_kernel();
s_kernel->initial_load();
}
void PageTable::enable_cpu_features()
void PageTable::initialize_post_heap()
{
// NOTE: this is no-op as our 32 bit target does not use hhdm
}
void PageTable::initial_load()
{
if (s_has_nxe)
{
@@ -131,56 +111,8 @@ namespace Kernel
"movl %%eax, %%cr0;"
::: "rax"
);
}
void PageTable::initialize_and_load()
{
detect_cpu_features();
enable_cpu_features();
ASSERT(s_kernel == nullptr);
s_kernel = new PageTable();
ASSERT(s_kernel);
auto* pdpt = allocate_zeroed_page_aligned_page();
ASSERT(pdpt);
s_kernel->m_highest_paging_struct = V2P(pdpt);
s_kernel->map_kernel_memory();
PageTable::with_fast_page(s_kernel->m_highest_paging_struct, [] {
s_global_pdpte = PageTable::fast_page_as_sized<paddr_t>(3);
});
// update fast page pt
{
constexpr vaddr_t vaddr = fast_page();
constexpr uint16_t pdpte = (vaddr >> 30) & 0x1FF;
constexpr uint16_t pde = (vaddr >> 21) & 0x1FF;
const auto get_or_allocate_entry =
[](paddr_t table_paddr, uint16_t entry, uint64_t flags)
{
uint64_t* table = P2V(table_paddr);
if (!(table[entry] & Flags::Present))
{
auto* vaddr = allocate_zeroed_page_aligned_page();
ASSERT(vaddr);
table[entry] = V2P(vaddr);
}
table[entry] |= flags;
return table[entry] & s_page_addr_mask;
};
const paddr_t pdpt = s_kernel->m_highest_paging_struct;
const paddr_t pd = get_or_allocate_entry(pdpt, pdpte, Flags::Present);
s_fast_page_pt = P2V(get_or_allocate_entry(pd, pde, Flags::ReadWrite | Flags::Present));
}
s_kernel->load();
load();
}
PageTable& PageTable::kernel()
@@ -194,12 +126,40 @@ namespace Kernel
return true;
}
void PageTable::map_kernel_memory()
static uint64_t* allocate_zeroed_page_aligned_page()
{
void* page = kmalloc(PAGE_SIZE, PAGE_SIZE, true);
ASSERT(page);
memset(page, 0, PAGE_SIZE);
return (uint64_t*)page;
}
template<typename T>
static paddr_t V2P(const T vaddr)
{
return (vaddr_t)vaddr - KERNEL_OFFSET + g_boot_info.kernel_paddr;
}
template<typename T>
static vaddr_t P2V(const T paddr)
{
return (paddr_t)paddr - g_boot_info.kernel_paddr + KERNEL_OFFSET;
}
void PageTable::initialize_kernel()
{
ASSERT(s_global_pdpte == 0);
s_global_pdpte = V2P(allocate_zeroed_page_aligned_page());
map_kernel_memory();
prepare_fast_page();
// Map (phys_kernel_start -> phys_kernel_end) to (virt_kernel_start -> virt_kernel_end)
ASSERT((vaddr_t)g_kernel_start % PAGE_SIZE == 0);
map_range_at(
V2P(g_kernel_start),
reinterpret_cast<vaddr_t>(g_kernel_start),
(vaddr_t)g_kernel_start,
g_kernel_end - g_kernel_start,
Flags::Present
);
@@ -207,7 +167,7 @@ namespace Kernel
// Map executable kernel memory as executable
map_range_at(
V2P(g_kernel_execute_start),
reinterpret_cast<vaddr_t>(g_kernel_execute_start),
(vaddr_t)g_kernel_execute_start,
g_kernel_execute_end - g_kernel_execute_start,
Flags::Execute | Flags::Present
);
@@ -215,7 +175,7 @@ namespace Kernel
// Map writable kernel memory as writable
map_range_at(
V2P(g_kernel_writable_start),
reinterpret_cast<vaddr_t>(g_kernel_writable_start),
(vaddr_t)g_kernel_writable_start,
g_kernel_writable_end - g_kernel_writable_start,
Flags::ReadWrite | Flags::Present
);
@@ -223,34 +183,65 @@ namespace Kernel
// Map userspace memory
map_range_at(
V2P(g_userspace_start),
reinterpret_cast<vaddr_t>(g_userspace_start),
(vaddr_t)g_userspace_start,
g_userspace_end - g_userspace_start,
Flags::Execute | Flags::UserSupervisor | Flags::Present
);
}
void PageTable::prepare_fast_page()
{
constexpr uint64_t pdpte = (fast_page() >> 30) & 0x1FF;
constexpr uint64_t pde = (fast_page() >> 21) & 0x1FF;
uint64_t* pdpt = reinterpret_cast<uint64_t*>(P2V(m_highest_paging_struct));
ASSERT(pdpt[pdpte] & Flags::Present);
uint64_t* pd = reinterpret_cast<uint64_t*>(P2V(pdpt[pdpte]) & PAGE_ADDR_MASK);
ASSERT(!(pd[pde] & Flags::Present));
pd[pde] = V2P(allocate_zeroed_page_aligned_page()) | Flags::ReadWrite | Flags::Present;
}
void PageTable::map_fast_page(paddr_t paddr)
{
ASSERT(paddr && paddr % PAGE_SIZE == 0);
ASSERT(s_kernel);
ASSERT(paddr);
ASSERT(paddr % PAGE_SIZE == 0);
ASSERT(s_fast_page_pt);
ASSERT(s_fast_page_lock.current_processor_has_lock());
ASSERT(!(*s_fast_page_pt & Flags::Present));
s_fast_page_pt[0] = paddr | Flags::ReadWrite | Flags::Present;
constexpr uint64_t pdpte = (fast_page() >> 30) & 0x1FF;
constexpr uint64_t pde = (fast_page() >> 21) & 0x1FF;
constexpr uint64_t pte = (fast_page() >> 12) & 0x1FF;
asm volatile("invlpg (%0)" :: "r"(fast_page()));
uint64_t* pdpt = reinterpret_cast<uint64_t*>(P2V(s_kernel->m_highest_paging_struct));
uint64_t* pd = reinterpret_cast<uint64_t*>(P2V(pdpt[pdpte] & PAGE_ADDR_MASK));
uint64_t* pt = reinterpret_cast<uint64_t*>(P2V(pd[pde] & PAGE_ADDR_MASK));
ASSERT(!(pt[pte] & Flags::Present));
pt[pte] = paddr | Flags::ReadWrite | Flags::Present;
invalidate(fast_page(), false);
}
void PageTable::unmap_fast_page()
{
ASSERT(s_fast_page_pt);
ASSERT(s_kernel);
ASSERT(s_fast_page_lock.current_processor_has_lock());
ASSERT((*s_fast_page_pt & Flags::Present));
s_fast_page_pt[0] = 0;
constexpr uint64_t pdpte = (fast_page() >> 30) & 0x1FF;
constexpr uint64_t pde = (fast_page() >> 21) & 0x1FF;
constexpr uint64_t pte = (fast_page() >> 12) & 0x1FF;
asm volatile("invlpg (%0)" :: "r"(fast_page()));
uint64_t* pdpt = reinterpret_cast<uint64_t*>(P2V(s_kernel->m_highest_paging_struct));
uint64_t* pd = reinterpret_cast<uint64_t*>(P2V(pdpt[pdpte] & PAGE_ADDR_MASK));
uint64_t* pt = reinterpret_cast<uint64_t*>(P2V(pd[pde] & PAGE_ADDR_MASK));
ASSERT(pt[pte] & Flags::Present);
pt[pte] = 0;
invalidate(fast_page(), false);
}
BAN::ErrorOr<PageTable*> PageTable::create_userspace()
@@ -259,23 +250,25 @@ namespace Kernel
PageTable* page_table = new PageTable;
if (page_table == nullptr)
return BAN::Error::from_errno(ENOMEM);
page_table->map_kernel_memory();
return page_table;
}
uint64_t* pdpt = allocate_zeroed_page_aligned_page();
if (pdpt == nullptr)
{
delete page_table;
return BAN::Error::from_errno(ENOMEM);
}
void PageTable::map_kernel_memory()
{
ASSERT(s_kernel);
ASSERT(s_global_pdpte);
page_table->m_highest_paging_struct = V2P(pdpt);
ASSERT(m_highest_paging_struct == 0);
m_highest_paging_struct = V2P(kmalloc(32, 32, true));
ASSERT(m_highest_paging_struct);
uint64_t* pdpt = reinterpret_cast<uint64_t*>(P2V(m_highest_paging_struct));
pdpt[0] = 0;
pdpt[1] = 0;
pdpt[2] = 0;
pdpt[3] = s_global_pdpte | Flags::Present;
static_assert(KERNEL_OFFSET == 0xC0000000);
return page_table;
}
PageTable::~PageTable()
@@ -283,17 +276,18 @@ namespace Kernel
if (m_highest_paging_struct == 0)
return;
uint64_t* pdpt = P2V(m_highest_paging_struct);
uint64_t* pdpt = reinterpret_cast<uint64_t*>(P2V(m_highest_paging_struct));
for (uint32_t pdpte = 0; pdpte < 3; pdpte++)
{
if (!(pdpt[pdpte] & Flags::Present))
continue;
uint64_t* pd = P2V(pdpt[pdpte] & s_page_addr_mask);
uint64_t* pd = reinterpret_cast<uint64_t*>(P2V(pdpt[pdpte] & PAGE_ADDR_MASK));
for (uint32_t pde = 0; pde < 512; pde++)
{
if (!(pd[pde] & Flags::Present))
continue;
kfree(P2V(pd[pde] & s_page_addr_mask));
kfree(reinterpret_cast<uint64_t*>(P2V(pd[pde] & PAGE_ADDR_MASK)));
}
kfree(pd);
}
@@ -304,43 +298,15 @@ namespace Kernel
{
SpinLockGuard _(m_lock);
ASSERT(m_highest_paging_struct < 0x100000000);
asm volatile("movl %0, %%cr3" :: "r"(static_cast<uint32_t>(m_highest_paging_struct)));
const uint32_t pdpt_lo = m_highest_paging_struct;
asm volatile("movl %0, %%cr3" :: "r"(pdpt_lo));
Processor::set_current_page_table(this);
}
void PageTable::invalidate_range(vaddr_t vaddr, size_t pages, bool send_smp_message)
void PageTable::invalidate(vaddr_t vaddr, bool send_smp_message)
{
ASSERT(vaddr % PAGE_SIZE == 0);
const bool is_userspace = (vaddr < KERNEL_OFFSET);
if (is_userspace && this != &PageTable::current())
;
else if (pages <= 32 || !s_is_initialized)
{
for (size_t i = 0; i < pages; i++, vaddr += PAGE_SIZE)
asm volatile("invlpg (%0)" :: "r"(vaddr));
}
else if (is_userspace || !s_has_pge)
{
asm volatile("movl %0, %%cr3" :: "r"(static_cast<uint32_t>(m_highest_paging_struct)));
}
else
{
asm volatile(
"movl %%cr4, %%eax;"
"andl $~0x80, %%eax;"
"movl %%eax, %%cr4;"
"movl %0, %%cr3;"
"orl $0x80, %%eax;"
"movl %%eax, %%cr4;"
:
: "r"(static_cast<uint32_t>(m_highest_paging_struct))
: "eax"
);
}
asm volatile("invlpg (%0)" :: "r"(vaddr) : "memory");
if (send_smp_message)
{
@@ -348,14 +314,13 @@ namespace Kernel
.type = Processor::SMPMessage::Type::FlushTLB,
.flush_tlb = {
.vaddr = vaddr,
.page_count = pages,
.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
.page_count = 1
}
});
}
}
void PageTable::unmap_page(vaddr_t vaddr, bool invalidate)
void PageTable::unmap_page(vaddr_t vaddr, bool send_smp_message)
{
ASSERT(vaddr);
ASSERT(vaddr % PAGE_SIZE == 0);
@@ -374,16 +339,12 @@ namespace Kernel
if (is_page_free(vaddr))
Kernel::panic("trying to unmap unmapped page 0x{H}", vaddr);
uint64_t* pdpt = P2V(m_highest_paging_struct);
uint64_t* pd = P2V(pdpt[pdpte] & s_page_addr_mask);
uint64_t* pt = P2V(pd[pde] & s_page_addr_mask);
const paddr_t old_paddr = pt[pte] & s_page_addr_mask;
uint64_t* pdpt = reinterpret_cast<uint64_t*>(P2V(m_highest_paging_struct));
uint64_t* pd = reinterpret_cast<uint64_t*>(P2V(pdpt[pdpte] & PAGE_ADDR_MASK));
uint64_t* pt = reinterpret_cast<uint64_t*>(P2V(pd[pde] & PAGE_ADDR_MASK));
pt[pte] = 0;
if (invalidate && old_paddr != 0)
invalidate_page(vaddr, true);
invalidate(vaddr, send_smp_message);
}
void PageTable::unmap_range(vaddr_t vaddr, size_t size)
@@ -395,10 +356,17 @@ namespace Kernel
SpinLockGuard _(m_lock);
for (vaddr_t page = 0; page < page_count; page++)
unmap_page(vaddr + page * PAGE_SIZE, false);
invalidate_range(vaddr, page_count, true);
Processor::broadcast_smp_message({
.type = Processor::SMPMessage::Type::FlushTLB,
.flush_tlb = {
.vaddr = vaddr,
.page_count = page_count
}
});
}
void PageTable::map_page_at(paddr_t paddr, vaddr_t vaddr, flags_t flags, MemoryType memory_type, bool invalidate)
void PageTable::map_page_at(paddr_t paddr, vaddr_t vaddr, flags_t flags, MemoryType memory_type, bool send_smp_message)
{
ASSERT(vaddr);
ASSERT(vaddr != fast_page());
@@ -433,11 +401,11 @@ namespace Kernel
SpinLockGuard _(m_lock);
uint64_t* pdpt = P2V(m_highest_paging_struct);
uint64_t* pdpt = reinterpret_cast<uint64_t*>(P2V(m_highest_paging_struct));
if (!(pdpt[pdpte] & Flags::Present))
pdpt[pdpte] = V2P(allocate_zeroed_page_aligned_page()) | Flags::Present;
uint64_t* pd = P2V(pdpt[pdpte] & s_page_addr_mask);
uint64_t* pd = reinterpret_cast<uint64_t*>(P2V(pdpt[pdpte] & PAGE_ADDR_MASK));
if ((pd[pde] & uwr_flags) != uwr_flags)
{
if (!(pd[pde] & Flags::Present))
@@ -448,14 +416,10 @@ namespace Kernel
if (!(flags & Flags::Present))
uwr_flags &= ~Flags::Present;
uint64_t* pt = P2V(pd[pde] & s_page_addr_mask);
const paddr_t old_paddr = pt[pte] & s_page_addr_mask;
uint64_t* pt = reinterpret_cast<uint64_t*>(P2V(pd[pde] & PAGE_ADDR_MASK));
pt[pte] = paddr | uwr_flags | extra_flags;
if (invalidate && old_paddr != 0)
invalidate_page(vaddr, true);
invalidate(vaddr, send_smp_message);
}
void PageTable::map_range_at(paddr_t paddr, vaddr_t vaddr, size_t size, flags_t flags, MemoryType memory_type)
@@ -469,49 +433,14 @@ namespace Kernel
SpinLockGuard _(m_lock);
for (size_t page = 0; page < page_count; page++)
map_page_at(paddr + page * PAGE_SIZE, vaddr + page * PAGE_SIZE, flags, memory_type, false);
invalidate_range(vaddr, page_count, true);
}
void PageTable::remove_writable_from_range(vaddr_t vaddr, size_t size)
{
ASSERT(vaddr);
ASSERT(vaddr % PAGE_SIZE == 0);
uint32_t pdpte = (vaddr >> 30) & 0x1FF;
uint32_t pde = (vaddr >> 21) & 0x1FF;
uint32_t pte = (vaddr >> 12) & 0x1FF;
const uint32_t e_pdpte = ((vaddr + size - 1) >> 30) & 0x1FF;
const uint32_t e_pde = ((vaddr + size - 1) >> 21) & 0x1FF;
const uint32_t e_pte = ((vaddr + size - 1) >> 12) & 0x1FF;
SpinLockGuard _(m_lock);
const uint64_t* pdpt = P2V(m_highest_paging_struct);
for (; pdpte <= e_pdpte; pdpte++)
{
if (!(pdpt[pdpte] & Flags::Present))
continue;
const uint64_t* pd = P2V(pdpt[pdpte] & s_page_addr_mask);
for (; pde < 512; pde++)
{
if (pdpte == e_pdpte && pde > e_pde)
break;
if (!(pd[pde] & Flags::ReadWrite))
continue;
uint64_t* pt = P2V(pd[pde] & s_page_addr_mask);
for (; pte < 512; pte++)
{
if (pdpte == e_pdpte && pde == e_pde && pte > e_pte)
break;
pt[pte] &= ~static_cast<uint64_t>(Flags::ReadWrite);
}
pte = 0;
Processor::broadcast_smp_message({
.type = Processor::SMPMessage::Type::FlushTLB,
.flush_tlb = {
.vaddr = vaddr,
.page_count = page_count
}
pde = 0;
}
invalidate_range(vaddr, size / PAGE_SIZE, true);
});
}
uint64_t PageTable::get_page_data(vaddr_t vaddr) const
@@ -524,15 +453,15 @@ namespace Kernel
SpinLockGuard _(m_lock);
const uint64_t* pdpt = P2V(m_highest_paging_struct);
uint64_t* pdpt = (uint64_t*)P2V(m_highest_paging_struct);
if (!(pdpt[pdpte] & Flags::Present))
return 0;
const uint64_t* pd = P2V(pdpt[pdpte] & s_page_addr_mask);
uint64_t* pd = (uint64_t*)P2V(pdpt[pdpte] & PAGE_ADDR_MASK);
if (!(pd[pde] & Flags::Present))
return 0;
const uint64_t* pt = P2V(pd[pde] & s_page_addr_mask);
uint64_t* pt = (uint64_t*)P2V(pd[pde] & PAGE_ADDR_MASK);
if (!(pt[pte] & Flags::Used))
return 0;
@@ -546,7 +475,8 @@ namespace Kernel
paddr_t PageTable::physical_address_of(vaddr_t vaddr) const
{
return get_page_data(vaddr) & s_page_addr_mask;
uint64_t page_data = get_page_data(vaddr);
return (page_data & PAGE_ADDR_MASK) & ~(1ull << 63);
}
bool PageTable::is_page_free(vaddr_t vaddr) const
@@ -567,13 +497,13 @@ namespace Kernel
return true;
}
bool PageTable::reserve_page(vaddr_t vaddr, bool only_free, bool send_smp_message)
bool PageTable::reserve_page(vaddr_t vaddr, bool only_free)
{
SpinLockGuard _(m_lock);
ASSERT(vaddr % PAGE_SIZE == 0);
if (only_free && !is_page_free(vaddr))
return false;
map_page_at(0, vaddr, Flags::Reserved, MemoryType::Normal, send_smp_message);
map_page_at(0, vaddr, Flags::Reserved);
return true;
}
@@ -587,9 +517,7 @@ namespace Kernel
if (only_free && !is_range_free(vaddr, bytes))
return false;
for (size_t offset = 0; offset < bytes; offset += PAGE_SIZE)
reserve_page(vaddr + offset, true, false);
invalidate_range(vaddr, bytes / PAGE_SIZE, true);
reserve_page(vaddr + offset);
return true;
}
@@ -602,47 +530,48 @@ namespace Kernel
if (size_t rem = last_address % PAGE_SIZE)
last_address -= rem;
uint32_t pdpte = (first_address >> 30) & 0x1FF;
uint32_t pde = (first_address >> 21) & 0x1FF;
uint32_t pte = (first_address >> 12) & 0x1FF;
const uint32_t s_pdpte = (first_address >> 30) & 0x1FF;
const uint32_t s_pde = (first_address >> 21) & 0x1FF;
const uint32_t s_pte = (first_address >> 12) & 0x1FF;
const uint32_t e_pdpte = ((last_address - 1) >> 30) & 0x1FF;
const uint32_t e_pde = ((last_address - 1) >> 21) & 0x1FF;
const uint32_t e_pte = ((last_address - 1) >> 12) & 0x1FF;
const uint32_t e_pdpte = (last_address >> 30) & 0x1FF;
const uint32_t e_pde = (last_address >> 21) & 0x1FF;
const uint32_t e_pte = (last_address >> 12) & 0x1FF;
SpinLockGuard _(m_lock);
// Try to find free page that can be mapped without
// allocations (page table with unused entries)
const uint64_t* pdpt = P2V(m_highest_paging_struct);
for (; pdpte <= e_pdpte; pdpte++)
uint64_t* pdpt = reinterpret_cast<uint64_t*>(P2V(m_highest_paging_struct));
for (uint32_t pdpte = s_pdpte; pdpte < 4; pdpte++)
{
if (pdpte > e_pdpte)
break;
if (!(pdpt[pdpte] & Flags::Present))
continue;
const uint64_t* pd = P2V(pdpt[pdpte] & s_page_addr_mask);
for (; pde < 512; pde++)
uint64_t* pd = reinterpret_cast<uint64_t*>(P2V(pdpt[pdpte] & PAGE_ADDR_MASK));
for (uint32_t pde = s_pde; pde < 512; pde++)
{
if (pdpte == e_pdpte && pde > e_pde)
break;
if (!(pd[pde] & Flags::Present))
continue;
const uint64_t* pt = P2V(pd[pde] & s_page_addr_mask);
for (; pte < 512; pte++)
uint64_t* pt = (uint64_t*)P2V(pd[pde] & PAGE_ADDR_MASK);
for (uint32_t pte = s_pte; pte < 512; pte++)
{
if (pdpte == e_pdpte && pde == e_pde && pte > e_pte)
if (pdpte == e_pdpte && pde == e_pde && pte >= e_pte)
break;
if (pt[pte] & Flags::Used)
continue;
vaddr_t vaddr = 0;
vaddr |= (vaddr_t)pdpte << 30;
vaddr |= (vaddr_t)pde << 21;
vaddr |= (vaddr_t)pte << 12;
ASSERT(reserve_page(vaddr));
return vaddr;
if (!(pt[pte] & Flags::Used))
{
vaddr_t vaddr = 0;
vaddr |= (vaddr_t)pdpte << 30;
vaddr |= (vaddr_t)pde << 21;
vaddr |= (vaddr_t)pte << 12;
ASSERT(reserve_page(vaddr));
return vaddr;
}
}
pte = 0;
}
pde = 0;
}
// Find any free page
@@ -655,7 +584,7 @@ namespace Kernel
}
}
return 0;
ASSERT_NOT_REACHED();
}
vaddr_t PageTable::reserve_free_contiguous_pages(size_t page_count, vaddr_t first_address, vaddr_t last_address)
@@ -688,7 +617,7 @@ namespace Kernel
}
}
return 0;
ASSERT_NOT_REACHED();
}
static void dump_range(vaddr_t start, vaddr_t end, PageTable::flags_t flags)
@@ -711,7 +640,7 @@ namespace Kernel
flags_t flags = 0;
vaddr_t start = 0;
const uint64_t* pdpt = P2V(m_highest_paging_struct);
uint64_t* pdpt = reinterpret_cast<uint64_t*>(P2V(m_highest_paging_struct));
for (uint32_t pdpte = 0; pdpte < 4; pdpte++)
{
if (!(pdpt[pdpte] & Flags::Present))
@@ -720,7 +649,7 @@ namespace Kernel
start = 0;
continue;
}
const uint64_t* pd = P2V(pdpt[pdpte] & s_page_addr_mask);
uint64_t* pd = (uint64_t*)P2V(pdpt[pdpte] & PAGE_ADDR_MASK);
for (uint64_t pde = 0; pde < 512; pde++)
{
if (!(pd[pde] & Flags::Present))
@@ -729,7 +658,7 @@ namespace Kernel
start = 0;
continue;
}
const uint64_t* pt = P2V(pd[pde] & s_page_addr_mask);
uint64_t* pt = (uint64_t*)P2V(pd[pde] & PAGE_ADDR_MASK);
for (uint64_t pte = 0; pte < 512; pte++)
{
if (parse_flags(pt[pte]) != flags)

View File

@@ -1,41 +1,16 @@
.section .userspace, "ax"
// stack contains
// (4 bytes) return address (on return stack)
// (4 bytes) return stack
// (4 bytes) return rflags
// (8 bytes) restore sigmask
// (36 bytes) siginfo_t
// (4 bytes) signal number
// (4 bytes) signal handler
// return address
// signal number
// signal handler
.global signal_trampoline
signal_trampoline:
pushl %esi // gregs
pushl %edi
pushl %edx
pushl %ecx
pushl %ebx
pushl %eax
pushl %ebp
pusha
movl 84(%esp), %eax
pushl %eax; addl $4, (%esp)
pushl (%eax)
// FIXME: populate these
xorl %eax, %eax
pushl %eax // stack
pushl %eax
pushl %eax
pushl %eax // sigset
pushl %eax
pushl %eax // link
movl %esp, %edx // ucontext
leal 68(%esp), %esi // siginfo
movl 64(%esp), %edi // signal number
movl 60(%esp), %eax // handlers
movl 40(%esp), %edi
movl 36(%esp), %eax
// align stack to 16 bytes
movl %esp, %ebp
@@ -44,9 +19,7 @@ signal_trampoline:
subl $512, %esp
fxsave (%esp)
subl $4, %esp
pushl %edx
pushl %esi
subl $12, %esp
pushl %edi
call *%eax
addl $16, %esp
@@ -56,31 +29,9 @@ signal_trampoline:
// restore stack
movl %ebp, %esp
addl $24, %esp
popa
// restore sigmask
movl $83, %eax // SYS_SIGPROCMASK
movl $3, %ebx // SIG_SETMASK
leal 72(%esp), %ecx // set
xorl %edx, %edx // oset
int $0xF0
// restore registers
addl $8, %esp
popl %ebp
popl %eax
popl %ebx
popl %ecx
popl %edx
popl %edi
popl %esi
// skip handler, number, siginfo_t, sigmask
addl $52, %esp
// restore flags
popf
movl (%esp), %esp
ret

View File

@@ -1,6 +1,12 @@
// arguments in EAX, EBX, ECX, EDX, ESI, EDI
.global asm_syscall_handler
asm_syscall_handler:
# save segment registers
pushw %ds
pushw %es
pushw %fs
pushw %gs
# save general purpose registers
pushl %ebx
pushl %ecx
@@ -12,10 +18,13 @@ asm_syscall_handler:
# align stack
movl %esp, %ebp
andl $-16, %esp
subl $15, %esp
andl $0xFFFFFFF0, %esp
# push arguments
subl $8, %esp
subl $4, %esp
pushl %ebp
addl $32, (%esp)
pushl %edi
pushl %esi
pushl %edx
@@ -35,15 +44,6 @@ asm_syscall_handler:
movl %ebp, %esp
# restore userspace segments
movw $(0x20 | 3), %bx
movw %bx, %ds
movw %bx, %es
movw $(0x30 | 3), %bx
movw %bx, %fs
movw $(0x38 | 3), %bx
movw %bx, %gs
# restore general purpose registers
popl %ebp
popl %esi
@@ -52,6 +52,12 @@ asm_syscall_handler:
popl %ecx
popl %ebx
# restore segment registers
popw %gs
popw %fs
popw %es
popw %ds
iret
.global sys_fork_trampoline
@@ -63,7 +69,7 @@ sys_fork_trampoline:
call read_ip
testl %eax, %eax
jz .done
jz .reload_stack
movl %esp, %ebx
@@ -79,3 +85,9 @@ sys_fork_trampoline:
popl %ebx
popl %ebp
ret
.reload_stack:
call get_thread_start_sp
movl %eax, %esp
xorl %eax, %eax
jmp .done

View File

@@ -7,6 +7,9 @@ read_ip:
# void start_kernel_thread()
.global start_kernel_thread
start_kernel_thread:
call get_thread_start_sp
movl %eax, %esp
# STACK LAYOUT
# on_exit arg
# on_exit func
@@ -28,15 +31,25 @@ start_kernel_thread:
subl $12, %esp
pushl %edi
call *%esi
addl $16, %esp
.global start_userspace_thread
start_userspace_thread:
call load_thread_sse
call get_thread_start_sp
movl %eax, %esp
# ds, es = user data
movw $(0x20 | 3), %bx
movw %bx, %ds
movw %bx, %es
# gs = thread local
movw $(0x30 | 3), %bx
movw %bx, %fs
movw $(0x38 | 3), %bx
movw %bx, %gs
# fs = 0
xorw %bx, %bx
movw %bx, %fs
iret

View File

@@ -1,54 +0,0 @@
# bool safe_user_memcpy(void*, const void*, size_t)
.global safe_user_memcpy
.global safe_user_memcpy_end
.global safe_user_memcpy_fault
safe_user_memcpy:
xorl %eax, %eax
xchgl 4(%esp), %edi
xchgl 8(%esp), %esi
movl 12(%esp), %ecx
movl %edi, %edx
rep movsb
movl 4(%esp), %edi
movl 8(%esp), %esi
incl %eax
safe_user_memcpy_fault:
ret
safe_user_memcpy_end:
# bool safe_user_strncpy(void*, const void*, size_t)
.global safe_user_strncpy
.global safe_user_strncpy_end
.global safe_user_strncpy_fault
safe_user_strncpy:
xchgl 4(%esp), %edi
xchgl 8(%esp), %esi
movl 12(%esp), %ecx
testl %ecx, %ecx
jz safe_user_strncpy_fault
.safe_user_strncpy_loop:
movb (%esi), %al
movb %al, (%edi)
testb %al, %al
jz .safe_user_strncpy_done
incl %edi
incl %esi
decl %ecx
jnz .safe_user_strncpy_loop
safe_user_strncpy_fault:
xorl %eax, %eax
jmp .safe_user_strncpy_return
.safe_user_strncpy_done:
movl $1, %eax
.safe_user_strncpy_return:
movl 4(%esp), %edi
movl 8(%esp), %esi
ret
safe_user_strncpy_end:

View File

@@ -1,25 +0,0 @@
.global asm_yield_trampoline
asm_yield_trampoline:
leal 4(%esp), %ecx
movl 4(%esp), %esp
pushl -4(%ecx)
pushl %ecx
pushl %eax
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
pushl %esp
call scheduler_on_yield
addl $4, %esp
popl %ebp
popl %edi
popl %esi
popl %ebx
popl %eax
movl 4(%esp), %ecx
movl 0(%esp), %esp
jmp *%ecx

View File

@@ -11,28 +11,9 @@
.code32
// video mode info, page align modules
.set multiboot_flags, (1 << 2) | (1 << 0)
# multiboot2 header
.section .multiboot, "aw"
multiboot_start:
.long 0x1BADB002
.long multiboot_flags
.long -(0x1BADB002 + multiboot_flags)
.long 0
.long 0
.long 0
.long 0
.long 0
.long 0
.long FB_WIDTH
.long FB_HEIGHT
.long FB_BPP
multiboot_end:
.section .multiboot2, "aw"
.align 8
multiboot2_start:
.long 0xE85250D6
.long 0
@@ -68,6 +49,7 @@ multiboot2_start:
multiboot2_end:
.section .bananboot, "aw"
.align 8
bananboot_start:
.long 0xBABAB007
.long -(0xBABAB007 + FB_WIDTH + FB_HEIGHT + FB_BPP)
@@ -98,7 +80,8 @@ bananboot_end:
boot_pdpt:
.long V2P(boot_pd) + (PG_PRESENT)
.long 0
.skip 2 * 8
.quad 0
.quad 0
.long V2P(boot_pd) + (PG_PRESENT)
.long 0
.align 4096
@@ -111,16 +94,13 @@ boot_pd:
.endr
boot_pts:
.set i, 0
.rept 511
.rept 512
.rept 512
.long i + (PG_READ_WRITE | PG_PRESENT)
.long 0
.set i, i + 0x1000
.endr
.endr
.global g_boot_fast_page_pt
g_boot_fast_page_pt:
.skip 512 * 8
boot_gdt:
.quad 0x0000000000000000 # null descriptor
@@ -187,13 +167,6 @@ enable_sse:
movl %eax, %cr4
ret
enable_tsc:
# allow userspace to use RDTSC
movl %cr4, %ecx
andl $0xFFFFFFFB, %ecx
movl %ecx, %cr4
ret
initialize_paging:
# enable PAE
movl %cr4, %ecx
@@ -236,7 +209,6 @@ gdt_flush:
# do processor initialization
call check_requirements
call enable_sse
call enable_tsc
call initialize_paging
# load higher half stack pointer
@@ -276,7 +248,7 @@ system_halt:
jmp 1b
#define AP_REL(vaddr) ((vaddr) - ap_trampoline + 0xF000)
#define AP_V2P(vaddr) ((vaddr) - ap_trampoline + 0xF000)
.section .ap_init, "ax"
@@ -286,27 +258,21 @@ ap_trampoline:
jmp 1f
.align 8
ap_stack_paddr:
.skip 4
ap_stack_vaddr:
.skip 4
ap_prepare_paging:
.skip 4
ap_page_table:
.skip 4
ap_ready:
ap_stack_ptr:
.skip 4
ap_stack_loaded:
.skip 1
1: cli; cld
ljmpl $0x00, $AP_REL(ap_cs_clear)
ljmpl $0x00, $AP_V2P(ap_cs_clear)
ap_cs_clear:
# load ap gdt and enter protected mode
lgdt AP_REL(ap_gdtr)
lgdt AP_V2P(ap_gdtr)
movl %cr0, %eax
orb $1, %al
movl %eax, %cr0
ljmpl $0x08, $AP_REL(ap_protected_mode)
ljmpl $0x08, $AP_V2P(ap_protected_mode)
.code32
ap_protected_mode:
@@ -315,36 +281,32 @@ ap_protected_mode:
movw %ax, %ss
movw %ax, %es
movl AP_REL(ap_stack_paddr), %esp
movl AP_V2P(ap_stack_ptr), %esp
movb $1, AP_V2P(ap_stack_loaded)
leal V2P(enable_sse), %ecx; call *%ecx
leal V2P(enable_tsc), %ecx; call *%ecx
leal V2P(initialize_paging), %ecx; call *%ecx
# load boot gdt and enter long mode
lgdt V2P(boot_gdtr)
ljmpl $0x08, $AP_REL(ap_flush_gdt)
ljmpl $0x08, $AP_V2P(ap_flush_gdt)
ap_flush_gdt:
movl $ap_higher_half, %ecx
# move stack pointer to higher half
movl %esp, %esp
addl $KERNEL_OFFSET, %esp
# jump to higher half
leal ap_higher_half, %ecx
jmp *%ecx
ap_higher_half:
movl AP_REL(ap_prepare_paging), %eax
call *%eax
# load AP's initial values
movl AP_REL(ap_stack_vaddr), %esp
movl AP_REL(ap_page_table), %eax
movl $1, AP_REL(ap_ready)
movl %eax, %cr3
# clear rbp for stacktrace
xorl %ebp, %ebp
1: pause
cmpb $0, g_ap_startup_done
je 1b
jz 1b
lock incb g_ap_running_count

View File

@@ -1,67 +1,66 @@
.macro intr_header, n
.macro push_userspace
pushw %gs
pushw %fs
pushw %es
pushw %ds
pushal
testb $3, \n+8*4(%esp)
jz 1f
.endm
.macro load_kernel_segments
movw $0x10, %ax
movw %ax, %ds
movw %ax, %es
movw %ax, %fs
movw $0x28, %ax
movw %ax, %gs
1: cld
.endm
.macro intr_footer, n
testb $3, \n+8*4(%esp)
jz 1f
call cpp_check_signal
movw $(0x20 | 3), %bx
movw %bx, %ds
movw %bx, %es
movw $(0x30 | 3), %bx
movw %bx, %fs
movw $(0x38 | 3), %bx
movw %bx, %gs
1: popal
.macro pop_userspace
popal
popw %ds
popw %es
popw %fs
popw %gs
.endm
isr_stub:
intr_header 12
push_userspace
load_kernel_segments
cld
movl %cr0, %eax; pushl %eax
movl %cr2, %eax; pushl %eax
movl %cr3, %eax; pushl %eax
movl %cr4, %eax; pushl %eax
movl 48(%esp), %edi // isr number
movl 52(%esp), %esi // error code
leal 56(%esp), %edx // interrupt stack ptr
movl %esp, %ecx // register ptr
# stack frame for stack trace
leal 56(%esp), %eax
pushl (%eax)
pushl %ebp
movl %esp, %eax // register ptr
leal 64(%esp), %ebx // interrupt stack ptr
movl 60(%esp), %ecx // error code
movl 56(%esp), %edx // isr number
movl %esp, %ebp
andl $-16, %esp
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
pushl %esi
pushl %edi
call cpp_isr_handler
movl %ebp, %esp
addl $24, %esp
addl $16, %esp
intr_footer 12
pop_userspace
addl $8, %esp
iret
irq_stub:
intr_header 12
push_userspace
load_kernel_segments
cld
movl 32(%esp), %edi # interrupt number
movl 40(%esp), %edi # interrupt number
movl %esp, %ebp
andl $-16, %esp
@@ -72,13 +71,37 @@ irq_stub:
movl %ebp, %esp
intr_footer 12
pop_userspace
addl $8, %esp
iret
.global asm_yield_handler
asm_yield_handler:
# This can only be called from kernel, so no segment saving is needed
pushal
cld
leal 32(%esp), %edi # interrupt stack ptr
movl %esp, %esi # interrupt registers ptr
movl %esp, %ebp
andl $-16, %esp
subl $8, %esp
pushl %esi
pushl %edi
call cpp_yield_handler
movl %ebp, %esp
popal
iret
.global asm_ipi_handler
asm_ipi_handler:
intr_header 4
push_userspace
load_kernel_segments
cld
movl %esp, %ebp
andl $-16, %esp
@@ -87,12 +110,14 @@ asm_ipi_handler:
movl %ebp, %esp
intr_footer 4
pop_userspace
iret
.global asm_timer_handler
asm_timer_handler:
intr_header 4
push_userspace
load_kernel_segments
cld
movl %esp, %ebp
andl $-16, %esp
@@ -101,7 +126,7 @@ asm_timer_handler:
movl %ebp, %esp
intr_footer 4
pop_userspace
iret
.macro isr n

View File

@@ -11,21 +11,20 @@ SECTIONS
{
g_kernel_execute_start = .;
*(.multiboot)
*(.multiboot2)
*(.bananboot)
*(.text.*)
}
.ap_init ALIGN(4K) : AT(ADDR(.ap_init) - KERNEL_OFFSET)
{
g_ap_init_addr = .;
*(.ap_init)
g_kernel_execute_end = .;
}
.userspace ALIGN(4K) : AT(ADDR(.userspace) - KERNEL_OFFSET)
{
g_userspace_start = .;
*(.userspace)
g_userspace_end = .;
g_kernel_execute_end = .;
}
.ap_init ALIGN(4K) : AT(ADDR(.ap_init) - KERNEL_OFFSET)
{
g_ap_init_addr = .;
*(.ap_init)
}
.rodata ALIGN(4K) : AT(ADDR(.rodata) - KERNEL_OFFSET)
{

View File

@@ -2,6 +2,7 @@
#include <kernel/CPUID.h>
#include <kernel/Lock/SpinLock.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/kmalloc.h>
#include <kernel/Memory/PageTable.h>
extern uint8_t g_kernel_start[];
@@ -16,15 +17,13 @@ extern uint8_t g_kernel_writable_end[];
extern uint8_t g_userspace_start[];
extern uint8_t g_userspace_end[];
extern uint64_t g_boot_fast_page_pt[];
namespace Kernel
{
SpinLock PageTable::s_fast_page_lock;
static constexpr vaddr_t s_hhdm_offset = 0xFFFF800000000000;
static bool s_is_initialized = false;
static bool s_is_hddm_initialized = false;
constexpr uint64_t s_page_flag_mask = 0x8000000000000FFF;
constexpr uint64_t s_page_addr_mask = ~s_page_flag_mask;
@@ -36,8 +35,6 @@ namespace Kernel
static paddr_t s_global_pml4_entries[512] { 0 };
static uint64_t* s_fast_page_pt { nullptr };
static constexpr inline bool is_canonical(uintptr_t addr)
{
constexpr uintptr_t mask = 0xFFFF800000000000;
@@ -57,27 +54,68 @@ namespace Kernel
return addr;
}
static paddr_t allocate_zeroed_page_aligned_page()
struct FuncsKmalloc
{
const paddr_t paddr = Heap::get().take_free_page();
ASSERT(paddr);
memset(reinterpret_cast<void*>(paddr + s_hhdm_offset), 0, PAGE_SIZE);
return paddr;
}
static paddr_t allocate_zeroed_page_aligned_page()
{
void* page = kmalloc(PAGE_SIZE, PAGE_SIZE, true);
ASSERT(page);
memset(page, 0, PAGE_SIZE);
return kmalloc_paddr_of(reinterpret_cast<vaddr_t>(page)).value();
}
static void unallocate_page(paddr_t paddr)
static void unallocate_page(paddr_t paddr)
{
kfree(reinterpret_cast<void*>(kmalloc_vaddr_of(paddr).value()));
}
static paddr_t V2P(vaddr_t vaddr)
{
return vaddr - KERNEL_OFFSET + g_boot_info.kernel_paddr;
}
static uint64_t* P2V(paddr_t paddr)
{
return reinterpret_cast<uint64_t*>(paddr - g_boot_info.kernel_paddr + KERNEL_OFFSET);
}
};
struct FuncsHHDM
{
Heap::get().release_page(paddr);
}
static paddr_t allocate_zeroed_page_aligned_page()
{
const paddr_t paddr = Heap::get().take_free_page();
ASSERT(paddr);
memset(reinterpret_cast<void*>(paddr + s_hhdm_offset), 0, PAGE_SIZE);
return paddr;
}
static uint64_t* P2V(paddr_t paddr)
{
ASSERT(paddr != 0);
ASSERT(!BAN::Math::will_addition_overflow(paddr, s_hhdm_offset));
return reinterpret_cast<uint64_t*>(paddr + s_hhdm_offset);
}
static void unallocate_page(paddr_t paddr)
{
Heap::get().release_page(paddr);
}
static PageTable::flags_t parse_flags(uint64_t entry)
static paddr_t V2P(vaddr_t vaddr)
{
ASSERT(vaddr >= s_hhdm_offset);
ASSERT(vaddr < KERNEL_OFFSET);
return vaddr - s_hhdm_offset;
}
static uint64_t* P2V(paddr_t paddr)
{
ASSERT(paddr != 0);
ASSERT(!BAN::Math::will_addition_overflow(paddr, s_hhdm_offset));
return reinterpret_cast<uint64_t*>(paddr + s_hhdm_offset);
}
};
static paddr_t (*allocate_zeroed_page_aligned_page)() = &FuncsKmalloc::allocate_zeroed_page_aligned_page;
static void (*unallocate_page)(paddr_t) = &FuncsKmalloc::unallocate_page;
static paddr_t (*V2P)(vaddr_t) = &FuncsKmalloc::V2P;
static uint64_t* (*P2V)(paddr_t) = &FuncsKmalloc::P2V;
static inline PageTable::flags_t parse_flags(uint64_t entry)
{
using Flags = PageTable::Flags;
@@ -99,7 +137,7 @@ namespace Kernel
// 0: 4 KiB
// 1: 2 MiB
// 2: 1 GiB
static void map_hhdm_page(paddr_t pml4, paddr_t paddr, uint8_t page_size)
static void init_map_hhdm_page(paddr_t pml4, paddr_t paddr, uint8_t page_size)
{
ASSERT(0 <= page_size && page_size <= 2);
@@ -146,7 +184,7 @@ namespace Kernel
const uint64_t noexec_flag = s_has_nxe ? (static_cast<uint64_t>(1) << 63) : 0;
const paddr_t pdpt = get_or_allocate_entry(pml4, pml4e, noexec_flag);
s_global_pml4_entries[pml4e] = pdpt | hhdm_flags | noexec_flag;
s_global_pml4_entries[pml4e] = pdpt | hhdm_flags;
paddr_t lowest_paddr = pdpt;
uint16_t lowest_entry = pdpte;
@@ -169,11 +207,23 @@ namespace Kernel
});
}
static void initialize_hhdm(paddr_t pml4)
static void init_map_hhdm(paddr_t pml4)
{
for (const auto& entry : g_boot_info.memory_map_entries)
{
if (entry.type != MemoryMapEntry::Type::Available)
bool should_map = false;
switch (entry.type)
{
case MemoryMapEntry::Type::Available:
should_map = true;
break;
case MemoryMapEntry::Type::ACPIReclaim:
case MemoryMapEntry::Type::ACPINVS:
case MemoryMapEntry::Type::Reserved:
should_map = false;
break;
}
if (!should_map)
continue;
constexpr size_t one_gib = 1024 * 1024 * 1024;
@@ -185,39 +235,156 @@ namespace Kernel
{
if (s_has_gib && paddr % one_gib == 0 && paddr + one_gib <= entry_end)
{
map_hhdm_page(pml4, paddr, 2);
init_map_hhdm_page(pml4, paddr, 2);
paddr += one_gib;
}
else if (paddr % two_mib == 0 && paddr + two_mib <= entry_end)
{
map_hhdm_page(pml4, paddr, 1);
init_map_hhdm_page(pml4, paddr, 1);
paddr += two_mib;
}
else
{
map_hhdm_page(pml4, paddr, 0);
init_map_hhdm_page(pml4, paddr, 0);
paddr += PAGE_SIZE;
}
}
}
}
void PageTable::initialize_fast_page()
static paddr_t copy_page_from_kmalloc_to_heap(paddr_t kmalloc_paddr)
{
s_fast_page_pt = g_boot_fast_page_pt;
const paddr_t heap_paddr = Heap::get().take_free_page();
ASSERT(heap_paddr);
const vaddr_t kmalloc_vaddr = kmalloc_vaddr_of(kmalloc_paddr).value();
PageTable::with_fast_page(heap_paddr, [kmalloc_vaddr] {
memcpy(PageTable::fast_page_as_ptr(), reinterpret_cast<void*>(kmalloc_vaddr), PAGE_SIZE);
});
return heap_paddr;
}
static void detect_cpu_features()
static void copy_paging_structure_to_heap(uint64_t* old_table, uint64_t* new_table, int depth)
{
if (depth == 0)
return;
constexpr uint64_t page_flag_mask = 0x8000000000000FFF;
constexpr uint64_t page_addr_mask = ~page_flag_mask;
for (uint16_t index = 0; index < 512; index++)
{
const uint64_t old_entry = old_table[index];
if (old_entry == 0)
{
new_table[index] = 0;
continue;
}
const paddr_t old_paddr = old_entry & page_addr_mask;
const paddr_t new_paddr = copy_page_from_kmalloc_to_heap(old_paddr);
new_table[index] = new_paddr | (old_entry & page_flag_mask);
uint64_t* next_old_table = reinterpret_cast<uint64_t*>(old_paddr + s_hhdm_offset);
uint64_t* next_new_table = reinterpret_cast<uint64_t*>(new_paddr + s_hhdm_offset);
copy_paging_structure_to_heap(next_old_table, next_new_table, depth - 1);
}
}
static void free_kmalloc_paging_structure(uint64_t* table, int depth)
{
if (depth == 0)
return;
constexpr uint64_t page_flag_mask = 0x8000000000000FFF;
constexpr uint64_t page_addr_mask = ~page_flag_mask;
for (uint16_t index = 0; index < 512; index++)
{
const uint64_t entry = table[index];
if (entry == 0)
continue;
const paddr_t paddr = entry & page_addr_mask;
uint64_t* next_table = reinterpret_cast<uint64_t*>(paddr + s_hhdm_offset);
free_kmalloc_paging_structure(next_table, depth - 1);
kfree(reinterpret_cast<void*>(kmalloc_vaddr_of(paddr).value()));
}
}
void PageTable::initialize_pre_heap()
{
if (CPUID::has_nxe())
s_has_nxe = true;
if (CPUID::has_pge())
s_has_pge = true;
if (CPUID::has_1gib_pages())
s_has_gib = true;
ASSERT(s_kernel == nullptr);
s_kernel = new PageTable();
ASSERT(s_kernel);
s_kernel->m_highest_paging_struct = allocate_zeroed_page_aligned_page();
s_kernel->prepare_fast_page();
s_kernel->initialize_kernel();
for (auto pml4e : s_global_pml4_entries)
ASSERT(pml4e == 0);
const uint64_t* pml4 = P2V(s_kernel->m_highest_paging_struct);
s_global_pml4_entries[511] = pml4[511];
}
void PageTable::enable_cpu_features()
void PageTable::initialize_post_heap()
{
ASSERT(s_kernel);
init_map_hhdm(s_kernel->m_highest_paging_struct);
const paddr_t old_pml4_paddr = s_kernel->m_highest_paging_struct;
const paddr_t new_pml4_paddr = copy_page_from_kmalloc_to_heap(old_pml4_paddr);
uint64_t* old_pml4 = reinterpret_cast<uint64_t*>(kmalloc_vaddr_of(old_pml4_paddr).value());
uint64_t* new_pml4 = reinterpret_cast<uint64_t*>(new_pml4_paddr + s_hhdm_offset);
const paddr_t old_pdpt_paddr = old_pml4[511] & s_page_addr_mask;
const paddr_t new_pdpt_paddr = Heap::get().take_free_page();
ASSERT(new_pdpt_paddr);
uint64_t* old_pdpt = reinterpret_cast<uint64_t*>(old_pdpt_paddr + s_hhdm_offset);
uint64_t* new_pdpt = reinterpret_cast<uint64_t*>(new_pdpt_paddr + s_hhdm_offset);
copy_paging_structure_to_heap(old_pdpt, new_pdpt, 2);
new_pml4[511] = new_pdpt_paddr | (old_pml4[511] & s_page_flag_mask);
s_global_pml4_entries[511] = new_pml4[511];
s_kernel->m_highest_paging_struct = new_pml4_paddr;
s_kernel->load();
free_kmalloc_paging_structure(old_pdpt, 2);
kfree(reinterpret_cast<void*>(kmalloc_vaddr_of(old_pdpt_paddr).value()));
kfree(reinterpret_cast<void*>(kmalloc_vaddr_of(old_pml4_paddr).value()));
allocate_zeroed_page_aligned_page = &FuncsHHDM::allocate_zeroed_page_aligned_page;
unallocate_page = &FuncsHHDM::unallocate_page;
V2P = &FuncsHHDM::V2P;
P2V = &FuncsHHDM::P2V;
s_is_hddm_initialized = true;
// This is a hack to unmap fast page. fast page pt is copied
// while it is mapped, so we need to manually unmap it
SpinLockGuard _(s_fast_page_lock);
unmap_fast_page();
}
void PageTable::initial_load()
{
if (s_has_nxe)
{
@@ -256,63 +423,8 @@ namespace Kernel
"movq %%rax, %%cr0;"
::: "rax"
);
}
void PageTable::initialize_and_load()
{
detect_cpu_features();
enable_cpu_features();
const paddr_t boot_pml4_paddr = ({
paddr_t paddr;
asm volatile("movq %%cr3, %0" : "=r"(paddr));
paddr;
});
initialize_hhdm(boot_pml4_paddr);
ASSERT(s_kernel == nullptr);
s_kernel = new PageTable();
ASSERT(s_kernel != nullptr);
s_kernel->m_highest_paging_struct = allocate_zeroed_page_aligned_page();
ASSERT(s_kernel->m_highest_paging_struct);
uint64_t* pml4 = P2V(s_kernel->m_highest_paging_struct);
memcpy(pml4, s_global_pml4_entries, sizeof(s_global_pml4_entries));
s_kernel->map_kernel_memory();
s_global_pml4_entries[511] = pml4[511];
// update fast page pt
{
constexpr vaddr_t uc_vaddr = uncanonicalize(fast_page());
constexpr uint16_t pml4e = (uc_vaddr >> 39) & 0x1FF;
constexpr uint16_t pdpte = (uc_vaddr >> 30) & 0x1FF;
constexpr uint16_t pde = (uc_vaddr >> 21) & 0x1FF;
const auto get_or_allocate_entry =
[](paddr_t table_paddr, uint16_t entry, uint64_t flags)
{
uint64_t* table = P2V(table_paddr);
if (!(table[entry] & Flags::Present))
{
table[entry] = allocate_zeroed_page_aligned_page();
ASSERT(table[entry]);
}
table[entry] |= flags;
return table[entry] & s_page_addr_mask;
};
const paddr_t pml4 = s_kernel->m_highest_paging_struct;
const paddr_t pdpt = get_or_allocate_entry(pml4, pml4e, Flags::ReadWrite | Flags::Present);
const paddr_t pd = get_or_allocate_entry(pdpt, pdpte, Flags::ReadWrite | Flags::Present);
s_fast_page_pt = P2V(get_or_allocate_entry(pd, pde, Flags::ReadWrite | Flags::Present));
}
s_kernel->load();
load();
}
PageTable& PageTable::kernel()
@@ -328,12 +440,12 @@ namespace Kernel
return true;
}
void PageTable::map_kernel_memory()
void PageTable::initialize_kernel()
{
// Map (phys_kernel_start -> phys_kernel_end) to (virt_kernel_start -> virt_kernel_end)
const vaddr_t kernel_start = reinterpret_cast<vaddr_t>(g_kernel_start);
map_range_at(
kernel_start - KERNEL_OFFSET + g_boot_info.kernel_paddr,
V2P(kernel_start),
kernel_start,
g_kernel_end - g_kernel_start,
Flags::Present
@@ -342,7 +454,7 @@ namespace Kernel
// Map executable kernel memory as executable
const vaddr_t kernel_execute_start = reinterpret_cast<vaddr_t>(g_kernel_execute_start);
map_range_at(
kernel_execute_start - KERNEL_OFFSET + g_boot_info.kernel_paddr,
V2P(kernel_execute_start),
kernel_execute_start,
g_kernel_execute_end - g_kernel_execute_start,
Flags::Execute | Flags::Present
@@ -351,7 +463,7 @@ namespace Kernel
// Map writable kernel memory as writable
const vaddr_t kernel_writable_start = reinterpret_cast<vaddr_t>(g_kernel_writable_start);
map_range_at(
kernel_writable_start - KERNEL_OFFSET + g_boot_info.kernel_paddr,
V2P(kernel_writable_start),
kernel_writable_start,
g_kernel_writable_end - g_kernel_writable_start,
Flags::ReadWrite | Flags::Present
@@ -360,58 +472,109 @@ namespace Kernel
// Map userspace memory
const vaddr_t userspace_start = reinterpret_cast<vaddr_t>(g_userspace_start);
map_range_at(
userspace_start - KERNEL_OFFSET + g_boot_info.kernel_paddr,
V2P(userspace_start),
userspace_start,
g_userspace_end - g_userspace_start,
Flags::Execute | Flags::UserSupervisor | Flags::Present
);
}
void PageTable::prepare_fast_page()
{
constexpr vaddr_t uc_vaddr = uncanonicalize(fast_page());
constexpr uint64_t pml4e = (uc_vaddr >> 39) & 0x1FF;
constexpr uint64_t pdpte = (uc_vaddr >> 30) & 0x1FF;
constexpr uint64_t pde = (uc_vaddr >> 21) & 0x1FF;
uint64_t* pml4 = P2V(m_highest_paging_struct);
ASSERT(!(pml4[pml4e] & Flags::Present));
pml4[pml4e] = allocate_zeroed_page_aligned_page() | Flags::ReadWrite | Flags::Present;
uint64_t* pdpt = P2V(pml4[pml4e] & s_page_addr_mask);
ASSERT(!(pdpt[pdpte] & Flags::Present));
pdpt[pdpte] = allocate_zeroed_page_aligned_page() | Flags::ReadWrite | Flags::Present;
uint64_t* pd = P2V(pdpt[pdpte] & s_page_addr_mask);
ASSERT(!(pd[pde] & Flags::Present));
pd[pde] = allocate_zeroed_page_aligned_page() | Flags::ReadWrite | Flags::Present;
}
void PageTable::map_fast_page(paddr_t paddr)
{
ASSERT(paddr && paddr % PAGE_SIZE == 0);
ASSERT(s_kernel);
ASSERT(paddr);
ASSERT(paddr % PAGE_SIZE == 0);
ASSERT(s_fast_page_pt);
ASSERT(s_fast_page_lock.current_processor_has_lock());
ASSERT(!(*s_fast_page_pt & Flags::Present));
s_fast_page_pt[0] = paddr | Flags::ReadWrite | Flags::Present;
constexpr vaddr_t uc_vaddr = uncanonicalize(fast_page());
constexpr uint64_t pml4e = (uc_vaddr >> 39) & 0x1FF;
constexpr uint64_t pdpte = (uc_vaddr >> 30) & 0x1FF;
constexpr uint64_t pde = (uc_vaddr >> 21) & 0x1FF;
constexpr uint64_t pte = (uc_vaddr >> 12) & 0x1FF;
asm volatile("invlpg (%0)" :: "r"(fast_page()));
const uint64_t* pml4 = P2V(s_kernel->m_highest_paging_struct);
const uint64_t* pdpt = P2V(pml4[pml4e] & s_page_addr_mask);
const uint64_t* pd = P2V(pdpt[pdpte] & s_page_addr_mask);
uint64_t* pt = P2V(pd[pde] & s_page_addr_mask);
ASSERT(!(pt[pte] & Flags::Present));
pt[pte] = paddr | Flags::ReadWrite | Flags::Present;
invalidate(fast_page(), false);
}
void PageTable::unmap_fast_page()
{
ASSERT(s_fast_page_pt);
ASSERT(s_kernel);
ASSERT(s_fast_page_lock.current_processor_has_lock());
ASSERT((*s_fast_page_pt & Flags::Present));
s_fast_page_pt[0] = 0;
constexpr vaddr_t uc_vaddr = uncanonicalize(fast_page());
constexpr uint64_t pml4e = (uc_vaddr >> 39) & 0x1FF;
constexpr uint64_t pdpte = (uc_vaddr >> 30) & 0x1FF;
constexpr uint64_t pde = (uc_vaddr >> 21) & 0x1FF;
constexpr uint64_t pte = (uc_vaddr >> 12) & 0x1FF;
asm volatile("invlpg (%0)" :: "r"(fast_page()));
const uint64_t* pml4 = P2V(s_kernel->m_highest_paging_struct);
const uint64_t* pdpt = P2V(pml4[pml4e] & s_page_addr_mask);
const uint64_t* pd = P2V(pdpt[pdpte] & s_page_addr_mask);
uint64_t* pt = P2V(pd[pde] & s_page_addr_mask);
ASSERT(pt[pte] & Flags::Present);
pt[pte] = 0;
invalidate(fast_page(), false);
}
BAN::ErrorOr<PageTable*> PageTable::create_userspace()
{
SpinLockGuard _(s_kernel->m_lock);
PageTable* page_table = new PageTable;
if (page_table == nullptr)
return BAN::Error::from_errno(ENOMEM);
page_table->m_highest_paging_struct = allocate_zeroed_page_aligned_page();
if (page_table->m_highest_paging_struct == 0)
{
delete page_table;
return BAN::Error::from_errno(ENOMEM);
}
uint64_t* pml4 = P2V(page_table->m_highest_paging_struct);
memcpy(pml4, s_global_pml4_entries, sizeof(s_global_pml4_entries));
page_table->map_kernel_memory();
return page_table;
}
void PageTable::map_kernel_memory()
{
ASSERT(s_kernel);
ASSERT(s_global_pml4_entries[511]);
ASSERT(m_highest_paging_struct == 0);
m_highest_paging_struct = allocate_zeroed_page_aligned_page();
PageTable::with_fast_page(m_highest_paging_struct, [] {
for (size_t i = 0; i < 512; i++)
{
if (s_global_pml4_entries[i] == 0)
continue;
ASSERT(i >= 256);
PageTable::fast_page_as_sized<uint64_t>(i) = s_global_pml4_entries[i];
}
});
}
PageTable::~PageTable()
{
if (m_highest_paging_struct == 0)
@@ -449,39 +612,10 @@ namespace Kernel
Processor::set_current_page_table(this);
}
void PageTable::invalidate_range(vaddr_t vaddr, size_t pages, bool send_smp_message)
void PageTable::invalidate(vaddr_t vaddr, bool send_smp_message)
{
ASSERT(vaddr % PAGE_SIZE == 0);
const bool is_userspace = (vaddr < KERNEL_OFFSET);
if (is_userspace && this != &PageTable::current())
;
else if (pages <= 32 || !s_is_initialized)
{
for (size_t i = 0; i < pages; i++, vaddr += PAGE_SIZE)
asm volatile("invlpg (%0)" :: "r"(vaddr));
}
else if (is_userspace || !s_has_pge)
{
asm volatile("movq %0, %%cr3" :: "r"(m_highest_paging_struct));
}
else
{
asm volatile(
"movq %%cr4, %%rax;"
"andq $~0x80, %%rax;"
"movq %%rax, %%cr4;"
"movq %0, %%cr3;"
"orq $0x80, %%rax;"
"movq %%rax, %%cr4;"
:
: "r"(m_highest_paging_struct)
: "rax"
);
}
asm volatile("invlpg (%0)" :: "r"(vaddr) : "memory");
if (send_smp_message)
{
@@ -489,14 +623,13 @@ namespace Kernel
.type = Processor::SMPMessage::Type::FlushTLB,
.flush_tlb = {
.vaddr = vaddr,
.page_count = pages,
.page_table = vaddr < KERNEL_OFFSET ? this : nullptr,
.page_count = 1
}
});
}
}
void PageTable::unmap_page(vaddr_t vaddr, bool invalidate)
void PageTable::unmap_page(vaddr_t vaddr, bool send_smp_message)
{
ASSERT(vaddr);
ASSERT(vaddr != fast_page());
@@ -525,27 +658,30 @@ namespace Kernel
uint64_t* pd = P2V(pdpt[pdpte] & s_page_addr_mask);
uint64_t* pt = P2V(pd[pde] & s_page_addr_mask);
const paddr_t old_paddr = pt[pte] & PAGE_ADDR_MASK;
pt[pte] = 0;
if (invalidate && old_paddr != 0)
invalidate_page(vaddr, true);
invalidate(vaddr, send_smp_message);
}
void PageTable::unmap_range(vaddr_t vaddr, size_t size)
{
ASSERT(vaddr % PAGE_SIZE == 0);
const size_t page_count = range_page_count(vaddr, size);
size_t page_count = range_page_count(vaddr, size);
SpinLockGuard _(m_lock);
for (vaddr_t page = 0; page < page_count; page++)
unmap_page(vaddr + page * PAGE_SIZE, false);
invalidate_range(vaddr, page_count, true);
Processor::broadcast_smp_message({
.type = Processor::SMPMessage::Type::FlushTLB,
.flush_tlb = {
.vaddr = vaddr,
.page_count = page_count
}
});
}
void PageTable::map_page_at(paddr_t paddr, vaddr_t vaddr, flags_t flags, MemoryType memory_type, bool invalidate)
void PageTable::map_page_at(paddr_t paddr, vaddr_t vaddr, flags_t flags, MemoryType memory_type, bool send_smp_message)
{
ASSERT(vaddr);
ASSERT(vaddr != fast_page());
@@ -606,12 +742,9 @@ namespace Kernel
if (!(flags & Flags::Present))
uwr_flags &= ~Flags::Present;
const paddr_t old_paddr = pt[pte] & PAGE_ADDR_MASK;
pt[pte] = paddr | uwr_flags | extra_flags;
if (invalidate && old_paddr != 0)
invalidate_page(vaddr, true);
invalidate(vaddr, send_smp_message);
}
void PageTable::map_range_at(paddr_t paddr, vaddr_t vaddr, size_t size, flags_t flags, MemoryType memory_type)
@@ -627,66 +760,14 @@ namespace Kernel
SpinLockGuard _(m_lock);
for (size_t page = 0; page < page_count; page++)
map_page_at(paddr + page * PAGE_SIZE, vaddr + page * PAGE_SIZE, flags, memory_type, false);
invalidate_range(vaddr, page_count, true);
}
void PageTable::remove_writable_from_range(vaddr_t vaddr, size_t size)
{
ASSERT(vaddr);
ASSERT(vaddr % PAGE_SIZE == 0);
ASSERT(is_canonical(vaddr));
ASSERT(is_canonical(vaddr + size - 1));
const vaddr_t uc_vaddr_start = uncanonicalize(vaddr);
const vaddr_t uc_vaddr_end = uncanonicalize(vaddr + size - 1);
uint16_t pml4e = (uc_vaddr_start >> 39) & 0x1FF;
uint16_t pdpte = (uc_vaddr_start >> 30) & 0x1FF;
uint16_t pde = (uc_vaddr_start >> 21) & 0x1FF;
uint16_t pte = (uc_vaddr_start >> 12) & 0x1FF;
const uint16_t e_pml4e = (uc_vaddr_end >> 39) & 0x1FF;
const uint16_t e_pdpte = (uc_vaddr_end >> 30) & 0x1FF;
const uint16_t e_pde = (uc_vaddr_end >> 21) & 0x1FF;
const uint16_t e_pte = (uc_vaddr_end >> 12) & 0x1FF;
SpinLockGuard _(m_lock);
const uint64_t* pml4 = P2V(m_highest_paging_struct);
for (; pml4e <= e_pml4e; pml4e++)
{
if (!(pml4[pml4e] & Flags::ReadWrite))
continue;
const uint64_t* pdpt = P2V(pml4[pml4e] & s_page_addr_mask);
for (; pdpte < 512; pdpte++)
{
if (pml4e == e_pml4e && pdpte > e_pdpte)
break;
if (!(pdpt[pdpte] & Flags::ReadWrite))
continue;
const uint64_t* pd = P2V(pdpt[pdpte] & s_page_addr_mask);
for (; pde < 512; pde++)
{
if (pml4e == e_pml4e && pdpte == e_pdpte && pde > e_pde)
break;
if (!(pd[pde] & Flags::ReadWrite))
continue;
uint64_t* pt = P2V(pd[pde] & s_page_addr_mask);
for (; pte < 512; pte++)
{
if (pml4e == e_pml4e && pdpte == e_pdpte && pde == e_pde && pte > e_pte)
break;
pt[pte] &= ~static_cast<uint64_t>(Flags::ReadWrite);
}
pte = 0;
}
pde = 0;
Processor::broadcast_smp_message({
.type = Processor::SMPMessage::Type::FlushTLB,
.flush_tlb = {
.vaddr = vaddr,
.page_count = page_count
}
pdpte = 0;
}
invalidate_range(vaddr, size / PAGE_SIZE, true);
});
}
uint64_t PageTable::get_page_data(vaddr_t vaddr) const
@@ -733,13 +814,13 @@ namespace Kernel
return page_data & s_page_addr_mask;
}
bool PageTable::reserve_page(vaddr_t vaddr, bool only_free, bool invalidate)
bool PageTable::reserve_page(vaddr_t vaddr, bool only_free)
{
SpinLockGuard _(m_lock);
ASSERT(vaddr % PAGE_SIZE == 0);
if (only_free && !is_page_free(vaddr))
return false;
map_page_at(0, vaddr, Flags::Reserved, MemoryType::Normal, invalidate);
map_page_at(0, vaddr, Flags::Reserved);
return true;
}
@@ -753,8 +834,7 @@ namespace Kernel
if (only_free && !is_range_free(vaddr, bytes))
return false;
for (size_t offset = 0; offset < bytes; offset += PAGE_SIZE)
reserve_page(vaddr + offset, true, false);
invalidate_range(vaddr, bytes / PAGE_SIZE, true);
reserve_page(vaddr + offset);
return true;
}
@@ -768,9 +848,9 @@ namespace Kernel
last_address -= rem;
ASSERT(is_canonical(first_address));
ASSERT(is_canonical(last_address - 1));
ASSERT(is_canonical(last_address));
const vaddr_t uc_vaddr_start = uncanonicalize(first_address);
const vaddr_t uc_vaddr_end = uncanonicalize(last_address - 1);
const vaddr_t uc_vaddr_end = uncanonicalize(last_address);
uint16_t pml4e = (uc_vaddr_start >> 39) & 0x1FF;
uint16_t pdpte = (uc_vaddr_start >> 30) & 0x1FF;
@@ -787,8 +867,10 @@ namespace Kernel
// Try to find free page that can be mapped without
// allocations (page table with unused entries)
const uint64_t* pml4 = P2V(m_highest_paging_struct);
for (; pml4e <= e_pml4e; pml4e++)
for (; pml4e < 512; pml4e++)
{
if (pml4e > e_pml4e)
break;
if (!(pml4[pml4e] & Flags::Present))
continue;
const uint64_t* pdpt = P2V(pml4[pml4e] & s_page_addr_mask);
@@ -808,24 +890,22 @@ namespace Kernel
const uint64_t* pt = P2V(pd[pde] & s_page_addr_mask);
for (; pte < 512; pte++)
{
if (pml4e == e_pml4e && pdpte == e_pdpte && pde == e_pde && pte > e_pte)
if (pml4e == e_pml4e && pdpte == e_pdpte && pde == e_pde && pte >= e_pte)
break;
if (pt[pte] & Flags::Used)
continue;
vaddr_t vaddr = 0;
vaddr |= static_cast<uint64_t>(pml4e) << 39;
vaddr |= static_cast<uint64_t>(pdpte) << 30;
vaddr |= static_cast<uint64_t>(pde) << 21;
vaddr |= static_cast<uint64_t>(pte) << 12;
vaddr = canonicalize(vaddr);
ASSERT(reserve_page(vaddr));
return vaddr;
if (!(pt[pte] & Flags::Used))
{
vaddr_t vaddr = 0;
vaddr |= static_cast<uint64_t>(pml4e) << 39;
vaddr |= static_cast<uint64_t>(pdpte) << 30;
vaddr |= static_cast<uint64_t>(pde) << 21;
vaddr |= static_cast<uint64_t>(pte) << 12;
vaddr = canonicalize(vaddr);
ASSERT(reserve_page(vaddr));
return vaddr;
}
}
pte = 0;
}
pde = 0;
}
pdpte = 0;
}
for (vaddr_t uc_vaddr = uc_vaddr_start; uc_vaddr < uc_vaddr_end; uc_vaddr += PAGE_SIZE)
@@ -837,7 +917,7 @@ namespace Kernel
}
}
return 0;
ASSERT_NOT_REACHED();
}
vaddr_t PageTable::reserve_free_contiguous_pages(size_t page_count, vaddr_t first_address, vaddr_t last_address)
@@ -850,7 +930,7 @@ namespace Kernel
last_address -= rem;
ASSERT(is_canonical(first_address));
ASSERT(is_canonical(last_address - 1));
ASSERT(is_canonical(last_address));
SpinLockGuard _(m_lock);
@@ -879,7 +959,7 @@ namespace Kernel
}
}
return 0;
ASSERT_NOT_REACHED();
}
bool PageTable::is_page_free(vaddr_t page) const

View File

@@ -1,48 +1,30 @@
.section .userspace, "ax"
// stack contains
// (8 bytes) return address (on return stack)
// (8 bytes) return stack
// (8 bytes) return rflags
// (8 bytes) restore sigmask
// (56 bytes) siginfo_t
// (8 bytes) signal number
// (8 bytes) signal handler
// return address
// signal number
// signal handler
.global signal_trampoline
signal_trampoline:
pushq %r15 // gregs
pushq %r14
pushq %r13
pushq %r12
pushq %r11
pushq %r10
pushq %r9
pushq %r8
pushq %rsi
pushq %rdi
pushq %rdx
pushq %rcx
pushq %rax
pushq %rbx
pushq %rax
pushq %rcx
pushq %rdx
pushq %rbp
pushq %rdi
pushq %rsi
pushq %r8
pushq %r9
pushq %r10
pushq %r11
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movq 208(%rsp), %rax
pushq %rax; addq $(128 + 8), (%rsp)
pushq (%rax)
// FIXME: populate these
xorq %rax, %rax
pushq %rax // stack
pushq %rax
pushq %rax
pushq %rax // sigset
pushq %rax // link
movq %rsp, %rdx // ucontext
leaq 192(%rsp), %rsi // siginfo
movq 184(%rsp), %rdi // signal number
movq 176(%rsp), %rax // handler
movq 128(%rsp), %rdi
movq 120(%rsp), %rax
// align stack to 16 bytes
movq %rsp, %rbp
@@ -58,40 +40,24 @@ signal_trampoline:
// restore stack
movq %rbp, %rsp
addq $40, %rsp
// restore sigmask
movq $83, %rdi // SYS_SIGPROCMASK
movq $3, %rsi // SIG_SETMASK
leaq 192(%rsp), %rdx // set
xorq %r10, %r10 // oset
syscall
// restore registers
addq $16, %rsp
popq %rbp
popq %rax
popq %rbx
popq %rcx
popq %rdx
popq %rdi
popq %rsi
popq %r8
popq %r9
popq %r10
popq %r11
popq %r12
popq %r13
popq %r14
popq %r15
popq %r14
popq %r13
popq %r12
popq %r11
popq %r10
popq %r9
popq %r8
popq %rsi
popq %rdi
popq %rbp
popq %rdx
popq %rcx
popq %rbx
popq %rax
// skip handler, number, siginfo_t, sigmask
addq $80, %rsp
// restore flags
addq $16, %rsp
popfq
movq (%rsp), %rsp
// return over red-zone
ret $128

View File

@@ -1,26 +1,49 @@
// arguments in RAX, RBX, RCX, RDX, RSI, RDI
// System V ABI: RDI, RSI, RDX, RCX, R8, R9
.global asm_syscall_handler
asm_syscall_handler:
swapgs
movq %rsp, %rax
movq %gs:8, %rsp
pushq $(0x20 | 3)
pushq %rax
pushq %r11
pushq $(0x28 | 3)
pushq %rbx
pushq %rcx
subq $8, %rsp
pushq %rdx
pushq %rdi
pushq %rsi
pushq %rbp
pushq %r8
pushq %r9
pushq %r10
pushq %r11
pushq %r12
pushq %r13
pushq %r14
pushq %r15
cld
movq %r10, %rcx
movq %rsi, %r8
movq %rdi, %r9
movq %rax, %rdi
movq %rbx, %rsi
xchgq %rcx, %rdx
leaq 112(%rsp), %rbx
pushq %rbx
call cpp_syscall_handler
addq $8, %rsp
movq 8(%rsp), %rcx
movq 24(%rsp), %r11
movq 32(%rsp), %rsp
popq %r15
popq %r14
popq %r13
popq %r12
popq %r11
popq %r10
popq %r9
popq %r8
popq %rbp
popq %rsi
popq %rdi
popq %rdx
popq %rcx
popq %rbx
iretq
swapgs
sysretq
.global sys_fork_trampoline
sys_fork_trampoline:
@@ -33,7 +56,7 @@ sys_fork_trampoline:
call read_ip
testq %rax, %rax
jz .done
je .reload_stack
movq %rax, %rsi
movq %rsp, %rdi
@@ -47,3 +70,9 @@ sys_fork_trampoline:
popq %rbp
popq %rbx
ret
.reload_stack:
call get_thread_start_sp
movq %rax, %rsp
xorq %rax, %rax
jmp .done

View File

@@ -7,6 +7,9 @@ read_ip:
# void start_kernel_thread()
.global start_kernel_thread
start_kernel_thread:
call get_thread_start_sp
movq %rax, %rsp
# STACK LAYOUT
# on_exit arg
# on_exit func
@@ -24,5 +27,9 @@ start_kernel_thread:
.global start_userspace_thread
start_userspace_thread:
swapgs
call load_thread_sse
call get_thread_start_sp
movq %rax, %rsp
iretq

View File

@@ -1,87 +0,0 @@
# bool safe_user_memcpy(void*, const void*, size_t)
.global safe_user_memcpy
.global safe_user_memcpy_end
.global safe_user_memcpy_fault
safe_user_memcpy:
xorq %rax, %rax
movq %rdx, %rcx
rep movsb
incq %rax
safe_user_memcpy_fault:
ret
safe_user_memcpy_end:
# bool safe_user_strncpy(void*, const void*, size_t)
.global safe_user_strncpy
.global safe_user_strncpy_end
.global safe_user_strncpy_fault
safe_user_strncpy:
movq %rdx, %rcx
testq %rcx, %rcx
jz safe_user_strncpy_fault
.safe_user_strncpy_align_loop:
testb $0x7, %sil
jz .safe_user_strncpy_align_done
movb (%rsi), %al
movb %al, (%rdi)
testb %al, %al
jz .safe_user_strncpy_done
incq %rdi
incq %rsi
decq %rcx
jnz .safe_user_strncpy_align_loop
jmp safe_user_strncpy_fault
.safe_user_strncpy_align_done:
movq $0x0101010101010101, %r8
movq $0x8080808080808080, %r9
.safe_user_strncpy_qword_loop:
cmpq $8, %rcx
jb .safe_user_strncpy_qword_done
movq (%rsi), %rax
movq %rax, %r10
movq %rax, %r11
# https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
subq %r8, %r10
notq %r11
andq %r11, %r10
andq %r9, %r10
jnz .safe_user_strncpy_byte_loop
movq %rax, (%rdi)
addq $8, %rdi
addq $8, %rsi
subq $8, %rcx
jnz .safe_user_strncpy_qword_loop
jmp safe_user_strncpy_fault
.safe_user_strncpy_qword_done:
testq %rcx, %rcx
jz safe_user_strncpy_fault
.safe_user_strncpy_byte_loop:
movb (%rsi), %al
movb %al, (%rdi)
testb %al, %al
jz .safe_user_strncpy_done
incq %rdi
incq %rsi
decq %rcx
jnz .safe_user_strncpy_byte_loop
safe_user_strncpy_fault:
xorq %rax, %rax
ret
.safe_user_strncpy_done:
movb $1, %al
ret
safe_user_strncpy_end:

View File

@@ -1,29 +0,0 @@
.global asm_yield_trampoline
asm_yield_trampoline:
leaq 8(%rsp), %rcx
movq %rdi, %rsp
subq $8, %rsp
pushq -8(%rcx)
pushq %rcx
pushq %rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movq %rsp, %rdi
call scheduler_on_yield
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
popq %rbx
popq %rax
movq 8(%rsp), %rcx
movq 0(%rsp), %rsp
jmp *%rcx

View File

@@ -11,28 +11,9 @@
.code32
// custom addresses, video mode info, page align modules
.set multiboot_flags, (1 << 16) | (1 << 2) | (1 << 0)
# multiboot2 header
.section .multiboot, "aw"
multiboot_start:
.long 0x1BADB002
.long multiboot_flags
.long -(0x1BADB002 + multiboot_flags)
.long V2P(multiboot_start)
.long V2P(g_kernel_start)
.long V2P(g_kernel_bss_start)
.long V2P(g_kernel_end)
.long V2P(_start)
.long 0
.long FB_WIDTH
.long FB_HEIGHT
.long FB_BPP
multiboot_end:
.section .multiboot2, "aw"
.align 8
multiboot2_start:
.long 0xE85250D6
.long 0
@@ -68,6 +49,7 @@ multiboot2_start:
multiboot2_end:
.section .bananboot, "aw"
.align 8
bananboot_start:
.long 0xBABAB007
.long -(0xBABAB007 + FB_WIDTH + FB_HEIGHT + FB_BPP)
@@ -97,25 +79,27 @@ bananboot_end:
.align 4096
boot_pml4:
.quad V2P(boot_pdpt_lo) + (PG_READ_WRITE | PG_PRESENT)
.skip 510 * 8
.rept 510
.quad 0
.endr
.quad V2P(boot_pdpt_hi) + (PG_READ_WRITE | PG_PRESENT)
boot_pdpt_lo:
.quad V2P(boot_pd) + (PG_READ_WRITE | PG_PRESENT)
.skip 511 * 8
.rept 511
.quad 0
.endr
boot_pdpt_hi:
.skip 510 * 8
.rept 510
.quad 0
.endr
.quad V2P(boot_pd) + (PG_READ_WRITE | PG_PRESENT)
.skip 8
.quad 0
boot_pd:
.set i, 0
.rept 511
.rept 512
.quad i + (PG_PAGE_SIZE | PG_READ_WRITE | PG_PRESENT)
.set i, i + 0x200000
.endr
.quad V2P(g_boot_fast_page_pt) + (PG_READ_WRITE | PG_PRESENT)
.global g_boot_fast_page_pt
g_boot_fast_page_pt:
.skip 512 * 8
boot_gdt:
.quad 0x0000000000000000 # null descriptor
@@ -178,13 +162,6 @@ enable_sse:
movl %eax, %cr4
ret
enable_tsc:
# allow userspace to use RDTSC
movl %cr4, %ecx
andl $0xFFFFFFFB, %ecx
movl %ecx, %cr4
ret
initialize_paging:
# enable PAE
movl %cr4, %ecx
@@ -221,7 +198,6 @@ _start:
call check_requirements
call enable_sse
call enable_tsc
call initialize_paging
# flush gdt and jump to 64 bit
@@ -271,7 +247,7 @@ system_halt:
jmp 1b
#define AP_REL(vaddr) ((vaddr) - ap_trampoline + 0xF000)
#define AP_V2P(vaddr) ((vaddr) - ap_trampoline + 0xF000)
.section .ap_init, "ax"
@@ -281,27 +257,21 @@ ap_trampoline:
jmp 1f
.align 8
ap_stack_paddr:
.skip 8
ap_stack_vaddr:
.skip 8
ap_prepare_paging:
.skip 8
ap_page_table:
.skip 8
ap_ready:
.skip 8
ap_stack_ptr:
.skip 4
ap_stack_loaded:
.skip 1
1: cli; cld
ljmpl $0x00, $AP_REL(ap_cs_clear)
ljmpl $0x00, $AP_V2P(ap_cs_clear)
ap_cs_clear:
# load ap gdt and enter protected mode
lgdt AP_REL(ap_gdtr)
lgdt AP_V2P(ap_gdtr)
movl %cr0, %eax
orb $1, %al
movl %eax, %cr0
ljmpl $0x08, $AP_REL(ap_protected_mode)
ljmpl $0x08, $AP_V2P(ap_protected_mode)
.code32
ap_protected_mode:
@@ -310,42 +280,36 @@ ap_protected_mode:
movw %ax, %ss
movw %ax, %es
movl AP_REL(ap_stack_paddr), %esp
movl AP_V2P(ap_stack_ptr), %esp
movb $1, AP_V2P(ap_stack_loaded)
leal V2P(enable_sse), %ecx; call *%ecx
leal V2P(enable_tsc), %ecx; call *%ecx
leal V2P(initialize_paging), %ecx; call *%ecx
# load boot gdt and enter long mode
lgdt V2P(boot_gdtr)
ljmpl $0x08, $AP_REL(ap_long_mode)
ljmpl $0x08, $AP_V2P(ap_long_mode)
.code64
ap_long_mode:
movq $ap_higher_half, %rax
jmp *%rax
ap_higher_half:
movq AP_REL(ap_prepare_paging), %rax
call *%rax
# load AP's initial values
movq AP_REL(ap_stack_vaddr), %rsp
movq AP_REL(ap_page_table), %rax
movq $1, AP_REL(ap_ready)
movq %rax, %cr3
# move stack pointer to higher half
movl %esp, %esp
addq $KERNEL_OFFSET, %rsp
# clear rbp for stacktrace
xorq %rbp, %rbp
xorb %al, %al
1: pause
cmpb $0, g_ap_startup_done
je 1b
cmpb %al, g_ap_startup_done
jz 1b
lock incb g_ap_running_count
call ap_main
jmp system_halt
# jump to ap_main in higher half
movabsq $ap_main, %rcx
call *%rcx
jmp V2P(system_halt)
ap_gdt:
.quad 0x0000000000000000 # null descriptor

View File

@@ -1,4 +1,4 @@
.macro intr_header, n
.macro pushaq
pushq %rax
pushq %rcx
pushq %rdx
@@ -14,18 +14,10 @@
pushq %r13
pushq %r14
pushq %r15
testb $3, \n+15*8(%rsp)
jz 1f
swapgs
1: cld
.endm
.macro intr_footer, n
testb $3, \n+15*8(%rsp)
jz 1f
call cpp_check_signal
swapgs
1: popq %r15
.macro popaq
popq %r15
popq %r14
popq %r13
popq %r12
@@ -43,7 +35,8 @@
.endm
isr_stub:
intr_header 24
pushaq
cld
movq %cr0, %rax; pushq %rax
movq %cr2, %rax; pushq %rax
movq %cr3, %rax; pushq %rax
@@ -56,33 +49,43 @@ isr_stub:
call cpp_isr_handler
addq $32, %rsp
intr_footer 24
popaq
addq $16, %rsp
iretq
irq_stub:
intr_header 24
xorq %rbp, %rbp
pushaq
cld
movq 120(%rsp), %rdi # irq number
call cpp_irq_handler
intr_footer 24
popaq
addq $16, %rsp
iretq
.global asm_yield_handler
asm_yield_handler:
pushaq
cld
leaq 120(%rsp), %rdi # interrupt stack ptr
movq %rsp, %rsi # interrupt register ptr
call cpp_yield_handler
popaq
iretq
.global asm_ipi_handler
asm_ipi_handler:
intr_header 8
xorq %rbp, %rbp
pushaq
cld
call cpp_ipi_handler
intr_footer 8
popaq
iretq
.global asm_timer_handler
asm_timer_handler:
intr_header 8
xorq %rbp, %rbp
pushaq
cld
call cpp_timer_handler
intr_footer 8
popaq
iretq
.macro isr n

View File

@@ -11,21 +11,20 @@ SECTIONS
{
g_kernel_execute_start = .;
*(.multiboot)
*(.multiboot2)
*(.bananboot)
*(.text.*)
}
.ap_init ALIGN(4K) : AT(ADDR(.ap_init) - KERNEL_OFFSET)
{
g_ap_init_addr = .;
*(.ap_init)
g_kernel_execute_end = .;
}
.userspace ALIGN(4K) : AT(ADDR(.userspace) - KERNEL_OFFSET)
{
g_userspace_start = .;
*(.userspace)
g_userspace_end = .;
g_kernel_execute_end = .;
}
.ap_init ALIGN(4K) : AT(ADDR(.ap_init) - KERNEL_OFFSET)
{
g_ap_init_addr = .;
*(.ap_init)
}
.rodata ALIGN(4K) : AT(ADDR(.rodata) - KERNEL_OFFSET)
{
@@ -44,7 +43,6 @@ SECTIONS
}
.bss ALIGN(4K) : AT(ADDR(.bss) - KERNEL_OFFSET)
{
g_kernel_bss_start = .;
*(COMMON)
*(.bss)
g_kernel_writable_end = .;

View File

@@ -2,7 +2,6 @@
#include <BAN/Vector.h>
#include <kernel/ACPI/AML/Namespace.h>
#include <kernel/ACPI/EmbeddedController.h>
#include <kernel/ACPI/Headers.h>
#include <kernel/Memory/Types.h>
#include <kernel/ThreadBlocker.h>
@@ -36,12 +35,8 @@ namespace Kernel::ACPI
BAN::ErrorOr<void> poweroff();
BAN::ErrorOr<void> reset();
BAN::ErrorOr<void> register_gpe_handler(uint8_t gpe, void (*callback)(void*), void* argument);
void handle_irq() override;
BAN::Span<BAN::UniqPtr<EmbeddedController>> embedded_controllers() { return m_embedded_controllers.span(); }
private:
ACPI() = default;
BAN::ErrorOr<void> initialize_impl();
@@ -55,12 +50,6 @@ namespace Kernel::ACPI
BAN::ErrorOr<void> route_interrupt_link_device(const AML::Scope& device, uint64_t& routed_irq_mask);
BAN::ErrorOr<void> initialize_embedded_controller(const AML::Scope& embedded_controller);
BAN::ErrorOr<void> initialize_embedded_controllers();
BAN::Optional<GAS> find_gpe_block(size_t index);
bool enable_gpe(uint8_t gpe);
private:
paddr_t m_header_table_paddr = 0;
vaddr_t m_header_table_vaddr = 0;
@@ -79,23 +68,8 @@ namespace Kernel::ACPI
ThreadBlocker m_event_thread_blocker;
BAN::Vector<BAN::UniqPtr<EmbeddedController>> m_embedded_controllers;
struct GPEHandler
{
bool has_callback { false };
union {
AML::Reference* method;
struct
{
void (*callback)(void*);
void* argument;
};
};
};
bool m_has_any_gpes { false };
AML::Scope m_gpe_scope;
BAN::Array<GPEHandler, 0xFF> m_gpe_methods;
BAN::Array<AML::Reference*, 0xFF> m_gpe_methods { nullptr };
bool m_hardware_reduced { false };
AML::Namespace* m_namespace { nullptr };

View File

@@ -50,7 +50,6 @@ namespace Kernel::ACPI::AML
BAN::ErrorOr<void> for_each_child(const Scope&, const BAN::Function<BAN::Iteration(const Scope&, Reference*)>&);
BAN::ErrorOr<BAN::Vector<Scope>> find_device_with_eisa_id(BAN::StringView eisa_id);
BAN::ErrorOr<BAN::Vector<Scope>> find_device_with_eisa_id(BAN::Span<BAN::StringView> eisa_ids);
private:
BAN::ErrorOr<Scope> resolve_path(const Scope& scope, const NameString& name_string);

View File

@@ -1,69 +0,0 @@
#pragma once
#include <BAN/Atomic.h>
#include <BAN/UniqPtr.h>
#include <kernel/ACPI/AML/Scope.h>
#include <kernel/Lock/Mutex.h>
#include <kernel/Thread.h>
namespace Kernel::ACPI
{
class EmbeddedController
{
public:
static BAN::ErrorOr<BAN::UniqPtr<EmbeddedController>> create(AML::Scope&& scope, uint16_t command_port, uint16_t data_port, BAN::Optional<uint8_t> gpe);
~EmbeddedController();
BAN::ErrorOr<uint8_t> read_byte(uint8_t offset);
BAN::ErrorOr<void> write_byte(uint8_t offset, uint8_t value);
const AML::Scope& scope() const { return m_scope; }
private:
EmbeddedController(AML::Scope&& scope, uint16_t command_port, uint16_t data_port, bool has_gpe)
: m_scope(BAN::move(scope))
, m_command_port(command_port)
, m_data_port(data_port)
, m_has_gpe(has_gpe)
{ }
private:
void wait_status_bit(uint8_t bit, uint8_t value);
uint8_t read_one(uint16_t port);
void write_one(uint16_t port, uint8_t value);
static void handle_gpe_wrapper(void*);
void handle_gpe();
BAN::ErrorOr<void> call_query_method(uint8_t notification);
void thread_task();
struct Command
{
uint8_t command;
BAN::Optional<uint8_t> data1;
BAN::Optional<uint8_t> data2;
uint8_t* response;
BAN::Atomic<bool> done;
};
BAN::ErrorOr<void> send_command(Command& command);
private:
const AML::Scope m_scope;
const uint16_t m_command_port;
const uint16_t m_data_port;
const bool m_has_gpe;
Mutex m_mutex;
ThreadBlocker m_thread_blocker;
BAN::Optional<Command*> m_queued_command;
Thread* m_thread { nullptr };
};
}

View File

@@ -1,179 +0,0 @@
#pragma once
#include <BAN/ByteSpan.h>
#include <BAN/Debug.h>
#include <BAN/Optional.h>
namespace Kernel::ACPI
{
struct ResourceData
{
enum class Type
{
IRQ,
DMA,
IOPort,
FixedIOPort,
FixedDMA,
// TODO: large stuff the stuff :)
};
union {
struct {
uint16_t irq_mask;
union {
struct {
uint8_t edge_triggered : 1;
uint8_t : 2;
uint8_t active_low : 1;
uint8_t shared : 1;
uint8_t wake_capable : 1;
uint8_t : 2;
};
uint8_t raw;
};
} irq;
struct {
uint8_t channel_mask;
union {
struct {
uint8_t type : 2; // 0: 8 bit, 1: 8 and 16 bit, 2: 16 bit only
uint8_t bus_master : 1;
uint8_t : 2;
uint8_t channel_speed : 2; // 0: compatibility, 1: type A, 2: type B, 3: type F
uint8_t : 1;
};
uint8_t raw;
};
} dma;
struct {
uint16_t range_min_base;
uint16_t range_max_base;
uint8_t base_alignment;
uint8_t range_length;
} io_port;
struct {
uint16_t range_base;
uint8_t range_length;
} fixed_io_port;
struct {
uint16_t request_line;
uint16_t channel;
uint8_t transfer_width; // 0: 8 bit, 1: 16 bit, 2: 32 bit, 3: 64 bit, 4: 128 bit
} fixed_dma;
} as;
Type type;
};
class ResourceParser
{
public:
ResourceParser(BAN::ConstByteSpan buffer)
: m_buffer(buffer)
{}
BAN::Optional<ResourceData> get_next()
{
for (;;)
{
if (m_buffer.empty())
return {};
if (m_buffer[0] & 0x80)
{
dprintln("Skipping large resource 0x{2H}", m_buffer[0] & 0x7F);
const uint16_t length = (m_buffer[2] << 8) | m_buffer[1];
if (m_buffer.size() < static_cast<size_t>(3 + length))
return {};
m_buffer = m_buffer.slice(3 + length);
continue;
}
const uint8_t length = m_buffer[0] & 0x07;
if (m_buffer.size() < static_cast<size_t>(1 + length))
return {};
BAN::Optional<ResourceData> result;
switch ((m_buffer[0] >> 3) & 0x0F)
{
case 0x04:
if (length < 2)
break;
result = ResourceData {
.as = { .irq = {
.irq_mask = static_cast<uint16_t>((m_buffer[2] << 8) | m_buffer[1]),
.raw = (length >= 3) ? m_buffer[3] : static_cast<uint8_t>(1),
}},
.type = ResourceData::Type::IRQ,
};
break;
case 0x05:
if (length < 2)
break;
result = ResourceData {
.as = { .dma = {
.channel_mask = m_buffer[1],
.raw = m_buffer[2],
}},
.type = ResourceData::Type::DMA,
};
break;
case 0x08:
if (length < 7)
break;
result = ResourceData {
.as = { .io_port = {
.range_min_base = static_cast<uint16_t>(((m_buffer[3] << 8) | m_buffer[2]) & ((m_buffer[1] & 1) ? 0xFFFF : 0x03FF)),
.range_max_base = static_cast<uint16_t>(((m_buffer[5] << 8) | m_buffer[4]) & ((m_buffer[1] & 1) ? 0xFFFF : 0x03FF)),
.base_alignment = m_buffer[6],
.range_length = m_buffer[7],
}},
.type = ResourceData::Type::IOPort,
};
break;
case 0x09:
if (length < 3)
break;
result = ResourceData {
.as = { .fixed_io_port = {
.range_base = static_cast<uint16_t>(((m_buffer[2] << 8) | m_buffer[1]) & 0x03FF),
.range_length = m_buffer[3],
}},
.type = ResourceData::Type::FixedIOPort,
};
break;
case 0x0A:
if (length < 5)
break;
result = ResourceData {
.as = { .fixed_dma = {
.request_line = static_cast<uint16_t>((m_buffer[2] << 8) | m_buffer[1]),
.channel = static_cast<uint16_t>((m_buffer[4] << 8) | m_buffer[3]),
.transfer_width = m_buffer[5],
}},
.type = ResourceData::Type::FixedDMA,
};
break;
case 0x0F:
// End tag
return {};
case 0x06:
case 0x07:
case 0x0E:
dprintln("Skipping short resource 0x{2H}", (m_buffer[0] >> 3) & 0x0F);
break;
}
m_buffer = m_buffer.slice(1 + length);
if (result.has_value())
return result.release_value();
}
}
private:
BAN::ConstByteSpan m_buffer;
};
}

View File

@@ -1,37 +0,0 @@
#pragma once
#include <stdint.h>
namespace Kernel::API
{
enum SharedPageFeature : uint32_t
{
SPF_GETTIME = 1 << 0,
};
struct SharedPage
{
uint16_t gdt_cpu_offset;
uint32_t features;
struct
{
uint8_t shift;
uint64_t mult;
uint64_t realtime_seconds;
} gettime_shared;
struct
{
struct
{
uint32_t seq;
uint64_t last_ns;
uint64_t last_tsc;
} gettime_local;
} cpus[];
};
}

View File

@@ -1,34 +0,0 @@
#pragma once
#include <BAN/MacroUtils.h>
#if defined(__x86_64__)
#define _kas_instruction "syscall"
#define _kas_result rax
#define _kas_arguments rdi, rsi, rdx, r10, r8, r9
#define _kas_globbers rcx, rdx, rdi, rsi, r8, r9, r10, r11
#elif defined(__i686__)
#define _kas_instruction "int $0xF0"
#define _kas_result eax
#define _kas_arguments eax, ebx, ecx, edx, esi, edi
#define _kas_globbers
#endif
#define _kas_argument_var(index, value) register long _kas_a##index asm(_ban_stringify(_ban_get(index, _kas_arguments))) = (long)(value);
#define _kas_dummy_var(index, value) register long _kas_d##index asm(#value);
#define _kas_input(index, _) "r"(_kas_a##index)
#define _kas_output(index, _) , "=r"(_kas_d##index)
#define _kas_globber(_, value) #value
#define _kas_syscall(...) ({ \
register long _kas_ret asm(_ban_stringify(_kas_result)); \
_ban_for_each(_kas_argument_var, __VA_ARGS__) \
_ban_for_each(_kas_dummy_var, _kas_globbers) \
asm volatile( \
_kas_instruction \
: "=r"(_kas_ret) _ban_for_each(_kas_output, _kas_globbers) \
: _ban_for_each_comma(_kas_input, __VA_ARGS__) \
: "cc", "memory"); \
(void)_kas_a0; /* require 1 argument */ \
_kas_ret; \
})

View File

@@ -6,10 +6,10 @@
namespace Kernel
{
class AC97AudioController final : public AudioController, public Interruptable
class AC97AudioController : public AudioController, public Interruptable
{
public:
static BAN::ErrorOr<void> create(PCI::Device& pci_device);
static BAN::ErrorOr<BAN::RefPtr<AC97AudioController>> create(PCI::Device& pci_device);
void handle_irq() override;
@@ -19,24 +19,16 @@ namespace Kernel
uint32_t get_channels() const override { return 2; }
uint32_t get_sample_rate() const override { return 48000; }
uint32_t get_total_pins() const override { return 1; }
uint32_t get_current_pin() const override { return 0; }
BAN::ErrorOr<void> set_current_pin(uint32_t pin) override { if (pin != 0) return BAN::Error::from_errno(EINVAL); return {}; }
BAN::ErrorOr<void> set_volume_mdB(int32_t) override;
private:
AC97AudioController(PCI::Device& pci_device)
: m_pci_device(pci_device)
{ }
uint32_t get_volume_data() const;
BAN::ErrorOr<void> initialize();
BAN::ErrorOr<void> initialize_bld();
BAN::ErrorOr<void> initialize_interrupts();
bool queue_samples_to_bld();
void queue_samples_to_bld();
private:
static constexpr size_t m_bdl_entries = 32;

View File

@@ -1,39 +1,29 @@
#pragma once
#include <kernel/Device/Device.h>
#include <kernel/Memory/ByteRingBuffer.h>
#include <kernel/PCI.h>
#include <sys/ioctl.h>
namespace Kernel
{
class AudioController : public CharacterDevice
{
public:
static BAN::ErrorOr<void> create(PCI::Device& pci_device);
static BAN::ErrorOr<BAN::RefPtr<AudioController>> create(PCI::Device& pci_device);
dev_t rdev() const override { return m_rdev; }
BAN::StringView name() const override { return m_name; }
protected:
AudioController();
BAN::ErrorOr<void> initialize();
virtual void handle_new_data() = 0;
virtual uint32_t get_channels() const = 0;
virtual uint32_t get_sample_rate() const = 0;
virtual uint32_t get_total_pins() const = 0;
virtual uint32_t get_current_pin() const = 0;
virtual BAN::ErrorOr<void> set_current_pin(uint32_t) = 0;
virtual BAN::ErrorOr<void> set_volume_mdB(int32_t) = 0;
bool can_read_impl() const override { return false; }
bool can_write_impl() const override { SpinLockGuard _(m_spinlock); return !m_sample_data->full(); }
bool can_write_impl() const override { SpinLockGuard _(m_spinlock); return m_sample_data_size < m_sample_data_capacity; }
bool has_error_impl() const override { return false; }
bool has_hungup_impl() const override { return false; }
@@ -46,9 +36,9 @@ namespace Kernel
mutable SpinLock m_spinlock;
static constexpr size_t m_sample_data_capacity = 1 << 20;
BAN::UniqPtr<ByteRingBuffer> m_sample_data;
snd_volume_info m_volume_info {};
uint8_t m_sample_data[m_sample_data_capacity];
size_t m_sample_data_head { 0 };
size_t m_sample_data_size { 0 };
private:
const dev_t m_rdev;

View File

@@ -1,80 +0,0 @@
#pragma once
#include <kernel/Audio/Controller.h>
#include <kernel/Audio/HDAudio/Controller.h>
namespace Kernel
{
class HDAudioController;
class HDAudioFunctionGroup final : public AudioController
{
public:
static BAN::ErrorOr<BAN::RefPtr<HDAudioFunctionGroup>> create(BAN::RefPtr<HDAudioController>, uint8_t cid, HDAudio::AFGNode&&);
void on_stream_interrupt(uint8_t stream_index);
protected:
// FIXME: allow setting these :D
uint32_t get_channels() const override { return 2; }
uint32_t get_sample_rate() const override { return 48000; }
uint32_t get_total_pins() const override;
uint32_t get_current_pin() const override;
BAN::ErrorOr<void> set_current_pin(uint32_t) override;
BAN::ErrorOr<void> set_volume_mdB(int32_t) override;
void handle_new_data() override;
private:
HDAudioFunctionGroup(BAN::RefPtr<HDAudioController> controller, uint8_t cid, HDAudio::AFGNode&& afg_node)
: m_controller(controller)
, m_afg_node(BAN::move(afg_node))
, m_cid(cid)
{ }
~HDAudioFunctionGroup();
BAN::ErrorOr<void> initialize();
BAN::ErrorOr<void> initialize_stream();
BAN::ErrorOr<void> initialize_output();
BAN::ErrorOr<void> enable_output_path(uint8_t index);
BAN::ErrorOr<void> disable_output_path(uint8_t index);
void reset_stream();
BAN::ErrorOr<void> recurse_output_paths(const HDAudio::AFGWidget& widget, BAN::Vector<const HDAudio::AFGWidget*>& path);
uint16_t get_format_data() const;
size_t bdl_offset() const;
void queue_bdl_data();
private:
// use 6 512 sample BDL entries
// each entry is ~10.7 ms at 48 kHz
// -> total buffered audio is 64 ms
static constexpr size_t m_bdl_entry_sample_frames = 512;
static constexpr size_t m_bdl_entry_count = 6;
BAN::RefPtr<HDAudioController> m_controller;
const HDAudio::AFGNode m_afg_node;
const uint8_t m_cid;
BAN::Vector<BAN::Vector<const HDAudio::AFGWidget*>> m_output_paths;
BAN::Vector<const HDAudio::AFGWidget*> m_output_pins;
size_t m_output_path_index { SIZE_MAX };
uint8_t m_stream_id { 0xFF };
uint8_t m_stream_index { 0xFF };
BAN::UniqPtr<DMARegion> m_bdl_region;
size_t m_bdl_head { 0 };
size_t m_bdl_tail { 0 };
bool m_stream_running { false };
};
}

View File

@@ -1,77 +0,0 @@
#pragma once
#include <kernel/Audio/Controller.h>
#include <kernel/Audio/HDAudio/Definitions.h>
#include <kernel/Memory/DMARegion.h>
namespace Kernel
{
class HDAudioController : public Interruptable, public BAN::RefCounted<HDAudioController>
{
public:
static BAN::ErrorOr<void> create(PCI::Device& pci_device);
BAN::ErrorOr<uint32_t> send_command(HDAudio::CORBEntry);
uint8_t get_stream_index(HDAudio::StreamType type, uint8_t index) const;
BAN::ErrorOr<uint8_t> allocate_stream_id();
void deallocate_stream_id(uint8_t id);
BAN::ErrorOr<uint8_t> allocate_stream(HDAudio::StreamType type, void* afg);
void deallocate_stream(uint8_t index);
PCI::BarRegion& bar0() { return *m_bar0; }
bool is_64bit() const { return m_is64bit; }
void handle_irq() override;
private:
HDAudioController(PCI::Device& pci_device)
: m_pci_device(pci_device)
{ }
BAN::ErrorOr<void> initialize();
BAN::ErrorOr<void> initialize_ring_buffers();
BAN::ErrorOr<void> reset_controller();
BAN::ErrorOr<HDAudio::Codec> initialize_codec(uint8_t codec);
BAN::ErrorOr<HDAudio::AFGNode> initialize_node(uint8_t codec, uint8_t node);
BAN::ErrorOr<HDAudio::AFGWidget> initialize_widget(uint8_t codec, uint8_t node);
private:
struct RingBuffer
{
vaddr_t vaddr;
uint32_t index;
uint32_t size;
};
private:
PCI::Device& m_pci_device;
BAN::UniqPtr<PCI::BarRegion> m_bar0;
bool m_is64bit { false };
bool m_use_immediate_command { false };
uint8_t m_output_streams { 0 };
uint8_t m_input_streams { 0 };
uint8_t m_bidir_streams { 0 };
void* m_allocated_streams[30] {};
// NOTE: stream ids are from 1 to 15
uint16_t m_allocated_stream_ids { 0 };
Mutex m_command_mutex;
SpinLock m_rb_lock;
ThreadBlocker m_rb_blocker;
RingBuffer m_corb;
RingBuffer m_rirb;
BAN::UniqPtr<DMARegion> m_ring_buffer_region;
};
}

View File

@@ -1,90 +0,0 @@
#pragma once
#include <BAN/Vector.h>
namespace Kernel::HDAudio
{
struct CORBEntry
{
union {
struct {
uint32_t data : 8;
uint32_t command : 12;
uint32_t node_index : 8;
uint32_t codec_address : 4;
};
uint32_t raw;
};
};
static_assert(sizeof(CORBEntry) == sizeof(uint32_t));
struct BDLEntry
{
paddr_t address;
uint32_t length;
uint32_t ioc;
};
static_assert(sizeof(BDLEntry) == 16);
struct AFGWidget
{
enum class Type
{
OutputConverter,
InputConverter,
Mixer,
Selector,
PinComplex,
Power,
VolumeKnob,
BeepGenerator,
};
Type type;
uint8_t id;
union
{
struct
{
bool input;
bool output;
bool display; // HDMI or DP
uint32_t config;
} pin_complex;
};
struct Amplifier
{
uint8_t offset;
uint8_t num_steps;
uint8_t step_size;
bool mute;
};
BAN::Optional<Amplifier> output_amplifier;
BAN::Vector<uint16_t> connections;
};
struct AFGNode
{
uint8_t id;
BAN::Vector<AFGWidget> widgets;
};
struct Codec
{
uint8_t id;
BAN::Vector<AFGNode> nodes;
};
enum class StreamType
{
Input,
Output,
Bidirectional,
};
}

View File

@@ -1,50 +0,0 @@
#pragma once
#include <stdint.h>
namespace Kernel::HDAudio
{
enum Regs : uint8_t
{
GCAP = 0x00,
VMIN = 0x02,
VMAJ = 0x03,
GCTL = 0x08,
STATESTS = 0x0E,
INTCTL = 0x20,
INTSTS = 0x24,
CORBLBASE = 0x40,
CORBUBASE = 0x44,
CORBWP = 0x48,
CORBRP = 0x4A,
CORBCTL = 0x4C,
CORBSTS = 0x4D,
CORBSIZE = 0x4E,
RIRBLBASE = 0x50,
RIRBUBASE = 0x54,
RIRBWP = 0x58,
RINTCNT = 0x5A,
RIRBCTL = 0x5C,
RIRBSTS = 0x5D,
RIRBSIZE = 0x5E,
ICOI = 0x60,
ICII = 0x64,
ICIS = 0x68,
SDCTL = 0x00,
SDSTS = 0x03,
SDLPIB = 0x04,
SDCBL = 0x08,
SDLVI = 0x0C,
SDFIFOD = 0x10,
SDFMT = 0x12,
SDBDPL = 0x18,
SDBDPU = 0x1C,
};
}

View File

@@ -44,7 +44,7 @@ namespace Kernel
struct BootModule
{
paddr_t start;
uint64_t size;
size_t size;
};
struct BootInfo

View File

@@ -81,6 +81,5 @@ namespace CPUID
bool has_pge();
bool has_pat();
bool has_1gib_pages();
bool has_invariant_tsc();
}

View File

@@ -35,8 +35,10 @@ namespace Kernel
bool has_egid(gid_t) const;
BAN::Span<const gid_t> groups() const { return m_supplementary.span(); }
BAN::ErrorOr<void> set_groups(BAN::Span<const gid_t> groups);
BAN::ErrorOr<void> initialize_supplementary_groups();
private:
BAN::ErrorOr<BAN::String> find_username() const;
private:
uid_t m_ruid, m_euid, m_suid;

View File

@@ -70,15 +70,10 @@
#define DEBUG_USB_MOUSE 0
#define DEBUG_USB_MASS_STORAGE 0
#define DEBUG_HDAUDIO 0
namespace Debug
{
void dump_stack_trace();
void dump_stack_trace(uintptr_t ip, uintptr_t bp);
void dump_qr_code();
void putchar(char);
void print_prefix(const char*, int);

View File

@@ -16,11 +16,7 @@ namespace Kernel
virtual bool is_partition() const { return false; }
virtual bool is_storage_device() const { return false; }
virtual BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> mmap_region(PageTable&, off_t offset, size_t len, AddressRange, MemoryRegion::Type, PageTable::flags_t, int status_flags)
{
(void)offset; (void)len; (void)status_flags;
return BAN::Error::from_errno(ENOTSUP);
}
virtual BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> mmap_region(PageTable&, off_t offset, size_t len, AddressRange, MemoryRegion::Type, PageTable::flags_t) { (void)offset; (void)len; return BAN::Error::from_errno(EINVAL); }
virtual dev_t rdev() const override = 0;

View File

@@ -16,7 +16,6 @@ namespace Kernel
Debug,
Keyboard,
Mouse,
Joystick,
SCSI,
NVMeController,
NVMeNamespace,

View File

@@ -10,7 +10,6 @@ namespace Kernel
{
public:
static BAN::ErrorOr<BAN::RefPtr<FramebufferDevice>> create_from_boot_framebuffer();
static BAN::RefPtr<FramebufferDevice> boot_framebuffer();
~FramebufferDevice();
uint32_t width() const { return m_width; }
@@ -28,7 +27,7 @@ namespace Kernel
void sync_pixels_linear(uint32_t first_pixel, uint32_t pixel_count);
void sync_pixels_rectangle(uint32_t top_right_x, uint32_t top_right_y, uint32_t width, uint32_t height);
virtual BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> mmap_region(PageTable&, off_t offset, size_t len, AddressRange, MemoryRegion::Type, PageTable::flags_t, int status_flags) override;
virtual BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> mmap_region(PageTable&, off_t offset, size_t len, AddressRange, MemoryRegion::Type, PageTable::flags_t) override;
virtual dev_t rdev() const override { return m_rdev; }
virtual BAN::StringView name() const override { return m_name.sv(); }
@@ -37,8 +36,6 @@ namespace Kernel
virtual BAN::ErrorOr<size_t> read_impl(off_t, BAN::ByteSpan) override;
virtual BAN::ErrorOr<size_t> write_impl(off_t, BAN::ConstByteSpan) override;
BAN::ErrorOr<long> ioctl_impl(int cmd, void* arg) override;
virtual bool can_read_impl() const override { return true; }
virtual bool can_write_impl() const override { return true; }
virtual bool has_error_impl() const override { return false; }

View File

@@ -20,6 +20,6 @@ namespace Kernel::ELF
BAN::Vector<BAN::UniqPtr<MemoryRegion>> regions;
};
BAN::ErrorOr<LoadResult> load_from_inode(BAN::RefPtr<Inode> root, BAN::RefPtr<Inode> inode, const Credentials&, PageTable&);
BAN::ErrorOr<LoadResult> load_from_inode(BAN::RefPtr<Inode>, const Credentials&, PageTable&);
}

View File

@@ -1,8 +1,12 @@
#pragma once
#include <BAN/Array.h>
#include <BAN/CircularQueue.h>
#include <BAN/HashMap.h>
#include <BAN/HashSet.h>
#include <kernel/FS/Inode.h>
#include <limits.h>
#include <sys/epoll.h>
namespace Kernel
@@ -49,52 +53,57 @@ namespace Kernel
BAN::ErrorOr<void> fsync_impl() override { return {}; }
private:
struct InodeRefPtrHash
{
BAN::hash_t operator()(const BAN::RefPtr<Inode>& inode)
{
return BAN::hash<const Inode*>()(inode.ptr());
}
};
struct ListenEventList
{
ListenEventList() = default;
ListenEventList(const ListenEventList&) = delete;
ListenEventList& operator=(const ListenEventList&) = delete;
ListenEventList(ListenEventList&& other)
: events(BAN::move(other.events))
{}
ListenEventList& operator=(ListenEventList&& other)
{
events = BAN::move(other.events);
return *this;
}
BAN::HashMap<int, epoll_event> events;
BAN::Array<epoll_event, OPEN_MAX> events;
uint32_t bitmap[(OPEN_MAX + 31) / 32] {};
bool has_fd(int fd) const
{
return events.contains(fd);
// For some reason having (fd < 0 || ...) makes GCC 15.1.0
// think bitmap access can be out of bounds...
if (static_cast<size_t>(fd) >= events.size())
return false;
return bitmap[fd / 32] & (1u << (fd % 32));
}
bool empty() const
{
return events.empty();
for (auto val : bitmap)
if (val != 0)
return false;
return true;
}
BAN::ErrorOr<void> add_fd(int fd, epoll_event event)
void add_fd(int fd, epoll_event event)
{
TRY(events.insert(fd, event));
return {};
ASSERT(!has_fd(fd));
bitmap[fd / 32] |= (1u << (fd % 32));
events[fd] = event;
}
void remove_fd(int fd)
{
events.remove(fd);
ASSERT(has_fd(fd));
bitmap[fd / 32] &= ~(1u << (fd % 32));
events[fd] = {};
}
};
private:
ThreadBlocker m_thread_blocker;
SpinLock m_ready_lock;
BAN::HashMap<BAN::RefPtr<Inode>, uint32_t> m_ready_events;
BAN::HashMap<BAN::RefPtr<Inode>, uint32_t> m_processing_events;
BAN::HashMap<BAN::RefPtr<Inode>, ListenEventList> m_listening_events;
BAN::HashMap<BAN::RefPtr<Inode>, uint32_t, InodeRefPtrHash> m_ready_events;
BAN::HashMap<BAN::RefPtr<Inode>, uint32_t, InodeRefPtrHash> m_processing_events;
BAN::HashMap<BAN::RefPtr<Inode>, ListenEventList, InodeRefPtrHash> m_listening_events;
};
}

View File

@@ -22,7 +22,6 @@ namespace Kernel
void add_inode(BAN::StringView path, BAN::RefPtr<TmpInode>);
void initiate_disk_cache_drop();
void initiate_sync(bool should_block);
private:
@@ -38,10 +37,6 @@ namespace Kernel
ThreadBlocker m_sync_done;
ThreadBlocker m_sync_thread_blocker;
volatile bool m_should_sync { false };
SpinLock m_disk_cache_lock;
ThreadBlocker m_disk_cache_thread_blocker;
BAN::Atomic<bool> m_should_drop_disk_cache { false };
};
}

View File

@@ -1,52 +0,0 @@
#pragma once
#include <kernel/FS/Inode.h>
namespace Kernel
{
class EventFD final : public Inode
{
public:
static BAN::ErrorOr<BAN::RefPtr<Inode>> create(uint64_t initval, bool semaphore);
ino_t ino() const override { return 0; }
Mode mode() const override { return { Mode::IFCHR | Mode::IRUSR | Mode::IWUSR }; }
nlink_t nlink() const override { return ref_count(); }
uid_t uid() const override { return 0; }
gid_t gid() const override { return 0; }
off_t size() const override { return 0; }
timespec atime() const override { return {}; }
timespec mtime() const override { return {}; }
timespec ctime() const override { return {}; }
blksize_t blksize() const override { return 8; }
blkcnt_t blocks() const override { return 0; }
dev_t dev() const override { return 0; }
dev_t rdev() const override { return 0; }
const FileSystem* filesystem() const override { return nullptr; }
protected:
BAN::ErrorOr<size_t> read_impl(off_t, BAN::ByteSpan) override;
BAN::ErrorOr<size_t> write_impl(off_t, BAN::ConstByteSpan) override;
BAN::ErrorOr<void> fsync_impl() final override { return {}; }
bool can_read_impl() const override { return m_value > 0; }
bool can_write_impl() const override { return m_value < UINT64_MAX - 1; }
bool has_error_impl() const override { return false; }
bool has_hungup_impl() const override { return false; }
private:
EventFD(uint64_t initval, bool is_semaphore)
: m_is_semaphore(is_semaphore)
, m_value(initval)
{ }
private:
const bool m_is_semaphore;
uint64_t m_value;
ThreadBlocker m_thread_blocker;
};
}

View File

@@ -26,32 +26,18 @@ namespace Kernel
class BlockBufferWrapper
{
BAN_NON_COPYABLE(BlockBufferWrapper);
BAN_NON_MOVABLE(BlockBufferWrapper);
public:
BlockBufferWrapper(BAN::Span<uint8_t> buffer, void (*callback)(void*, const uint8_t*), void* argument)
BlockBufferWrapper(BAN::Span<uint8_t> buffer, bool& used)
: m_buffer(buffer)
, m_callback(callback)
, m_argument(argument)
{ }
BlockBufferWrapper(BlockBufferWrapper&& other) { *this = BAN::move(other); }
, m_used(used)
{
ASSERT(m_used);
}
~BlockBufferWrapper()
{
if (m_callback == nullptr)
return;
m_callback(m_argument, m_buffer.data());
}
BlockBufferWrapper& operator=(BlockBufferWrapper&& other)
{
this->m_buffer = other.m_buffer;
this->m_callback = other.m_callback;
this->m_argument = other.m_argument;
other.m_buffer = {};
other.m_callback = nullptr;
other.m_argument = nullptr;
return *this;
m_used = false;
}
size_t size() const { return m_buffer.size(); }
@@ -67,8 +53,7 @@ namespace Kernel
private:
BAN::Span<uint8_t> m_buffer;
void (*m_callback)(void*, const uint8_t*);
void* m_argument;
bool& m_used;
};
public:
@@ -94,7 +79,7 @@ namespace Kernel
BAN::ErrorOr<void> sync_superblock();
BAN::ErrorOr<void> sync_block(uint32_t block);
BAN::ErrorOr<BlockBufferWrapper> get_block_buffer();
BlockBufferWrapper get_block_buffer();
BAN::ErrorOr<uint32_t> reserve_free_block(uint32_t primary_bgd);
BAN::ErrorOr<void> release_block(uint32_t block);
@@ -117,13 +102,10 @@ namespace Kernel
{
public:
BlockBufferManager() = default;
BAN::ErrorOr<BlockBufferWrapper> get_buffer();
BlockBufferWrapper get_buffer();
BAN::ErrorOr<void> initialize(size_t block_size);
private:
void destroy_callback(const uint8_t* buffer_ptr);
private:
struct BlockBuffer
{
@@ -131,20 +113,8 @@ namespace Kernel
bool used { false };
};
struct ThreadInfo
{
pid_t tid { 0 };
size_t buffers { 0 };
};
private:
static constexpr size_t max_threads = 8;
static constexpr size_t max_buffers_per_thread = 6;
Mutex m_buffer_mutex;
ThreadBlocker m_buffer_blocker;
BAN::Array<BlockBuffer, max_threads * max_buffers_per_thread> m_buffers;
BAN::Array<ThreadInfo, max_threads> m_thread_infos;
BAN::Array<BlockBuffer, 10> m_buffers;
};
private:

View File

@@ -37,7 +37,6 @@ namespace Kernel
virtual BAN::ErrorOr<void> create_file_impl(BAN::StringView, mode_t, uid_t, gid_t) override;
virtual BAN::ErrorOr<void> create_directory_impl(BAN::StringView, mode_t, uid_t, gid_t) override;
virtual BAN::ErrorOr<void> link_inode_impl(BAN::StringView, BAN::RefPtr<Inode>) override;
virtual BAN::ErrorOr<void> rename_inode_impl(BAN::RefPtr<Inode>, BAN::StringView, BAN::StringView) override;
virtual BAN::ErrorOr<void> unlink_impl(BAN::StringView) override;
virtual BAN::ErrorOr<BAN::String> link_target_impl() override;
@@ -61,18 +60,18 @@ namespace Kernel
// NOTE: the inode might have more blocks than what this suggests if it has been shrinked
uint32_t max_used_data_block_count() const { return size() / blksize(); }
BAN::ErrorOr<BAN::Optional<uint32_t>> block_from_indirect_block(uint32_t& block, uint32_t index, uint32_t depth, bool allocate);
BAN::ErrorOr<BAN::Optional<uint32_t>> fs_block_of_data_block_index(uint32_t data_block_index, bool allocate);
BAN::ErrorOr<BAN::Optional<uint32_t>> block_from_indirect_block(uint32_t block, uint32_t index, uint32_t depth);
BAN::ErrorOr<BAN::Optional<uint32_t>> fs_block_of_data_block_index(uint32_t data_block_index);
BAN::ErrorOr<void> link_inode_to_directory(Ext2Inode&, BAN::StringView name);
BAN::ErrorOr<void> remove_inode_from_directory(BAN::StringView name, bool cleanup_directory);
BAN::ErrorOr<bool> is_directory_empty();
BAN::ErrorOr<void> cleanup_indirect_block(uint32_t block, uint32_t depth);
BAN::ErrorOr<void> cleanup_default_links();
BAN::ErrorOr<void> cleanup_data_blocks();
BAN::ErrorOr<void> cleanup_from_fs();
BAN::ErrorOr<uint32_t> allocate_new_block_to_indirect_block(uint32_t& block, uint32_t index, uint32_t depth);
BAN::ErrorOr<uint32_t> allocate_new_block(uint32_t data_block_index);
BAN::ErrorOr<void> sync();
uint32_t block_group() const;
@@ -85,26 +84,6 @@ namespace Kernel
{}
static BAN::ErrorOr<BAN::RefPtr<Ext2Inode>> create(Ext2FS&, uint32_t);
private:
struct ScopedSync
{
ScopedSync(Ext2Inode& inode)
: inode(inode)
, inode_info(inode.m_inode)
{ }
~ScopedSync()
{
if (memcmp(&inode.m_inode, &inode_info, sizeof(Ext2::Inode)) == 0)
return;
if (auto ret = inode.sync(); ret.is_error())
dwarnln("failed to sync inode: {}", ret.error());
}
Ext2Inode& inode;
Ext2::Inode inode_info;
};
private:
Ext2FS& m_fs;
Ext2::Inode m_inode;

View File

@@ -22,7 +22,7 @@ namespace Kernel
class FileBackedRegion;
class FileSystem;
struct SharedFileData;
class SharedFileData;
class Inode : public BAN::RefCounted<Inode>
{
@@ -97,7 +97,6 @@ namespace Kernel
BAN::ErrorOr<void> create_file(BAN::StringView, mode_t, uid_t, gid_t);
BAN::ErrorOr<void> create_directory(BAN::StringView, mode_t, uid_t, gid_t);
BAN::ErrorOr<void> link_inode(BAN::StringView, BAN::RefPtr<Inode>);
BAN::ErrorOr<void> rename_inode(BAN::RefPtr<Inode>, BAN::StringView, BAN::StringView);
BAN::ErrorOr<void> unlink(BAN::StringView);
// Link API
@@ -109,12 +108,10 @@ namespace Kernel
BAN::ErrorOr<void> bind(const sockaddr* address, socklen_t address_len);
BAN::ErrorOr<void> connect(const sockaddr* address, socklen_t address_len);
BAN::ErrorOr<void> listen(int backlog);
BAN::ErrorOr<size_t> sendmsg(const msghdr& message, int flags);
BAN::ErrorOr<size_t> recvmsg(msghdr& message, int flags);
BAN::ErrorOr<size_t> sendto(BAN::ConstByteSpan message, const sockaddr* address, socklen_t address_len);
BAN::ErrorOr<size_t> recvfrom(BAN::ByteSpan buffer, sockaddr* address, socklen_t* address_len);
BAN::ErrorOr<void> getsockname(sockaddr* address, socklen_t* address_len);
BAN::ErrorOr<void> getpeername(sockaddr* address, socklen_t* address_len);
BAN::ErrorOr<void> getsockopt(int level, int option, void* value, socklen_t* value_len);
BAN::ErrorOr<void> setsockopt(int level, int option, const void* value, socklen_t value_len);
// General API
BAN::ErrorOr<size_t> read(off_t, BAN::ByteSpan buffer);
@@ -142,13 +139,12 @@ namespace Kernel
protected:
// Directory API
virtual BAN::ErrorOr<BAN::RefPtr<Inode>> find_inode_impl(BAN::StringView) { return BAN::Error::from_errno(ENOTSUP); }
virtual BAN::ErrorOr<size_t> list_next_inodes_impl(off_t, struct dirent*, size_t) { return BAN::Error::from_errno(ENOTSUP); }
virtual BAN::ErrorOr<void> create_file_impl(BAN::StringView, mode_t, uid_t, gid_t) { return BAN::Error::from_errno(ENOTSUP); }
virtual BAN::ErrorOr<void> create_directory_impl(BAN::StringView, mode_t, uid_t, gid_t) { return BAN::Error::from_errno(ENOTSUP); }
virtual BAN::ErrorOr<void> link_inode_impl(BAN::StringView, BAN::RefPtr<Inode>) { return BAN::Error::from_errno(ENOTSUP); }
virtual BAN::ErrorOr<void> rename_inode_impl(BAN::RefPtr<Inode>, BAN::StringView, BAN::StringView) { return BAN::Error::from_errno(ENOTSUP); }
virtual BAN::ErrorOr<void> unlink_impl(BAN::StringView) { return BAN::Error::from_errno(ENOTSUP); }
virtual BAN::ErrorOr<BAN::RefPtr<Inode>> find_inode_impl(BAN::StringView) { return BAN::Error::from_errno(ENOTSUP); }
virtual BAN::ErrorOr<size_t> list_next_inodes_impl(off_t, struct dirent*, size_t) { return BAN::Error::from_errno(ENOTSUP); }
virtual BAN::ErrorOr<void> create_file_impl(BAN::StringView, mode_t, uid_t, gid_t) { return BAN::Error::from_errno(ENOTSUP); }
virtual BAN::ErrorOr<void> create_directory_impl(BAN::StringView, mode_t, uid_t, gid_t) { return BAN::Error::from_errno(ENOTSUP); }
virtual BAN::ErrorOr<void> link_inode_impl(BAN::StringView, BAN::RefPtr<Inode>) { return BAN::Error::from_errno(ENOTSUP); }
virtual BAN::ErrorOr<void> unlink_impl(BAN::StringView) { return BAN::Error::from_errno(ENOTSUP); }
// Link API
virtual BAN::ErrorOr<BAN::String> link_target_impl() { return BAN::Error::from_errno(ENOTSUP); }
@@ -159,12 +155,10 @@ namespace Kernel
virtual BAN::ErrorOr<void> connect_impl(const sockaddr*, socklen_t) { return BAN::Error::from_errno(ENOTSUP); }
virtual BAN::ErrorOr<void> listen_impl(int) { return BAN::Error::from_errno(ENOTSUP); }
virtual BAN::ErrorOr<void> bind_impl(const sockaddr*, socklen_t) { return BAN::Error::from_errno(ENOTSUP); }
virtual BAN::ErrorOr<size_t> recvmsg_impl(msghdr&, int) { return BAN::Error::from_errno(ENOTSUP); }
virtual BAN::ErrorOr<size_t> sendmsg_impl(const msghdr&, int) { return BAN::Error::from_errno(ENOTSUP); }
virtual BAN::ErrorOr<size_t> sendto_impl(BAN::ConstByteSpan, const sockaddr*, socklen_t) { return BAN::Error::from_errno(ENOTSUP); }
virtual BAN::ErrorOr<size_t> recvfrom_impl(BAN::ByteSpan, sockaddr*, socklen_t*) { return BAN::Error::from_errno(ENOTSUP); }
virtual BAN::ErrorOr<void> getsockname_impl(sockaddr*, socklen_t*) { return BAN::Error::from_errno(ENOTSUP); }
virtual BAN::ErrorOr<void> getpeername_impl(sockaddr*, socklen_t*) { return BAN::Error::from_errno(ENOTSUP); }
virtual BAN::ErrorOr<void> getsockopt_impl(int, int, void*, socklen_t*) { return BAN::Error::from_errno(ENOTSUP); }
virtual BAN::ErrorOr<void> setsockopt_impl(int, int, const void*, socklen_t) { return BAN::Error::from_errno(ENOTSUP); }
// General API
virtual BAN::ErrorOr<size_t> read_impl(off_t, BAN::ByteSpan) { return BAN::Error::from_errno(ENOTSUP); }
@@ -193,7 +187,7 @@ namespace Kernel
friend class Epoll;
friend class FileBackedRegion;
friend class OpenFileDescriptorSet;
friend struct SharedFileData;
friend class SharedFileData;
friend class TTY;
};

View File

@@ -2,7 +2,6 @@
#include <BAN/Array.h>
#include <kernel/FS/Inode.h>
#include <kernel/Memory/ByteRingBuffer.h>
#include <kernel/ThreadBlocker.h>
namespace Kernel
@@ -39,7 +38,7 @@ namespace Kernel
virtual BAN::ErrorOr<size_t> write_impl(off_t, BAN::ConstByteSpan) override;
virtual BAN::ErrorOr<void> fsync_impl() final override { return {}; }
virtual bool can_read_impl() const override { return !m_buffer->empty(); }
virtual bool can_read_impl() const override { return m_buffer_size > 0; }
virtual bool can_write_impl() const override { return true; }
virtual bool has_error_impl() const override { return m_reading_count == 0; }
virtual bool has_hungup_impl() const override { return m_writing_count == 0; }
@@ -55,7 +54,9 @@ namespace Kernel
timespec m_ctime {};
ThreadBlocker m_thread_blocker;
BAN::UniqPtr<ByteRingBuffer> m_buffer;
BAN::Array<uint8_t, PAGE_SIZE> m_buffer;
BAN::Atomic<size_t> m_buffer_size { 0 };
size_t m_buffer_tail { 0 };
BAN::Atomic<uint32_t> m_writing_count { 1 };
BAN::Atomic<uint32_t> m_reading_count { 1 };

View File

@@ -57,31 +57,6 @@ namespace Kernel
size_t (Process::*m_callback)(off_t, BAN::ByteSpan) const;
};
class ProcSymlinkProcessInode final : public TmpInode
{
public:
static BAN::ErrorOr<BAN::RefPtr<ProcSymlinkProcessInode>> create_new(Process& process, BAN::ErrorOr<BAN::String> (Process::*callback)() const, TmpFileSystem&, mode_t);
~ProcSymlinkProcessInode() = default;
virtual uid_t uid() const override { return m_process.credentials().ruid(); }
virtual gid_t gid() const override { return m_process.credentials().rgid(); }
protected:
virtual BAN::ErrorOr<BAN::String> link_target_impl() override;
virtual bool can_read_impl() const override { return false; }
virtual bool can_write_impl() const override { return false; }
virtual bool has_error_impl() const override { return false; }
virtual bool has_hungup_impl() const override { return false; }
private:
ProcSymlinkProcessInode(Process& process, BAN::ErrorOr<BAN::String> (Process::*)() const, TmpFileSystem&, const TmpInodeInfo&);
private:
Process& m_process;
BAN::ErrorOr<BAN::String> (Process::*m_callback)() const;
};
class ProcROInode final : public TmpInode
{
public:
@@ -107,57 +82,4 @@ namespace Kernel
size_t (*m_callback)(off_t, BAN::ByteSpan);
};
class ProcSymlinkInode final : public TmpInode
{
public:
static BAN::ErrorOr<BAN::RefPtr<ProcSymlinkInode>> create_new(BAN::ErrorOr<BAN::String> (*)(void*), void (*)(void*), void* data, TmpFileSystem&, mode_t, uid_t, gid_t);
~ProcSymlinkInode();
protected:
virtual BAN::ErrorOr<BAN::String> link_target_impl() override;
virtual bool can_read_impl() const override { return false; }
virtual bool can_write_impl() const override { return false; }
virtual bool has_error_impl() const override { return false; }
virtual bool has_hungup_impl() const override { return false; }
private:
ProcSymlinkInode(BAN::ErrorOr<BAN::String> (*callback)(void*), void (*destructor)(void*), void* data, TmpFileSystem&, const TmpInodeInfo&);
private:
BAN::ErrorOr<BAN::String> (*m_callback)(void*);
void (*m_destructor)(void*);
void* m_data;
};
class ProcFDDirectoryInode final : public TmpInode
{
public:
static BAN::ErrorOr<BAN::RefPtr<ProcFDDirectoryInode>> create_new(Process&, TmpFileSystem&, mode_t);
~ProcFDDirectoryInode() = default;
virtual uid_t uid() const override { return m_process.credentials().ruid(); }
virtual gid_t gid() const override { return m_process.credentials().rgid(); }
protected:
virtual BAN::ErrorOr<BAN::RefPtr<Inode>> find_inode_impl(BAN::StringView) override;
virtual BAN::ErrorOr<size_t> list_next_inodes_impl(off_t, struct dirent*, size_t) override;
virtual BAN::ErrorOr<void> create_file_impl(BAN::StringView, mode_t, uid_t, gid_t) override { return BAN::Error::from_errno(EPERM); }
virtual BAN::ErrorOr<void> create_directory_impl(BAN::StringView, mode_t, uid_t, gid_t) override { return BAN::Error::from_errno(EPERM); }
virtual BAN::ErrorOr<void> link_inode_impl(BAN::StringView, BAN::RefPtr<Inode>) override { return BAN::Error::from_errno(EPERM); }
virtual BAN::ErrorOr<void> rename_inode_impl(BAN::RefPtr<Inode>, BAN::StringView, BAN::StringView) override { return BAN::Error::from_errno(EPERM); }
virtual BAN::ErrorOr<void> unlink_impl(BAN::StringView) override { return BAN::Error::from_errno(EPERM); }
virtual bool can_read_impl() const override { return false; }
virtual bool can_write_impl() const override { return false; }
virtual bool has_error_impl() const override { return false; }
virtual bool has_hungup_impl() const override { return false; }
private:
ProcFDDirectoryInode(Process&, TmpFileSystem&, const TmpInodeInfo&);
private:
Process& m_process;
};
}

View File

@@ -51,6 +51,8 @@ namespace Kernel
: m_info(info)
{}
BAN::ErrorOr<size_t> read_impl(off_t, BAN::ByteSpan buffer) override { return recvfrom_impl(buffer, nullptr, nullptr); }
BAN::ErrorOr<size_t> write_impl(off_t, BAN::ConstByteSpan buffer) override { return sendto_impl(buffer, nullptr, 0); }
BAN::ErrorOr<void> fsync_impl() final override { return {}; }
private:

View File

@@ -28,7 +28,7 @@ namespace Kernel
// 1x singly indirect
// 1x doubly indirect
// 1x triply indirect
BAN::Array<size_t, 5> block;
BAN::Array<paddr_t, 5> block;
static constexpr size_t direct_block_count = 2;
#elif ARCH(i686)
uint32_t __padding;
@@ -36,8 +36,8 @@ namespace Kernel
// 1x singly indirect
// 1x doubly indirect
// 1x triply indirect
BAN::Array<size_t, 16> block;
static constexpr size_t direct_block_count = 13;
BAN::Array<paddr_t, 8> block;
static constexpr size_t direct_block_count = 5;
#else
#error
#endif

View File

@@ -156,8 +156,6 @@ namespace Kernel
virtual BAN::ErrorOr<size_t> list_next_inodes_impl(off_t, struct dirent*, size_t) override final;
virtual BAN::ErrorOr<void> create_file_impl(BAN::StringView, mode_t, uid_t, gid_t) override final;
virtual BAN::ErrorOr<void> create_directory_impl(BAN::StringView, mode_t, uid_t, gid_t) override final;
virtual BAN::ErrorOr<void> link_inode_impl(BAN::StringView, BAN::RefPtr<Inode>) override final;
virtual BAN::ErrorOr<void> rename_inode_impl(BAN::RefPtr<Inode>, BAN::StringView, BAN::StringView) override final;
virtual BAN::ErrorOr<void> unlink_impl(BAN::StringView) override;
virtual bool can_read_impl() const override { return false; }
@@ -169,8 +167,6 @@ namespace Kernel
template<TmpFuncs::for_each_valid_entry_callback F>
void for_each_valid_entry(F callback);
BAN::ErrorOr<void> unlink_inode(BAN::StringView, bool cleanup);
friend class TmpInode;
};

View File

@@ -1,11 +1,12 @@
#pragma once
#include <kernel/BootInfo.h>
#include <kernel/FS/Inode.h>
#include <kernel/FS/FileSystem.h>
namespace Kernel
{
BAN::ErrorOr<bool> unpack_boot_module_into_directory(BAN::RefPtr<Inode>, const BootModule&);
bool is_ustar_boot_module(const BootModule&);
BAN::ErrorOr<void> unpack_boot_module_into_filesystem(BAN::RefPtr<FileSystem>, const BootModule&);
}

View File

@@ -71,13 +71,13 @@ namespace Kernel
File root_file()
{
return File { root_inode(), "/"_sv };
return File(root_inode(), "/"_sv);
}
BAN::ErrorOr<File> file_from_relative_path(BAN::RefPtr<Inode> root_inode, const File& parent, const Credentials&, BAN::StringView, int);
BAN::ErrorOr<File> file_from_absolute_path(BAN::RefPtr<Inode> root_inode, const Credentials& credentials, BAN::StringView path, int flags)
BAN::ErrorOr<File> file_from_relative_path(const File& parent, const Credentials&, BAN::StringView, int);
BAN::ErrorOr<File> file_from_absolute_path(const Credentials& credentials, BAN::StringView path, int flags)
{
return file_from_relative_path(root_inode, File { root_inode, "/"_sv }, credentials, path, flags);
return file_from_relative_path(root_file(), credentials, path, flags);
}
private:

View File

@@ -129,16 +129,9 @@ namespace Kernel
}
#if ARCH(i686)
void set_fsbase(uintptr_t addr);
void set_gsbase(uintptr_t addr);
void set_tls(uintptr_t addr);
#endif
static uint16_t cpu_index_offset() { return m_cpu_index_offset; }
void set_cpu_index(uint8_t index)
{
write_entry(m_cpu_index_offset, 0, index, 0xF2, 0x4);
}
private:
GDT() = default;
@@ -157,13 +150,11 @@ namespace Kernel
private:
#if ARCH(x86_64)
BAN::Array<SegmentDescriptor, 9> m_gdt; // null, kernel code, kernel data, user code (32 bit), user data, user code (64 bit), cpu-index, tss low, tss high
static constexpr uint16_t m_cpu_index_offset = 0x30;
static constexpr uint16_t m_tss_offset = 0x38;
BAN::Array<SegmentDescriptor, 7> m_gdt; // null, kernel code, kernel data, user code, user data, tss low, tss high
static constexpr uint16_t m_tss_offset = 0x28;
#elif ARCH(i686)
BAN::Array<SegmentDescriptor, 10> m_gdt; // null, kernel code, kernel data, user code, user data, processor data, fsbase, gsbase, cpu-index, tss
static constexpr uint16_t m_cpu_index_offset = 0x40;
static constexpr uint16_t m_tss_offset = 0x48;
BAN::Array<SegmentDescriptor, 8> m_gdt; // null, kernel code, kernel data, user code, user data, processor data, tls, tss
static constexpr uint16_t m_tss_offset = 0x38;
#endif
TaskStateSegment m_tss;
const GDTR m_gdtr {

View File

@@ -18,12 +18,10 @@ namespace Kernel
constexpr uint8_t IRQ_VECTOR_BASE = 0x20;
constexpr uint8_t IRQ_MSI_BASE = 0x80;
constexpr uint8_t IRQ_MSI_END = 0xF0;
#if ARCH(i686)
constexpr uint8_t IRQ_SYSCALL = 0xF0; // hard coded in kernel/API/Syscall.h
#endif
constexpr uint8_t IRQ_IPI = 0xF1;
constexpr uint8_t IRQ_TIMER = 0xF2;
constexpr uint8_t IRQ_SYSCALL = 0xF0;
constexpr uint8_t IRQ_YIELD = 0xF1;
constexpr uint8_t IRQ_IPI = 0xF2;
constexpr uint8_t IRQ_TIMER = 0xF3;
#if ARCH(x86_64)
struct GateDescriptor

View File

@@ -15,7 +15,6 @@ namespace Kernel
{
Mouse,
Keyboard,
Joystick,
};
public:

View File

@@ -14,15 +14,6 @@ namespace Kernel::Input
class PS2Controller
{
public:
enum class DeviceType
{
None,
Unknown,
Keyboard,
Mouse,
};
public:
static BAN::ErrorOr<void> initialize(uint8_t scancode_set);
static PS2Controller& get();
@@ -33,12 +24,10 @@ namespace Kernel::Input
// Returns true, if byte is used as command, if returns false, byte is meant to device
bool handle_command_byte(PS2Device*, uint8_t);
uint8_t data_port() const { return m_data_port; }
private:
PS2Controller() = default;
BAN::ErrorOr<void> initialize_impl(uint8_t scancode_set);
BAN::ErrorOr<DeviceType> identify_device(uint8_t);
BAN::ErrorOr<void> identify_device(uint8_t, uint8_t scancode_set);
void device_initialize_task(void*);
@@ -72,9 +61,6 @@ namespace Kernel::Input
};
private:
uint16_t m_command_port { PS2::IOPort::COMMAND };
uint16_t m_data_port { PS2::IOPort::DATA };
BAN::RefPtr<PS2Device> m_devices[2];
Mutex m_mutex;

View File

@@ -27,6 +27,7 @@ namespace Kernel
uintptr_t r10;
uintptr_t r9;
uintptr_t r8;
uintptr_t rdi;
uintptr_t rsi;
uintptr_t rbp;
@@ -35,18 +36,6 @@ namespace Kernel
uintptr_t rcx;
uintptr_t rax;
};
struct YieldRegisters
{
uintptr_t r15;
uintptr_t r14;
uintptr_t r13;
uintptr_t r12;
uintptr_t rbp;
uintptr_t rbx;
uintptr_t ret;
uintptr_t sp;
uintptr_t ip;
};
#elif ARCH(i686)
struct InterruptRegisters
{
@@ -59,16 +48,6 @@ namespace Kernel
uintptr_t ecx;
uintptr_t eax;
};
struct YieldRegisters
{
uintptr_t ebp;
uintptr_t edi;
uintptr_t esi;
uintptr_t ebx;
uintptr_t ret;
uintptr_t sp;
uintptr_t ip;
};
#endif
}

View File

@@ -1,97 +0,0 @@
#pragma once
#include <kernel/Lock/Mutex.h>
#include <kernel/Lock/LockGuard.h>
namespace Kernel
{
class RWLock
{
BAN_NON_COPYABLE(RWLock);
BAN_NON_MOVABLE(RWLock);
public:
RWLock() = default;
void rd_lock()
{
LockGuard _(m_mutex);
while (m_writers_waiting > 0 || m_writer_active)
m_thread_blocker.block_indefinite(&m_mutex);
m_readers_active++;
}
void rd_unlock()
{
LockGuard _(m_mutex);
if (--m_readers_active == 0)
m_thread_blocker.unblock();
}
void wr_lock()
{
LockGuard _(m_mutex);
m_writers_waiting++;
while (m_readers_active > 0 || m_writer_active)
m_thread_blocker.block_indefinite(&m_mutex);
m_writers_waiting--;
m_writer_active = true;
}
void wr_unlock()
{
LockGuard _(m_mutex);
m_writer_active = false;
m_thread_blocker.unblock();
}
private:
Mutex m_mutex;
ThreadBlocker m_thread_blocker;
uint32_t m_readers_active { 0 };
uint32_t m_writers_waiting { 0 };
bool m_writer_active { false };
};
class RWLockRDGuard
{
BAN_NON_COPYABLE(RWLockRDGuard);
BAN_NON_MOVABLE(RWLockRDGuard);
public:
RWLockRDGuard(RWLock& lock)
: m_lock(lock)
{
m_lock.rd_lock();
}
~RWLockRDGuard()
{
m_lock.rd_unlock();
}
private:
RWLock& m_lock;
};
class RWLockWRGuard
{
BAN_NON_COPYABLE(RWLockWRGuard);
BAN_NON_MOVABLE(RWLockWRGuard);
public:
RWLockWRGuard(RWLock& lock)
: m_lock(lock)
{
m_lock.wr_lock();
}
~RWLockWRGuard()
{
m_lock.wr_unlock();
}
private:
RWLock& m_lock;
};
}

View File

@@ -1,76 +0,0 @@
#pragma once
#include <BAN/ByteSpan.h>
#include <BAN/UniqPtr.h>
#include <BAN/Vector.h>
#include <kernel/Memory/Types.h>
namespace Kernel
{
class ByteRingBuffer
{
public:
static BAN::ErrorOr<BAN::UniqPtr<ByteRingBuffer>> create(size_t size);
~ByteRingBuffer();
void push(BAN::ConstByteSpan data)
{
ASSERT(data.size() + m_size <= m_capacity);
uint8_t* buffer_head = reinterpret_cast<uint8_t*>(m_vaddr) + (m_tail + m_size) % m_capacity;
memcpy(buffer_head, data.data(), data.size());
m_size += data.size();
}
void pop(size_t size)
{
ASSERT(size <= m_size);
m_tail = (m_tail + size) % m_capacity;
m_size -= size;
}
void pop_back(size_t size)
{
ASSERT(size <= m_size);
m_size -= size;
}
BAN::ConstByteSpan get_data() const
{
const uint8_t* base = reinterpret_cast<const uint8_t*>(m_vaddr);
return { base + m_tail, m_size };
}
uint8_t front() const
{
ASSERT(!empty());
return reinterpret_cast<const uint8_t*>(m_vaddr)[m_tail];
}
uint8_t back() const
{
ASSERT(!empty());
return reinterpret_cast<const uint8_t*>(m_vaddr)[m_tail + m_size - 1];
}
bool empty() const { return m_size == 0; }
bool full() const { return m_size == m_capacity; }
size_t free() const { return m_capacity - m_size; }
size_t size() const { return m_size; }
size_t capacity() const { return m_capacity; }
private:
ByteRingBuffer(size_t capacity)
: m_capacity(capacity)
{ }
private:
size_t m_size { 0 };
size_t m_tail { 0 };
const size_t m_capacity;
vaddr_t m_vaddr { 0 };
};
}

View File

@@ -8,7 +8,7 @@ namespace Kernel
class DMARegion
{
public:
static BAN::ErrorOr<BAN::UniqPtr<DMARegion>> create(size_t size, PageTable::MemoryType type = PageTable::MemoryType::Uncached);
static BAN::ErrorOr<BAN::UniqPtr<DMARegion>> create(size_t size);
~DMARegion();
size_t size() const { return m_size; }

View File

@@ -27,19 +27,18 @@ namespace Kernel
BAN_NON_MOVABLE(FileBackedRegion);
public:
static BAN::ErrorOr<BAN::UniqPtr<FileBackedRegion>> create(BAN::RefPtr<Inode>, PageTable&, off_t offset, size_t size, AddressRange address_range, Type, PageTable::flags_t, int status_flags);
static BAN::ErrorOr<BAN::UniqPtr<FileBackedRegion>> create(BAN::RefPtr<Inode>, PageTable&, off_t offset, size_t size, AddressRange address_range, Type, PageTable::flags_t);
~FileBackedRegion();
BAN::ErrorOr<void> msync(vaddr_t, size_t, int) override;
virtual BAN::ErrorOr<void> msync(vaddr_t, size_t, int) override;
BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> clone(PageTable& new_page_table) override;
BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> split(size_t offset) override;
virtual BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> clone(PageTable& new_page_table) override;
protected:
BAN::ErrorOr<bool> allocate_page_containing_impl(vaddr_t vaddr, bool wants_write) override;
virtual BAN::ErrorOr<bool> allocate_page_containing_impl(vaddr_t vaddr, bool wants_write) override;
private:
FileBackedRegion(BAN::RefPtr<Inode>, PageTable&, off_t offset, ssize_t size, Type type, PageTable::flags_t flags, int status_flags);
FileBackedRegion(BAN::RefPtr<Inode>, PageTable&, off_t offset, ssize_t size, Type flags, PageTable::flags_t page_flags);
private:
BAN::RefPtr<Inode> m_inode;

View File

@@ -18,8 +18,6 @@ namespace Kernel
static void initialize();
static Heap& get();
void release_boot_modules();
paddr_t take_free_page();
void release_page(paddr_t);

View File

@@ -11,37 +11,22 @@ namespace Kernel
BAN_NON_MOVABLE(MemoryBackedRegion);
public:
static BAN::ErrorOr<BAN::UniqPtr<MemoryBackedRegion>> create(PageTable&, size_t size, AddressRange, Type, PageTable::flags_t, int status_flags);
static BAN::ErrorOr<BAN::UniqPtr<MemoryBackedRegion>> create(PageTable&, size_t size, AddressRange, Type, PageTable::flags_t);
~MemoryBackedRegion();
BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> clone(PageTable& new_page_table) override;
BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> split(size_t offset) override;
virtual BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> clone(PageTable& new_page_table) override;
BAN::ErrorOr<void> msync(vaddr_t, size_t, int) override { return {}; }
virtual BAN::ErrorOr<void> msync(vaddr_t, size_t, int) override { return {}; }
// Copy data from buffer into this region
// This can fail if no memory is mapped and no free memory was available
BAN::ErrorOr<void> copy_data_to_region(size_t offset_into_region, const uint8_t* buffer, size_t buffer_size);
protected:
BAN::ErrorOr<bool> allocate_page_containing_impl(vaddr_t vaddr, bool wants_write) override;
virtual BAN::ErrorOr<bool> allocate_page_containing_impl(vaddr_t vaddr, bool wants_write) override;
private:
MemoryBackedRegion(PageTable&, size_t size, Type, PageTable::flags_t, int status_flags);
private:
struct PhysicalPage
{
PhysicalPage(paddr_t paddr)
: paddr(paddr)
{ }
~PhysicalPage();
BAN::Atomic<uint32_t> ref_count { 1 };
const paddr_t paddr;
};
BAN::Vector<PhysicalPage*> m_physical_pages;
Mutex m_mutex;
MemoryBackedRegion(PageTable&, size_t size, Type, PageTable::flags_t);
};
}

View File

@@ -9,6 +9,12 @@
namespace Kernel
{
struct AddressRange
{
vaddr_t start;
vaddr_t end;
};
class MemoryRegion
{
BAN_NON_COPYABLE(MemoryRegion);
@@ -27,17 +33,12 @@ namespace Kernel
bool contains(vaddr_t address) const;
bool contains_fully(vaddr_t address, size_t size) const;
bool overlaps(vaddr_t address, size_t size) const;
bool is_contained_by(vaddr_t address, size_t size) const;
bool writable() const { return m_flags & PageTable::Flags::ReadWrite; }
size_t size() const { return m_size; }
vaddr_t vaddr() const { return m_vaddr; }
int status_flags() const { return m_status_flags; }
Type type() const { return m_type; }
PageTable::flags_t flags() const { return m_flags; }
size_t virtual_page_count() const { return BAN::Math::div_round_up<size_t>(m_size, PAGE_SIZE); }
size_t physical_page_count() const { return m_physical_page_count; }
@@ -45,7 +46,6 @@ namespace Kernel
void unpin();
void wait_not_pinned();
BAN::ErrorOr<void> mprotect(PageTable::flags_t);
virtual BAN::ErrorOr<void> msync(vaddr_t, size_t, int) = 0;
// Returns error if no memory was available
@@ -54,20 +54,18 @@ namespace Kernel
BAN::ErrorOr<bool> allocate_page_containing(vaddr_t address, bool wants_write);
virtual BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> clone(PageTable& new_page_table) = 0;
virtual BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> split(size_t offset) = 0;
protected:
MemoryRegion(PageTable&, size_t size, Type type, PageTable::flags_t flags, int status_flags);
MemoryRegion(PageTable&, size_t size, Type type, PageTable::flags_t flags);
BAN::ErrorOr<void> initialize(AddressRange);
virtual BAN::ErrorOr<bool> allocate_page_containing_impl(vaddr_t address, bool wants_write) = 0;
protected:
PageTable& m_page_table;
size_t m_size { 0 };
const size_t m_size;
const Type m_type;
PageTable::flags_t m_flags;
const int m_status_flags;
const PageTable::flags_t m_flags;
vaddr_t m_vaddr { 0 };
size_t m_physical_page_count { 0 };

View File

@@ -46,22 +46,13 @@ namespace Kernel
};
public:
static void initialize_fast_page();
static void initialize_and_load();
static void enable_cpu_features();
static void initialize_pre_heap();
static void initialize_post_heap();
static PageTable& kernel();
static PageTable& current() { return *reinterpret_cast<PageTable*>(Processor::get_current_page_table()); }
static constexpr vaddr_t fast_page()
{
#if ARCH(x86_64)
return 0xffffffffbfe00000;
#elif ARCH(i686)
return 0xffe00000;
#endif
}
static constexpr vaddr_t fast_page() { return KERNEL_OFFSET; }
template<with_fast_page_callback F>
static void with_fast_page(paddr_t paddr, F callback)
@@ -109,42 +100,40 @@ namespace Kernel
static BAN::ErrorOr<PageTable*> create_userspace();
~PageTable();
void unmap_page(vaddr_t, bool invalidate = true);
void unmap_page(vaddr_t, bool send_smp_message = true);
void unmap_range(vaddr_t, size_t bytes);
void map_page_at(paddr_t, vaddr_t, flags_t, MemoryType = MemoryType::Normal, bool invalidate = true);
void map_page_at(paddr_t, vaddr_t, flags_t, MemoryType = MemoryType::Normal, bool send_smp_message = true);
void map_range_at(paddr_t, vaddr_t, size_t bytes, flags_t, MemoryType = MemoryType::Normal);
void remove_writable_from_range(vaddr_t, size_t);
paddr_t physical_address_of(vaddr_t) const;
flags_t get_page_flags(vaddr_t) const;
bool is_page_free(vaddr_t) const;
bool is_range_free(vaddr_t, size_t bytes) const;
bool reserve_page(vaddr_t, bool only_free = true, bool invalidate = true);
bool reserve_page(vaddr_t, bool only_free = true);
bool reserve_range(vaddr_t, size_t bytes, bool only_free = true);
vaddr_t reserve_free_page(vaddr_t first_address, vaddr_t last_address = UINTPTR_MAX);
vaddr_t reserve_free_contiguous_pages(size_t page_count, vaddr_t first_address, vaddr_t last_address = UINTPTR_MAX);
void load();
void invalidate_page(vaddr_t addr, bool send_smp_message) { invalidate_range(addr, 1, send_smp_message); }
void invalidate_range(vaddr_t addr, size_t pages, bool send_smp_message);
void initial_load();
InterruptState lock() const { return m_lock.lock(); }
void unlock(InterruptState state) const { m_lock.unlock(state); }
paddr_t paddr() const { return m_highest_paging_struct; }
void debug_dump();
private:
PageTable() = default;
uint64_t get_page_data(vaddr_t) const;
void initialize_kernel();
void map_kernel_memory();
void prepare_fast_page();
static void invalidate(vaddr_t, bool send_smp_message);
static void map_fast_page(paddr_t);
static void unmap_fast_page();

View File

@@ -10,7 +10,7 @@ namespace Kernel
class PhysicalRange
{
public:
PhysicalRange(paddr_t, uint64_t);
PhysicalRange(paddr_t, size_t);
paddr_t reserve_page();
void release_page(paddr_t);

View File

@@ -6,8 +6,6 @@
#include <kernel/Lock/SpinLock.h>
#include <kernel/Memory/MemoryRegion.h>
#include <fcntl.h>
namespace Kernel
{
@@ -57,17 +55,15 @@ namespace Kernel
public:
static BAN::ErrorOr<BAN::UniqPtr<SharedMemoryObject>> create(BAN::RefPtr<SharedMemoryObjectManager::Object>, PageTable&, AddressRange);
BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> clone(PageTable& new_page_table) override;
BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> split(size_t offset) override;
BAN::ErrorOr<void> msync(vaddr_t, size_t, int) override { return {}; }
virtual BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> clone(PageTable& new_page_table) override;
virtual BAN::ErrorOr<void> msync(vaddr_t, size_t, int) override { return {}; }
protected:
BAN::ErrorOr<bool> allocate_page_containing_impl(vaddr_t vaddr, bool wants_write) override;
virtual BAN::ErrorOr<bool> allocate_page_containing_impl(vaddr_t vaddr, bool wants_write) override;
private:
SharedMemoryObject(BAN::RefPtr<SharedMemoryObjectManager::Object> object, PageTable& page_table)
: MemoryRegion(page_table, object->size, MemoryRegion::Type::SHARED, object->flags, O_EXEC | O_RDWR)
: MemoryRegion(page_table, object->size, MemoryRegion::Type::SHARED, object->flags)
, m_object(object)
{ }

View File

@@ -4,7 +4,7 @@
#if ARCH(x86_64)
#define KERNEL_OFFSET 0xFFFFFFFF80000000
#define USERSPACE_END 0x800000000000
#define USERSPACE_END 0xFFFF800000000000
#elif ARCH(i686)
#define KERNEL_OFFSET 0xC0000000
#define USERSPACE_END 0xC0000000
@@ -23,10 +23,4 @@ namespace Kernel
using vaddr_t = uintptr_t;
using paddr_t = uint64_t;
struct AddressRange
{
vaddr_t start;
vaddr_t end;
};
}

View File

@@ -14,27 +14,44 @@ namespace Kernel
BAN_NON_MOVABLE(VirtualRange);
public:
static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr_range(PageTable&, AddressRange address_range, size_t, PageTable::flags_t flags, bool add_guard_pages);
// Create virtual range to fixed virtual address
static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr(PageTable&, vaddr_t, size_t, PageTable::flags_t flags, bool preallocate_pages, bool add_guard_pages);
// Create virtual range to virtual address range
static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr_range(PageTable&, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t, PageTable::flags_t flags, bool preallocate_pages, bool add_guard_pages);
~VirtualRange();
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> clone(PageTable&);
vaddr_t vaddr() const { return m_vaddr + (m_has_guard_pages ? PAGE_SIZE : 0); }
size_t size() const { return m_size - (m_has_guard_pages ? 2 * PAGE_SIZE : 0); }
PageTable::flags_t flags() const { return m_flags; }
paddr_t paddr_of(vaddr_t vaddr) const { return m_page_table.physical_address_of(vaddr & PAGE_ADDR_MASK); }
paddr_t paddr_of(vaddr_t vaddr) const
{
ASSERT(vaddr % PAGE_SIZE == 0);
const size_t index = (vaddr - this->vaddr()) / PAGE_SIZE;
ASSERT(index < m_paddrs.size());
const paddr_t paddr = m_paddrs[index];
ASSERT(paddr);
return paddr;
}
bool contains(vaddr_t address) const { return vaddr() <= address && address < vaddr() + size(); }
BAN::ErrorOr<void> allocate_page_for_demand_paging(vaddr_t address);
private:
VirtualRange(PageTable&, bool has_guard_pages, vaddr_t, size_t, PageTable::flags_t);
VirtualRange(PageTable&, bool preallocated, bool has_guard_pages, vaddr_t, size_t, PageTable::flags_t);
BAN::ErrorOr<void> initialize();
private:
PageTable& m_page_table;
const bool m_preallocated;
const bool m_has_guard_pages;
const vaddr_t m_vaddr;
const size_t m_size;
const PageTable::flags_t m_flags;
BAN::Vector<paddr_t> m_paddrs;
SpinLock m_lock;
friend class BAN::UniqPtr<VirtualRange>;

View File

@@ -13,3 +13,4 @@ void* kmalloc(size_t size, size_t align, bool force_identity_map = false);
void kfree(void*);
BAN::Optional<Kernel::paddr_t> kmalloc_paddr_of(Kernel::vaddr_t);
BAN::Optional<Kernel::vaddr_t> kmalloc_vaddr_of(Kernel::paddr_t);

View File

@@ -31,18 +31,35 @@ namespace Kernel
public:
static BAN::ErrorOr<BAN::UniqPtr<ARPTable>> create();
~ARPTable();
BAN::ErrorOr<BAN::MACAddress> get_mac_from_ipv4(NetworkInterface&, BAN::IPv4Address);
BAN::ErrorOr<void> handle_arp_packet(NetworkInterface&, BAN::ConstByteSpan);
void add_arp_packet(NetworkInterface&, BAN::ConstByteSpan);
private:
ARPTable() = default;
ARPTable();
void packet_handle_task();
BAN::ErrorOr<void> handle_arp_packet(NetworkInterface&, const ARPPacket&);
private:
SpinLock m_arp_table_lock;
struct PendingArpPacket
{
NetworkInterface& interface;
ARPPacket packet;
};
private:
SpinLock m_table_lock;
SpinLock m_pending_lock;
BAN::HashMap<BAN::IPv4Address, BAN::MACAddress> m_arp_table;
Thread* m_thread { nullptr };
BAN::CircularQueue<PendingArpPacket, 128> m_pending_packets;
ThreadBlocker m_pending_thread_blocker;
friend class BAN::UniqPtr<ARPTable>;
};

View File

@@ -23,14 +23,14 @@ namespace Kernel
static BAN::ErrorOr<BAN::RefPtr<E1000>> create(PCI::Device&);
~E1000();
BAN::MACAddress get_mac_address() const override { return m_mac_address; }
virtual BAN::MACAddress get_mac_address() const override { return m_mac_address; }
bool link_up() override { return m_link_up; }
int link_speed() override;
virtual bool link_up() override { return m_link_up; }
virtual int link_speed() override;
size_t payload_mtu() const override { return E1000_RX_BUFFER_SIZE - sizeof(EthernetHeader); }
virtual size_t payload_mtu() const override { return E1000_RX_BUFFER_SIZE - sizeof(EthernetHeader); }
void handle_irq() final override;
virtual void handle_irq() final override;
protected:
E1000(PCI::Device& pci_device)
@@ -45,12 +45,12 @@ namespace Kernel
uint32_t read32(uint16_t reg);
void write32(uint16_t reg, uint32_t value);
BAN::ErrorOr<void> send_bytes(BAN::MACAddress destination, EtherType protocol, BAN::Span<const BAN::ConstByteSpan> payload) override;
virtual BAN::ErrorOr<void> send_bytes(BAN::MACAddress destination, EtherType protocol, BAN::ConstByteSpan) override;
bool can_read_impl() const override { return false; }
bool can_write_impl() const override { return false; }
bool has_error_impl() const override { return false; }
bool has_hungup_impl() const override { return false; }
virtual bool can_read_impl() const override { return false; }
virtual bool can_write_impl() const override { return false; }
virtual bool has_error_impl() const override { return false; }
virtual bool has_hungup_impl() const override { return false; }
private:
BAN::ErrorOr<void> read_mac_address();
@@ -61,7 +61,7 @@ namespace Kernel
void enable_link();
BAN::ErrorOr<void> enable_interrupt();
void receive_thread();
void handle_receive();
protected:
PCI::Device& m_pci_device;
@@ -75,10 +75,6 @@ namespace Kernel
BAN::UniqPtr<DMARegion> m_tx_descriptor_region;
SpinLock m_lock;
bool m_thread_should_die { false };
BAN::Atomic<bool> m_thread_is_dead { true };
ThreadBlocker m_thread_blocker;
BAN::MACAddress m_mac_address {};
bool m_link_up { false };

View File

@@ -12,8 +12,8 @@ namespace Kernel
static BAN::ErrorOr<BAN::RefPtr<E1000E>> create(PCI::Device&);
protected:
void detect_eeprom() override;
uint32_t eeprom_read(uint8_t addr) override;
virtual void detect_eeprom() override;
virtual uint32_t eeprom_read(uint8_t addr) override;
private:
E1000E(PCI::Device& pci_device)

View File

@@ -38,13 +38,14 @@ namespace Kernel
public:
static BAN::ErrorOr<BAN::UniqPtr<IPv4Layer>> create();
~IPv4Layer();
ARPTable& arp_table() { return *m_arp_table; }
BAN::ErrorOr<void> handle_ipv4_packet(NetworkInterface&, BAN::ConstByteSpan);
void add_ipv4_packet(NetworkInterface&, BAN::ConstByteSpan);
virtual void unbind_socket(uint16_t port) override;
virtual BAN::ErrorOr<void> bind_socket_with_target(BAN::RefPtr<NetworkSocket>, const sockaddr* target_address, socklen_t target_address_len) override;
virtual BAN::ErrorOr<void> bind_socket_to_unused(BAN::RefPtr<NetworkSocket>, const sockaddr* send_address, socklen_t send_address_len) override;
virtual BAN::ErrorOr<void> bind_socket_to_address(BAN::RefPtr<NetworkSocket>, const sockaddr* address, socklen_t address_len) override;
virtual BAN::ErrorOr<void> get_socket_address(BAN::RefPtr<NetworkSocket>, sockaddr* address, socklen_t* address_len) override;
@@ -54,15 +55,33 @@ namespace Kernel
virtual size_t header_size() const override { return sizeof(IPv4Header); }
private:
IPv4Layer() = default;
IPv4Layer();
BAN::ErrorOr<in_port_t> find_free_port();
void add_ipv4_header(BAN::ByteSpan packet, BAN::IPv4Address src_ipv4, BAN::IPv4Address dst_ipv4, uint8_t protocol) const;
void packet_handle_task();
BAN::ErrorOr<void> handle_ipv4_packet(NetworkInterface&, BAN::ByteSpan);
private:
BAN::UniqPtr<ARPTable> m_arp_table;
struct PendingIPv4Packet
{
NetworkInterface& interface;
};
RecursiveSpinLock m_bound_socket_lock;
BAN::HashMap<int, BAN::WeakPtr<NetworkSocket>> m_bound_sockets;
private:
RecursiveSpinLock m_bound_socket_lock;
BAN::UniqPtr<ARPTable> m_arp_table;
Thread* m_thread { nullptr };
static constexpr size_t pending_packet_buffer_size = 128 * PAGE_SIZE;
BAN::UniqPtr<VirtualRange> m_pending_packet_buffer;
BAN::CircularQueue<PendingIPv4Packet, 128> m_pending_packets;
ThreadBlocker m_pending_thread_blocker;
SpinLock m_pending_lock;
size_t m_pending_total_size { 0 };
BAN::HashMap<int, BAN::WeakPtr<NetworkSocket>> m_bound_sockets;
friend class BAN::UniqPtr<IPv4Layer>;
};

View File

@@ -9,7 +9,6 @@ namespace Kernel
{
public:
static constexpr size_t buffer_size = BAN::numeric_limits<uint16_t>::max() + 1;
static constexpr size_t buffer_count = 32;
public:
static BAN::ErrorOr<BAN::RefPtr<LoopbackInterface>> create();
@@ -25,9 +24,8 @@ namespace Kernel
LoopbackInterface()
: NetworkInterface(Type::Loopback)
{}
~LoopbackInterface();
BAN::ErrorOr<void> send_bytes(BAN::MACAddress destination, EtherType protocol, BAN::Span<const BAN::ConstByteSpan> payload) override;
BAN::ErrorOr<void> send_bytes(BAN::MACAddress destination, EtherType protocol, BAN::ConstByteSpan) override;
bool can_read_impl() const override { return false; }
bool can_write_impl() const override { return false; }
@@ -35,27 +33,8 @@ namespace Kernel
bool has_hungup_impl() const override { return false; }
private:
void receive_thread();
private:
struct Descriptor
{
uint8_t* addr;
uint32_t size;
uint8_t state;
};
private:
Mutex m_buffer_lock;
SpinLock m_buffer_lock;
BAN::UniqPtr<VirtualRange> m_buffer;
uint32_t m_buffer_tail { 0 };
uint32_t m_buffer_head { 0 };
Descriptor m_descriptors[buffer_count] {};
bool m_thread_should_die { false };
BAN::Atomic<bool> m_thread_is_dead { true };
ThreadBlocker m_thread_blocker;
};
}

View File

@@ -60,11 +60,7 @@ namespace Kernel
virtual dev_t rdev() const override { return m_rdev; }
virtual BAN::StringView name() const override { return m_name; }
BAN::ErrorOr<void> send_bytes(BAN::MACAddress destination, EtherType protocol, BAN::ConstByteSpan payload)
{
return send_bytes(destination, protocol, { &payload, 1 });
}
virtual BAN::ErrorOr<void> send_bytes(BAN::MACAddress destination, EtherType protocol, BAN::Span<const BAN::ConstByteSpan> payload) = 0;
virtual BAN::ErrorOr<void> send_bytes(BAN::MACAddress destination, EtherType protocol, BAN::ConstByteSpan) = 0;
private:
const Type m_type;

View File

@@ -11,7 +11,7 @@ namespace Kernel
BAN::IPv4Address src_ipv4 { 0 };
BAN::IPv4Address dst_ipv4 { 0 };
BAN::NetworkEndian<uint16_t> protocol { 0 };
BAN::NetworkEndian<uint16_t> length { 0 };
BAN::NetworkEndian<uint16_t> extra { 0 };
};
static_assert(sizeof(PseudoHeader) == 12);
@@ -23,7 +23,7 @@ namespace Kernel
virtual ~NetworkLayer() {}
virtual void unbind_socket(uint16_t port) = 0;
virtual BAN::ErrorOr<void> bind_socket_with_target(BAN::RefPtr<NetworkSocket>, const sockaddr* target_address, socklen_t target_address_len) = 0;
virtual BAN::ErrorOr<void> bind_socket_to_unused(BAN::RefPtr<NetworkSocket>, const sockaddr* send_address, socklen_t send_address_len) = 0;
virtual BAN::ErrorOr<void> bind_socket_to_address(BAN::RefPtr<NetworkSocket>, const sockaddr* address, socklen_t address_len) = 0;
virtual BAN::ErrorOr<void> get_socket_address(BAN::RefPtr<NetworkSocket>, sockaddr* address, socklen_t* address_len) = 0;
@@ -36,7 +36,6 @@ namespace Kernel
NetworkLayer() = default;
};
uint16_t calculate_internet_checksum(BAN::ConstByteSpan buffer);
uint16_t calculate_internet_checksum(BAN::Span<const BAN::ConstByteSpan> buffers);
uint16_t calculate_internet_checksum(BAN::ConstByteSpan packet, const PseudoHeader& pseudo_header);
}

View File

@@ -5,8 +5,6 @@
#include <kernel/Networking/NetworkInterface.h>
#include <kernel/Networking/NetworkLayer.h>
#include <netinet/in.h>
namespace Kernel
{
@@ -26,30 +24,18 @@ namespace Kernel
static constexpr uint16_t PORT_NONE = 0;
public:
void bind_address_and_port(const sockaddr*, socklen_t);
void bind_interface_and_port(NetworkInterface*, uint16_t port);
~NetworkSocket();
BAN::ErrorOr<BAN::RefPtr<NetworkInterface>> interface(const sockaddr* target, socklen_t target_len);
NetworkInterface& interface() { ASSERT(m_interface); return *m_interface; }
virtual size_t protocol_header_size() const = 0;
virtual void get_protocol_header(BAN::ByteSpan header, BAN::ConstByteSpan payload, uint16_t dst_port, PseudoHeader) = 0;
virtual void add_protocol_header(BAN::ByteSpan packet, uint16_t dst_port, PseudoHeader) = 0;
virtual NetworkProtocol protocol() const = 0;
virtual void receive_packet(BAN::ConstByteSpan, const sockaddr* sender, socklen_t sender_len) = 0;
bool is_bound() const { return m_address_len >= static_cast<socklen_t>(sizeof(sa_family_t)) && m_address.ss_family != AF_UNSPEC; }
in_port_t bound_port() const
{
ASSERT(is_bound());
ASSERT(m_address.ss_family == AF_INET && m_address_len >= static_cast<socklen_t>(sizeof(sockaddr_in)));
return BAN::network_endian_to_host(reinterpret_cast<const sockaddr_in*>(&m_address)->sin_port);
}
const sockaddr* address() const { return reinterpret_cast<const sockaddr*>(&m_address); }
socklen_t address_len() const { return m_address_len; }
private:
bool can_interface_send_to(const NetworkInterface&, const sockaddr*, socklen_t) const;
bool is_bound() const { return m_interface != nullptr; }
protected:
NetworkSocket(NetworkLayer&, const Socket::Info&);
@@ -59,9 +45,9 @@ namespace Kernel
virtual BAN::ErrorOr<void> getpeername_impl(sockaddr*, socklen_t*) override = 0;
protected:
NetworkLayer& m_network_layer;
sockaddr_storage m_address { .ss_family = AF_UNSPEC, .ss_storage = {} };
socklen_t m_address_len { 0 };
NetworkLayer& m_network_layer;
NetworkInterface* m_interface = nullptr;
uint16_t m_port { PORT_NONE };
};
}

View File

@@ -29,11 +29,9 @@ namespace Kernel
: NetworkInterface(Type::Ethernet)
, m_pci_device(pci_device)
{ }
~RTL8169();
BAN::ErrorOr<void> initialize();
virtual BAN::ErrorOr<void> send_bytes(BAN::MACAddress destination, EtherType protocol, BAN::Span<const BAN::ConstByteSpan>) override;
virtual BAN::ErrorOr<void> send_bytes(BAN::MACAddress destination, EtherType protocol, BAN::ConstByteSpan) override;
virtual bool can_read_impl() const override { return false; }
virtual bool can_write_impl() const override { return false; }
@@ -49,7 +47,7 @@ namespace Kernel
void enable_link();
BAN::ErrorOr<void> enable_interrupt();
void receive_thread();
void handle_receive();
protected:
PCI::Device& m_pci_device;
@@ -65,9 +63,6 @@ namespace Kernel
BAN::UniqPtr<DMARegion> m_tx_descriptor_region;
SpinLock m_lock;
bool m_thread_should_die { false };
BAN::Atomic<bool> m_thread_is_dead { true };
ThreadBlocker m_thread_blocker;
uint32_t m_rx_current { 0 };

Some files were not shown because too many files have changed in this diff Show More