BAN/Kernel: Rework assertion/panic system

BAN/Assert.h does not need any includes, meaning it can be included
anywhere without problems.
Bananymous 2024-03-04 11:41:54 +02:00
parent 8141b9977d
commit 9c36d7c338
20 changed files with 96 additions and 75 deletions
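For context, a rough sketch of how the reworked assertion path behaves from a caller's point of view. The caller below is hypothetical and not part of this commit; the macro behavior and __ban_assertion_failed are taken from the diff itself.

	#include <BAN/Assert.h>

	int checked_divide(int num, int den)
	{
		// ASSERT now needs no other headers. On failure it calls
		// __ban_assertion_failed("<file>:<line>", "ASSERT(den != 0) failed"),
		// which panics in kernel builds and, in userspace builds, prints the
		// location and message via derrorln() and then __builtin_trap()s.
		ASSERT(den != 0);
		return num / den;
	}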

BAN/BAN/Assert.cpp Normal file
View File

@@ -0,0 +1,22 @@
+#include <BAN/Assert.h>
+#if __is_kernel
+#include <kernel/Panic.h>
+[[noreturn]] void __ban_assertion_failed(const char* location, const char* msg)
+{
+	Kernel::panic_impl(location, msg);
+}
+#else
+#include <BAN/Debug.h>
+[[noreturn]] void __ban_assertion_failed(const char* location, const char* msg)
+{
+	derrorln("{}: {}", location, msg);
+	__builtin_trap();
+}
+#endif

View File

@@ -3,6 +3,7 @@ cmake_minimum_required(VERSION 3.26)
 project(BAN CXX)
 set(BAN_SOURCES
+	BAN/Assert.cpp
 	BAN/New.cpp
 	BAN/String.cpp
 	BAN/StringView.cpp

View File

@@ -1,33 +1,13 @@
 #pragma once
-#include <BAN/Traits.h>
-#if defined(__is_kernel)
-#include <kernel/Panic.h>
-#define ASSERT(cond) \
-	do { \
-		if (!(cond)) \
-			Kernel::panic("ASSERT(" #cond ") failed"); \
-	} while (false)
-#define __ASSERT_BIN_OP(lhs, rhs, name, op) \
-	do { \
-		auto&& _lhs = (lhs); \
-		auto&& _rhs = (rhs); \
-		if (!(_lhs op _rhs)) \
-			Kernel::panic(name "(" #lhs ", " #rhs ") ({} " #op " {}) failed", _lhs, _rhs); \
-	} while (false)
-#define ASSERT_LT(lhs, rhs) __ASSERT_BIN_OP(lhs, rhs, "ASSERT_LT", <)
-#define ASSERT_LTE(lhs, rhs) __ASSERT_BIN_OP(lhs, rhs, "ASSERT_LTE", <=)
-#define ASSERT_GT(lhs, rhs) __ASSERT_BIN_OP(lhs, rhs, "ASSERT_GT", >)
-#define ASSERT_GTE(lhs, rhs) __ASSERT_BIN_OP(lhs, rhs, "ASSERT_GTE", >=)
-#define ASSERT_EQ(lhs, rhs) __ASSERT_BIN_OP(lhs, rhs, "ASSERT_EQ", ==)
-#define ASSERT_NEQ(lhs, rhs) __ASSERT_BIN_OP(lhs, rhs, "ASSERT_NEQ", !=)
-#define ASSERT_NOT_REACHED() Kernel::panic("ASSERT_NOT_REACHED() failed")
-#else
-#include <assert.h>
-#define ASSERT(cond) assert((cond) && "ASSERT("#cond") failed")
-#define ASSERT_NOT_REACHED() do { assert(false && "ASSERT_NOT_REACHED() failed"); __builtin_unreachable(); } while (false)
-#endif
+#define __ban_assert_stringify_helper(s) #s
+#define __ban_assert_stringify(s) __ban_assert_stringify_helper(s)
+#define ASSERT(cond) \
+	(__builtin_expect(!(cond), 0) \
+		? __ban_assertion_failed(__FILE__ ":" __ban_assert_stringify(__LINE__), "ASSERT(" #cond ") failed") \
+		: (void)0)
+#define ASSERT_NOT_REACHED() ASSERT(false)
+[[noreturn]] void __ban_assertion_failed(const char* location, const char* msg);
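As a worked example of the new macro (hypothetical file name and line number), ASSERT(ptr) on line 12 of Foo.cpp expands roughly to:

	(__builtin_expect(!(ptr), 0)
		? __ban_assertion_failed("Foo.cpp" ":" "12", "ASSERT(ptr) failed")
		: (void)0)

Adjacent string literals are concatenated at compile time into "Foo.cpp:12". The two-level __ban_assert_stringify helper is what lets __LINE__ expand to 12 before being stringified; a single-level #s would produce the literal string "__LINE__".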

View File

@@ -1,6 +1,7 @@
 #pragma once
 #include <BAN/Assert.h>
+#include <BAN/Move.h>
 #include <BAN/PlacementNew.h>
 #include <stdint.h>

View File

@@ -1,8 +1,10 @@
 #pragma once
 #if __is_kernel
-#error "This is userspace only file"
-#endif
+#include <kernel/Debug.h>
+#else
 #include <BAN/Formatter.h>
 #include <stdio.h>
@@ -29,3 +31,5 @@
 	dprintln(__VA_ARGS__); \
 	BAN::Formatter::print(__debug_putchar, "\e[m"); \
 } while(false)
+#endif

View File

@@ -1,4 +1,6 @@
 #pragma once
+#include <stddef.h>
 inline void* operator new(size_t, void* addr) { return addr; }
 inline void* operator new[](size_t, void* addr) { return addr; }

View File

@@ -136,6 +136,7 @@ set(LAI_SOURCES
 )
 set(BAN_SOURCES
+	../BAN/BAN/Assert.cpp
 	../BAN/BAN/New.cpp
 	../BAN/BAN/String.cpp
 	../BAN/BAN/StringView.cpp

View File

@@ -163,6 +163,13 @@ namespace Kernel::IDT
 	extern "C" void cpp_isr_handler(uint64_t isr, uint64_t error, InterruptStack& interrupt_stack, const Registers* regs)
 	{
+		if (g_paniced)
+		{
+			// FIXME: tell other processors kernel panic has occured
+			dprintln("Processor {} halted", Processor::current_id());
+			asm volatile("cli; 1: hlt; jmp 1b");
+		}
 #if __enable_sse
 		bool from_userspace = (interrupt_stack.cs & 0b11) == 0b11;
 		if (from_userspace)

View File

@@ -208,7 +208,7 @@ namespace Kernel
 	void PageTable::map_fast_page(paddr_t paddr)
 	{
 		ASSERT(s_kernel);
-		ASSERT_NEQ(paddr, 0);
+		ASSERT(paddr);
 		SpinLockGuard _(s_fast_page_lock);
@@ -322,7 +322,7 @@ namespace Kernel
 		ASSERT(vaddr);
 		ASSERT(vaddr != fast_page());
 		if (vaddr >= KERNEL_OFFSET)
-			ASSERT_GTE(vaddr, (vaddr_t)g_kernel_start);
+			ASSERT(vaddr >= (vaddr_t)g_kernel_start);
 		if ((vaddr >= KERNEL_OFFSET) != (this == s_kernel))
 			Kernel::panic("unmapping {8H}, kernel: {}", vaddr, this == s_kernel);
@@ -368,7 +368,7 @@ namespace Kernel
 		ASSERT(vaddr);
 		ASSERT(vaddr != fast_page());
 		if (vaddr >= KERNEL_OFFSET && s_current)
-			ASSERT_GTE(vaddr, (vaddr_t)g_kernel_start);
+			ASSERT(vaddr >= (vaddr_t)g_kernel_start);
 		if ((vaddr >= KERNEL_OFFSET) != (this == s_kernel))
 			Kernel::panic("mapping {8H} to {8H}, kernel: {}", paddr, vaddr, this == s_kernel);

View File

@@ -21,12 +21,12 @@ namespace Kernel
 		{
 			auto tid = Scheduler::current_tid();
 			if (tid == m_locker)
-				ASSERT_GT(m_lock_depth, 0);
+				ASSERT(m_lock_depth > 0);
 			else
 			{
 				while (!m_locker.compare_exchange(-1, tid))
 					Scheduler::get().reschedule();
-				ASSERT_EQ(m_lock_depth, 0);
+				ASSERT(m_lock_depth == 0);
 			}
 			m_lock_depth++;
 		}
@@ -35,20 +35,20 @@ namespace Kernel
 		{
 			auto tid = Scheduler::current_tid();
 			if (tid == m_locker)
-				ASSERT_GT(m_lock_depth, 0);
+				ASSERT(m_lock_depth > 0);
 			else
 			{
 				if (!m_locker.compare_exchange(-1, tid))
 					return false;
-				ASSERT_EQ(m_lock_depth, 0);
+				ASSERT(m_lock_depth == 0);
 			}
 			m_lock_depth++;
 		}
 		void unlock()
 		{
-			ASSERT_EQ(m_locker.load(), Scheduler::current_tid());
-			ASSERT_GT(m_lock_depth, 0);
+			ASSERT(m_locker == Scheduler::current_tid());
+			ASSERT(m_lock_depth > 0);
 			if (--m_lock_depth == 0)
 				m_locker = -1;
 		}
@@ -74,7 +74,7 @@ namespace Kernel
 		{
 			auto tid = Scheduler::current_tid();
 			if (tid == m_locker)
-				ASSERT_GT(m_lock_depth, 0);
+				ASSERT(m_lock_depth > 0);
 			else
 			{
 				bool has_priority = tid ? !Thread::current().is_userspace() : true;
@@ -82,7 +82,7 @@ namespace Kernel
 				m_queue_length++;
 				while (!(has_priority || m_queue_length == 0) || !m_locker.compare_exchange(-1, tid))
 					Scheduler::get().reschedule();
-				ASSERT_EQ(m_lock_depth, 0);
+				ASSERT(m_lock_depth == 0);
 			}
 			m_lock_depth++;
 		}
@@ -91,7 +91,7 @@ namespace Kernel
 		{
 			auto tid = Scheduler::current_tid();
 			if (tid == m_locker)
-				ASSERT_GT(m_lock_depth, 0);
+				ASSERT(m_lock_depth > 0);
 			else
 			{
 				bool has_priority = tid ? !Thread::current().is_userspace() : true;
@@ -99,7 +99,7 @@ namespace Kernel
 					return false;
 				if (has_priority)
 					m_queue_length++;
-				ASSERT_EQ(m_lock_depth, 0);
+				ASSERT(m_lock_depth == 0);
 			}
 			m_lock_depth++;
 		}
@@ -107,8 +107,8 @@ namespace Kernel
 		void unlock()
 		{
 			auto tid = Scheduler::current_tid();
-			ASSERT_EQ(m_locker.load(), tid);
-			ASSERT_GT(m_lock_depth, 0);
+			ASSERT(m_locker == tid);
+			ASSERT(m_lock_depth > 0);
 			if (--m_lock_depth == 0)
 			{
 				bool has_priority = tid ? !Thread::current().is_userspace() : true;

View File

@@ -2,28 +2,29 @@
 #include <kernel/Debug.h>
-#define panic(...) detail::panic_impl(__FILE__, __LINE__, __VA_ARGS__)
-namespace Kernel::detail
+#define __panic_stringify_helper(s) #s
+#define __panic_stringify(s) __panic_stringify_helper(s)
+#define panic(...) panic_impl(__FILE__ ":" __panic_stringify(__LINE__), __VA_ARGS__)
+namespace Kernel
 {
-	extern bool g_paniced;
+	extern volatile bool g_paniced;
 	template<typename... Args>
 	__attribute__((__noreturn__))
-	static void panic_impl(const char* file, int line, const char* message, Args&&... args)
+	static void panic_impl(const char* location, const char* message, Args&&... args)
 	{
 		asm volatile("cli");
-		derrorln("Kernel panic at {}:{}", file, line);
+		derrorln("Kernel panic at {}", location);
 		derrorln(message, BAN::forward<Args>(args)...);
 		if (!g_paniced)
 		{
 			g_paniced = true;
 			Debug::dump_stack_trace();
 		}
-		for (;;)
-			asm volatile("hlt");
-		__builtin_unreachable();
+		asm volatile("ud2");
 	}
 }
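The panic macro now builds the location string the same way; a hypothetical call site Kernel::panic("oops: {}", value) on line 42 of Bar.cpp expands roughly to:

	Kernel::panic_impl("Bar.cpp" ":" "42", "oops: {}", value);

Folding file and line into a single compile-time string is what lets BAN/Assert.cpp hand a ready-made location straight to Kernel::panic_impl instead of passing a separate file pointer and line number.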

View File

@@ -128,9 +128,9 @@ namespace Kernel
 		auto inode_location = find_inode(ino);
 		PageTable::with_fast_page(inode_location.paddr, [&] {
 			auto& inode_info = PageTable::fast_page_as_sized<TmpInodeInfo>(inode_location.index);
-			ASSERT_EQ(inode_info.nlink, 0);
+			ASSERT(inode_info.nlink == 0);
 			for (auto paddr : inode_info.block)
-				ASSERT_EQ(paddr, 0);
+				ASSERT(paddr == 0);
 			inode_info = {};
 		});
 		ASSERT(!m_inode_cache.contains(ino));
@@ -166,8 +166,8 @@ namespace Kernel
 	{
 		LockGuard _(m_mutex);
-		ASSERT_GTE(ino, first_inode);
-		ASSERT_LT(ino, max_inodes);
+		ASSERT(ino >= first_inode);
+		ASSERT(ino < max_inodes);
 		constexpr size_t inodes_per_page = PAGE_SIZE / sizeof(TmpInodeInfo);
@@ -220,7 +220,7 @@ namespace Kernel
 	{
 		LockGuard _(m_mutex);
-		ASSERT_GT(index, 0);
+		ASSERT(index > 0);
 		return find_indirect(m_data_pages, index - first_data_page, 3);
 	}

View File

@@ -10,7 +10,7 @@ namespace Kernel
 	InterruptState SpinLock::lock()
 	{
 		auto id = Processor::current_id();
-		ASSERT_NEQ(m_locker.load(), id);
+		ASSERT(m_locker != id);
 		auto state = Processor::get_interrupt_state();
 		Processor::set_interrupt_state(InterruptState::Disabled);
@@ -23,7 +23,7 @@ namespace Kernel
 	void SpinLock::unlock(InterruptState state)
 	{
-		ASSERT_EQ(m_locker.load(), Processor::current_id());
+		ASSERT(m_locker == Processor::current_id());
 		m_locker.store(PROCESSOR_NONE, BAN::MemoryOrder::memory_order_release);
 		Processor::set_interrupt_state(state);
 	}
@@ -36,12 +36,12 @@ namespace Kernel
 		Processor::set_interrupt_state(InterruptState::Disabled);
 		if (id == m_locker)
-			ASSERT_GT(m_lock_depth, 0);
+			ASSERT(m_lock_depth > 0);
 		else
 		{
 			while (!m_locker.compare_exchange(PROCESSOR_NONE, id, BAN::MemoryOrder::memory_order_acquire))
 				__builtin_ia32_pause();
-			ASSERT_EQ(m_lock_depth, 0);
+			ASSERT(m_lock_depth == 0);
 		}
 		m_lock_depth++;
@@ -51,8 +51,8 @@ namespace Kernel
 	void RecursiveSpinLock::unlock(InterruptState state)
 	{
-		ASSERT_EQ(m_locker.load(), Processor::current_id());
-		ASSERT_GT(m_lock_depth, 0);
+		ASSERT(m_locker == Processor::current_id());
+		ASSERT(m_lock_depth > 0);
 		if (--m_lock_depth == 0)
 			m_locker.store(PROCESSOR_NONE, BAN::MemoryOrder::memory_order_release);
 		Processor::set_interrupt_state(state);

View File

@@ -129,7 +129,7 @@ namespace Kernel
 		size_t file_offset = m_offset + (vaddr - m_vaddr);
 		size_t bytes = BAN::Math::min<size_t>(m_size - file_offset, PAGE_SIZE);
-		ASSERT_EQ(&PageTable::current(), &m_page_table);
+		ASSERT(&PageTable::current() == &m_page_table);
 		auto read_ret = m_inode->read(file_offset, BAN::ByteSpan((uint8_t*)vaddr, bytes));
 		if (read_ret.is_error())

View File

@@ -158,9 +158,9 @@ namespace Kernel
 			return;
 		// Verify no overflow
-		ASSERT_LTE(bytes, size());
-		ASSERT_LTE(offset, size());
-		ASSERT_LTE(offset, size() - bytes);
+		ASSERT(bytes <= size());
+		ASSERT(offset <= size());
+		ASSERT(offset <= size() - bytes);
 		if (&PageTable::current() == &m_page_table || &PageTable::kernel() == &m_page_table)
 		{

View File

@@ -259,7 +259,7 @@ namespace Kernel
 	BAN::ErrorOr<void> E1000::send_bytes(BAN::MACAddress destination, EtherType protocol, BAN::ConstByteSpan buffer)
 	{
-		ASSERT_LTE(buffer.size() + sizeof(EthernetHeader), E1000_TX_BUFFER_SIZE);
+		ASSERT(buffer.size() + sizeof(EthernetHeader) <= E1000_TX_BUFFER_SIZE);
 		SpinLockGuard _(m_lock);
@@ -299,7 +299,7 @@ namespace Kernel
 			auto& descriptor = reinterpret_cast<volatile e1000_rx_desc*>(m_rx_descriptor_region->vaddr())[rx_current];
 			if (!(descriptor.status & 1))
 				break;
-			ASSERT_LTE((uint16_t)descriptor.length, E1000_RX_BUFFER_SIZE);
+			ASSERT(descriptor.length <= E1000_RX_BUFFER_SIZE);
 			NetworkManager::get().on_receive(*this, BAN::ConstByteSpan {
 				reinterpret_cast<const uint8_t*>(m_rx_buffer_region->vaddr() + rx_current * E1000_RX_BUFFER_SIZE),

View File

@@ -455,7 +455,7 @@ namespace Kernel
 		if (m_send_window.data_size > 0 && m_send_window.current_ack - m_send_window.has_ghost_byte > m_send_window.start_seq)
 		{
 			uint32_t acknowledged_bytes = m_send_window.current_ack - m_send_window.start_seq - m_send_window.has_ghost_byte;
-			ASSERT_LTE(acknowledged_bytes, m_send_window.data_size);
+			ASSERT(acknowledged_bytes <= m_send_window.data_size);
 			m_send_window.data_size -= acknowledged_bytes;
 			m_send_window.start_seq += acknowledged_bytes;

View File

@@ -1,6 +1,6 @@
 #include <kernel/Panic.h>
-namespace Kernel::detail
+namespace Kernel
 {
-	bool g_paniced = false;
+	volatile bool g_paniced = false;
 }

View File

@@ -170,7 +170,7 @@ namespace Kernel
 		// Signal mask is inherited
 		// Setup stack for returning
-		ASSERT_EQ(m_rsp % PAGE_SIZE, 0u);
+		ASSERT(m_rsp % PAGE_SIZE == 0);
 		PageTable::with_fast_page(process().page_table().physical_address_of(m_rsp - PAGE_SIZE), [&] {
 			uintptr_t rsp = PageTable::fast_page() + PAGE_SIZE;
 			write_to_stack(rsp, nullptr); // alignment
@@ -199,7 +199,7 @@ namespace Kernel
 		m_signal_pending_mask = 0;
 		m_signal_block_mask = ~0ull;
-		ASSERT_EQ(m_rsp % PAGE_SIZE, 0u);
+		ASSERT(m_rsp % PAGE_SIZE == 0);
 		PageTable::with_fast_page(process().page_table().physical_address_of(m_rsp - PAGE_SIZE), [&] {
 			uintptr_t rsp = PageTable::fast_page() + PAGE_SIZE;
 			write_to_stack(rsp, nullptr); // alignment

View File

@@ -30,6 +30,8 @@ set(LIBC_SOURCES
 	unistd.cpp
 	math.S
 	icxxabi.cpp
+	../BAN/BAN/Assert.cpp
 )
 add_custom_target(libc-headers