Compare commits


No commits in common. "99e30a4d7dc75e36d369ccbf1f821ec4f55770af" and "fe17958b9f5c8b0f8c5534e94c3ff1ddf6e4c725" have entirely different histories.

37 changed files with 444 additions and 697 deletions


@@ -86,7 +86,7 @@ namespace LibELF
 			return BAN::Error::from_errno(ENOEXEC);
 		}
-#if ARCH(i686)
+#if ARCH(i386)
 		if (m_file_header.e_ident[EI_CLASS] != ELFCLASS32)
 #elif ARCH(x86_64)
 		if (m_file_header.e_ident[EI_CLASS] != ELFCLASS64)


@@ -42,7 +42,7 @@ namespace LibELF
 		const Elf32SectionHeader& section_header32(size_t) const;
 		const char* lookup_section_name32(uint32_t) const;
 		const char* lookup_string32(size_t, uint32_t) const;
-#if ARCH(i686)
+#if ARCH(i386)
 		const Elf32FileHeader& file_header_native() const { return file_header32(); }
 		const Elf32ProgramHeader& program_header_native(size_t index) const { return program_header32(index); }
 		const Elf32SectionHeader& section_header_native(size_t index) const { return section_header32(index); }


@@ -155,7 +155,7 @@ namespace LibELF
 		Elf64Xword p_align;
 	};
-#if ARCH(i686)
+#if ARCH(i386)
 	using ElfNativeAddr = Elf32Addr;
 	using ElfNativeOff = Elf32Off;
 	using ElfNativeHalf = Elf32Half;


@@ -4,7 +4,7 @@ project(kernel CXX C ASM)
 if("${BANAN_ARCH}" STREQUAL "x86_64")
 	set(ELF_FORMAT elf64-x86-64)
-elseif("${BANAN_ARCH}" STREQUAL "i686")
+elseif("${BANAN_ARCH}" STREQUAL "i386")
 	set(ELF_FORMAT elf32-i386)
 endif()
@@ -120,16 +120,13 @@ if("${BANAN_ARCH}" STREQUAL "x86_64")
 		${LAI_SOURCES}
 		kernel/lai_host.cpp
 	)
-elseif("${BANAN_ARCH}" STREQUAL "i686")
+elseif("${BANAN_ARCH}" STREQUAL "i386")
 	set(KERNEL_SOURCES
 		${KERNEL_SOURCES}
-		arch/i686/boot.S
-		arch/i686/GDT.cpp
-		arch/i686/IDT.cpp
-		arch/i686/PageTable.cpp
-		arch/i686/Signal.S
-		arch/i686/Syscall.S
-		arch/i686/Thread.S
+		arch/i386/boot.S
+		arch/i386/SpinLock.S
+		arch/i386/Syscall.S
+		arch/i386/Thread.S
 	)
 else()
 	message(FATAL_ERROR "unsupported architecure ${BANAN_ARCH}")
@@ -182,8 +179,8 @@ if("${BANAN_ARCH}" STREQUAL "x86_64")
 	target_compile_options(kernel PUBLIC -mcmodel=kernel -mno-red-zone)
 	target_link_options(kernel PUBLIC LINKER:-z,max-page-size=4096)
 	target_link_options(kernel PUBLIC LINKER:-T,${CMAKE_CURRENT_SOURCE_DIR}/arch/x86_64/linker.ld)
-elseif("${BANAN_ARCH}" STREQUAL "i686")
-	target_link_options(kernel PUBLIC LINKER:-T,${CMAKE_CURRENT_SOURCE_DIR}/arch/i686/linker.ld)
+elseif("${BANAN_ARCH}" STREQUAL "i386")
+	target_link_options(kernel PUBLIC LINKER:-T,${CMAKE_CURRENT_SOURCE_DIR}/arch/i386/linker.ld)
 endif()
 target_link_options(kernel PUBLIC -ffreestanding -nostdlib)


@@ -0,0 +1,19 @@
+.global spinlock_lock_asm
+spinlock_lock_asm:
+	movl 4(%esp), %eax
+	lock; btsl $0, (%eax)
+	jnc .done
+.retry:
+	pause
+	testl $1, (%eax)
+	jne .retry
+	lock; btsl $0, (%eax)
+	jc .retry
+.done:
+	ret
+
+.global spinlock_unlock_asm
+spinlock_unlock_asm:
+	movl 4(%esp), %eax
+	movl $0, (%eax)
+	ret
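
The lock routine above is a test-and-test-and-set spinlock: "lock btsl" atomically sets bit 0 and leaves the old bit in the carry flag, and the "pause" loop then spins read-only until the lock looks free before retrying the atomic set, which keeps the cache line shared while waiting. A rough C++ sketch of the same semantics, using std::atomic instead of the kernel's own primitives (illustrative only, not code from this repository):

    #include <atomic>
    #include <cstdint>

    void spinlock_lock(std::atomic<uint32_t>& lock)
    {
        // atomic test-and-set; old value 0 means we acquired the lock
        while (lock.exchange(1, std::memory_order_acquire) != 0)
            while (lock.load(std::memory_order_relaxed) != 0)
                __builtin_ia32_pause(); // like the "pause" instruction above
    }

    void spinlock_unlock(std::atomic<uint32_t>& lock)
    {
        // a plain store suffices, as in spinlock_unlock_asm
        lock.store(0, std::memory_order_release);
    }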


@@ -1,6 +1,5 @@
 .global sys_fork_trampoline
 sys_fork_trampoline:
-	ud2
 	subl $4, %esp
 	pushl %ebx
 	pushl %ebp

kernel/arch/i386/Thread.S (new file, 47 lines)

@@ -0,0 +1,47 @@
+# uint32_t read_ip()
+.global read_ip
+read_ip:
+	popl %eax
+	jmp *%eax
+
+exit_thread_trampoline:
+	addl $4, %esp
+	pushl (%esp)
+	ret
+
+# void start_thread(uint32_t sp, uint32_t ip)
+.global start_thread
+start_thread:
+	movl 8(%esp), %ecx
+	movl 4(%esp), %esp
+	movl $0, %ebp
+	pushl $exit_thread_trampoline
+	sti
+	jmp *%ecx
+
+# void continue_thread(uint32_t sp, uint32_t ip)
+.global continue_thread
+continue_thread:
+	movl 8(%esp), %ecx
+	movl 4(%esp), %esp
+	movl $0, %eax
+	jmp *%ecx
+
+# void thread_jump_userspace(uint32_t sp, uint32_t ip)
+.global thread_jump_userspace
+thread_jump_userspace:
+	movl $0x23, %eax
+	movw %ax, %ds
+	movw %ax, %es
+	movw %ax, %fs
+	movw %ax, %gs
+
+	movl 8(%esp), %ecx
+	movl 4(%esp), %esp
+
+	pushl $0x23
+	pushl %esp
+	pushfl
+	pushl $0x1B
+	pushl %ecx
+	iret
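
thread_jump_userspace builds the five-word frame that iret pops when returning to a lower privilege level. A sketch of that frame as a C++ struct, top of stack first; the selector values assume the usual layout where user code is GDT entry 3 (0x1B with RPL 3) and user data is entry 4 (0x23 with RPL 3), which is not shown in this diff:

    #include <cstdint>

    // Stack frame consumed by "iret" on a privilege-level change,
    // matching the pushes in thread_jump_userspace above.
    // Illustrative sketch, not code from this repository.
    struct IretFrame
    {
        uint32_t eip;    // pushl %ecx  - userspace entry point
        uint32_t cs;     // pushl $0x1B - user code selector (assumed)
        uint32_t eflags; // pushfl
        uint32_t esp;    // pushl %esp  - user stack pointer
        uint32_t ss;     // pushl $0x23 - user stack selector (assumed)
    };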

kernel/arch/i386/boot.S (new file, 182 lines)

@@ -0,0 +1,182 @@
+# Declare constants for the multiboot header
+.set ALIGN, 1<<0 # align loaded modules on page boundaries
+.set MEMINFO, 1<<1 # provide memory map
+.set VIDEOINFO, 1<<2 # provide video info
+.set MB_FLAGS, ALIGN | MEMINFO | VIDEOINFO # this is the Multiboot 'flag' field
+.set MB_MAGIC, 0x1BADB002 # 'magic number' lets bootloader find the header
+.set MB_CHECKSUM, -(MB_MAGIC + MB_FLAGS) # checksum of above, to prove we are multiboot
+
+# Multiboot header
+.section .multiboot, "aw"
+	.align 4
+	.long MB_MAGIC
+	.long MB_FLAGS
+	.long MB_CHECKSUM
+	.skip 20
+	.long 0
+	.long 800
+	.long 600
+	.long 32
+
+.section .bss, "aw", @nobits
+	# Create stack
+	.global g_boot_stack_bottom
+g_boot_stack_bottom:
+	.skip 16384
+	.global g_boot_stack_top
+g_boot_stack_top:
+
+	# 0 MiB -> 1 MiB: bootloader stuff
+	# 1 MiB ->      : kernel
+
+	.align 32
+boot_page_directory_pointer_table:
+	.skip 4 * 8
+	.align 4096
+boot_page_directory1:
+	.skip 512 * 8
+
+	.global g_kernel_cmdline
+g_kernel_cmdline:
+	.skip 4096
+
+	.global g_multiboot_info
+g_multiboot_info:
+	.skip 4
+	.global g_multiboot_magic
+g_multiboot_magic:
+	.skip 4
+
+.section .text
+
+boot_gdt:
+	.quad 0x0000000000000000 # null
+	.quad 0x00CF9A000000FFFF # kernel code
+	.quad 0x00CF92000000FFFF # kernel data
+boot_gdtr:
+	.short . - boot_gdt - 1
+	.long boot_gdt
+
+has_cpuid:
+	pushfl
+	pushfl
+	xorl $0x00200000, (%esp)
+	popfl
+	pushfl
+	popl %eax
+	xorl (%esp), %eax
+	popfl
+	testl $0x00200000, %eax
+	ret
+
+has_pae:
+	movl $0, %eax
+	cpuid
+	testl $(1 << 6), %edx
+	ret
+
+has_sse:
+	movl $1, %eax
+	cpuid
+	testl $(1 << 25), %edx
+	ret
+
+check_requirements:
+	call has_cpuid
+	jz .exit
+	call has_pae
+	jz .exit
+	call has_sse
+	jz .exit
+	ret
+.exit:
+	jmp system_halt
+
+copy_kernel_commandline:
+	pushl %esi
+	pushl %edi
+	movl g_multiboot_info, %esi
+	addl $16, %esi
+	movl (%esi), %esi
+	movl $1024, %ecx
+	movl $g_kernel_cmdline, %edi
+	rep movsl
+	popl %edi
+	popl %esi
+	ret
+
+enable_sse:
+	movl %cr0, %eax
+	andw $0xFFFB, %ax
+	orw $0x0002, %ax
+	movl %eax, %cr0
+	movl %cr4, %eax
+	orw $0x0600, %ax
+	movl %eax, %cr4
+	ret
+
+initialize_paging:
+	# identity map first 6 MiB
+	movl $(0x00000000 + 0x83), boot_page_directory1 + 0
+	movl $(0x00200000 + 0x83), boot_page_directory1 + 8
+	movl $(0x00400000 + 0x83), boot_page_directory1 + 16
+	movl $(boot_page_directory1 + 0x01), boot_page_directory_pointer_table
+
+	# enable PAE
+	movl %cr4, %ecx
+	orl $0x20, %ecx
+	movl %ecx, %cr4
+
+	# set address of paging structures
+	movl $boot_page_directory_pointer_table, %ecx
+	movl %ecx, %cr3
+
+	# enable paging
+	movl %cr0, %ecx
+	orl $0x80000000, %ecx
+	movl %ecx, %cr0
+
+	ret
+
+initialize_gdt:
+	lgdt boot_gdtr
+	# flush gdt
+	movw $0x10, %ax
+	movw %ax, %ds
+	movw %ax, %es
+	movw %ax, %fs
+	movw %ax, %gs
+	movw %ax, %ss
+	jmp $0x08, $flush
+flush:
+	ret
+
+.global _start
+.type _start, @function
+_start:
+	# Initialize stack and multiboot info
+	movl $g_boot_stack_top, %esp
+	movl %eax, g_multiboot_magic
+	movl %ebx, g_multiboot_info
+
+	call copy_kernel_commandline
+	call check_requirements
+	call enable_sse
+	call initialize_paging
+	call initialize_gdt
+
+	call _init
+
+	# call to the kernel itself (clear ebp for stacktrace)
+	xorl %ebp, %ebp
+	call kernel_main
+
+	call _fini
+
+system_halt:
+	xchgw %bx, %bx
+	cli
+1:	hlt
+	jmp 1b
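
The 0x83 written into boot_page_directory1 decodes as present | read/write | page-size: with PAE enabled, a page-directory entry whose PS bit is set maps a 2 MiB page directly, so the three entries identity map the first 6 MiB. The 0x01 on the PDPT entry is just the present bit, since PAE PDPT entries accept only a few flags. In C++ terms (a sketch, not code from this repository):

    #include <cstdint>

    // Decode of the 0x83 page-directory entries in initialize_paging above.
    constexpr uint64_t PG_PRESENT    = 1u << 0;
    constexpr uint64_t PG_READ_WRITE = 1u << 1;
    constexpr uint64_t PG_PAGE_SIZE  = 1u << 7; // 2 MiB page under PAE

    constexpr uint64_t huge_page_entry(uint64_t paddr)
    {
        return paddr | PG_PRESENT | PG_READ_WRITE | PG_PAGE_SIZE;
    }

    static_assert(huge_page_entry(0x00000000) == 0x000083);
    static_assert(huge_page_entry(0x00200000) == 0x200083);
    static_assert(huge_page_entry(0x00400000) == 0x400083);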


@@ -0,0 +1,28 @@
+ENTRY (_start)
+
+SECTIONS
+{
+	. = 0x00100000;
+
+	g_kernel_start = .;
+
+	.text BLOCK(4K) : ALIGN(4K)
+	{
+		*(.multiboot)
+		*(.text)
+	}
+	.rodata BLOCK(4K) : ALIGN(4K)
+	{
+		*(.rodata.*)
+	}
+	.data BLOCK(4K) : ALIGN(4K)
+	{
+		*(.data)
+	}
+	.bss BLOCK(4K) : ALIGN(4K)
+	{
+		*(COMMON)
+		*(.bss)
+	}
+
+	g_kernel_end = .;
+}


@@ -1,21 +0,0 @@
-#include <kernel/GDT.h>
-
-namespace Kernel
-{
-
-	GDT* GDT::create()
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	void GDT::write_entry(uint8_t, uint32_t, uint32_t, uint8_t, uint8_t)
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	void GDT::write_tss()
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-}


@@ -1,31 +0,0 @@
-#include <kernel/IDT.h>
-
-namespace Kernel
-{
-
-	IDT* IDT::create()
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	[[noreturn] void IDT::force_triple_fault()
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	void IDT::register_irq_handler(uint8_t, Interruptable*)
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	void IDT::register_interrupt_handler(uint8_t, void (*)())
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	void IDT::register_syscall_handler(uint8_t, void (*)())
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-}


@@ -1,144 +0,0 @@
-#include <kernel/Memory/PageTable.h>
-#include <kernel/Lock/SpinLock.h>
-
-namespace Kernel
-{
-
-	RecursiveSpinLock PageTable::s_fast_page_lock;
-
-	void PageTable::initialize()
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	PageTable& PageTable::kernel()
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	bool PageTable::is_valid_pointer(uintptr_t)
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	BAN::ErrorOr<PageTable*> PageTable::create_userspace()
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	PageTable::~PageTable()
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	void PageTable::unmap_page(vaddr_t)
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	void PageTable::unmap_range(vaddr_t, size_t)
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	void PageTable::map_range_at(paddr_t, vaddr_t, size_t, flags_t)
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	void PageTable::map_page_at(paddr_t, vaddr_t, flags_t)
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	paddr_t PageTable::physical_address_of(vaddr_t) const
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	PageTable::flags_t PageTable::get_page_flags(vaddr_t) const
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	bool PageTable::is_page_free(vaddr_t) const
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	bool PageTable::is_range_free(vaddr_t, size_t) const
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	bool PageTable::reserve_page(vaddr_t, bool)
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	bool PageTable::reserve_range(vaddr_t, size_t, bool)
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	vaddr_t PageTable::reserve_free_page(vaddr_t, vaddr_t)
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	vaddr_t PageTable::reserve_free_contiguous_pages(size_t, vaddr_t, vaddr_t)
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	void PageTable::load()
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	void PageTable::initial_load()
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	void PageTable::debug_dump()
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	uint64_t PageTable::get_page_data(vaddr_t) const
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	void PageTable::initialize_kernel()
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	void PageTable::map_kernel_memory()
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	void PageTable::prepare_fast_page()
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	void PageTable::invalidate(vaddr_t)
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	void PageTable::map_fast_page(paddr_t)
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-	void PageTable::unmap_fast_page()
-	{
-		ASSERT_NOT_REACHED();
-	}
-
-}


@@ -1,31 +0,0 @@
-.section .userspace, "aw"
-
-// stack contains
-//    return address
-//    signal number
-//    signal handler
-
-.global signal_trampoline
-signal_trampoline:
-	ud2
-
-	pushl %ebp
-	movl %esp, %ebp
-	subl $8, %esp
-
-	pusha
-
-	movl 40(%esp), %edi
-	movl 36(%esp), %eax
-
-	subl $12, %esp
-	pushl %edi
-	call *%eax
-	addl $16, %esp
-
-	popa
-
-	leave
-	addl $8, %esp
-
-	ret


@@ -1,20 +0,0 @@
-# uint32_t read_ip()
-.global read_ip
-read_ip:
-	popl %eax
-	jmp *%eax
-
-# void start_thread(uint32_t sp, uint32_t ip)
-.global start_thread
-start_thread:
-	ud2
-
-# void continue_thread(uint32_t sp, uint32_t ip)
-.global continue_thread
-continue_thread:
-	ud2
-
-# void thread_userspace_trampoline(uint32_t sp, uint32_t ip, int argc, char** argv, char** envp)
-.global thread_userspace_trampoline
-thread_userspace_trampoline:
-	ud2


@@ -1,268 +0,0 @@
-.set PG_PRESENT, 1<<0
-.set PG_READ_WRITE, 1<<1
-.set PG_PAGE_SIZE, 1<<7
-
-.set FB_WIDTH, 800
-.set FB_HEIGHT, 600
-.set FB_BPP, 32
-
-#define KERNEL_OFFSET 0xC0000000
-#define V2P(vaddr) ((vaddr) - KERNEL_OFFSET)
-
-.code32
-
-# multiboot2 header
-.section .multiboot, "aw"
-	.align 8
-multiboot2_start:
-	.long 0xE85250D6
-	.long 0
-	.long multiboot2_end - multiboot2_start
-	.long -(0xE85250D6 + (multiboot2_end - multiboot2_start))
-
-	# framebuffer tag
-	.align 8
-	.short 5
-	.short 0
-	.long 20
-	.long FB_WIDTH
-	.long FB_HEIGHT
-	.long FB_BPP
-
-	# legacy start
-	.align 8
-	.short 3
-	.short 0
-	.long 12
-	.long V2P(_start)
-
-	.align 8
-	.short 0
-	.short 0
-	.long 8
-multiboot2_end:
-
-.section .bananboot, "aw"
-	.align 8
-bananboot_start:
-	.long 0xBABAB007
-	.long -(0xBABAB007 + FB_WIDTH + FB_HEIGHT + FB_BPP)
-	.long FB_WIDTH
-	.long FB_HEIGHT
-	.long FB_BPP
-bananboot_end:
-
-.section .bss, "aw", @nobits
-boot_stack_bottom:
-	.skip 4096 * 4
-boot_stack_top:
-
-	.global g_kernel_cmdline
-g_kernel_cmdline:
-	.skip 4096
-
-bootloader_magic:
-	.skip 8
-bootloader_info:
-	.skip 8
-
-.section .data
-
-.align 4096
-boot_pml4:
-boot_pdpt_lo:
-boot_pdpt_hi:
-boot_pd:
-
-boot_gdt:
-	.quad 0x0000000000000000 # null descriptor
-	.quad 0x00CF9A000000FFFF # kernel code
-	.quad 0x00CF92000000FFFF # kernel data
-boot_gdtr:
-	.short . - boot_gdt - 1
-	.long V2P(boot_gdt)
-
-.global g_ap_startup_done
-g_ap_startup_done:
-	.byte 0
-.global g_ap_running_count
-g_ap_running_count:
-	.byte 0
-.global g_ap_stack_loaded
-g_ap_stack_loaded:
-	.byte 0
-
-.section .text
-
-has_cpuid:
-	pushfl
-	pushfl
-	xorl $0x00200000, (%esp)
-	popfl
-	pushfl
-	popl %eax
-	xorl (%esp), %eax
-	popfl
-	testl $0x00200000, %eax
-	ret
-
-check_requirements:
-	call has_cpuid
-	jz .exit
-	ret
-.exit:
-	jmp system_halt
-
-enable_sse:
-	movl %cr0, %eax
-	andw $0xFFFB, %ax
-	orw $0x0002, %ax
-	movl %eax, %cr0
-	movl %cr4, %eax
-	orw $0x0600, %ax
-	movl %eax, %cr4
-	ret
-
-initialize_paging:
-	# enable PAE
-	movl %cr4, %ecx
-	orl $0x20, %ecx
-	movl %ecx, %cr4
-
-	# set address of paging structures
-	movl $V2P(boot_pml4), %ecx
-	movl %ecx, %cr3
-
-	# enable paging
-	movl %cr0, %ecx
-	orl $0x80000000, %ecx
-	movl %ecx, %cr0
-
-	ret
-
-.global _start
-.type _start, @function
-_start:
-	cli; cld
-
-	# Initialize stack and multiboot info
-	movl %eax, V2P(bootloader_magic)
-	movl %ebx, V2P(bootloader_info)
-	movl $V2P(boot_stack_top), %esp
-
-	call check_requirements
-	call enable_sse
-	call initialize_paging
-
-	# flush gdt
-	lgdt V2P(boot_gdtr)
-	ljmpl $0x08, $V2P(gdt_flush)
-
-gdt_flush:
-	movw $0x10, %ax
-	movw %ax, %ds
-	movw %ax, %ss
-	movw %ax, %es
-
-	# move stack pointer to higher half
-	movl %esp, %esp
-	addl $KERNEL_OFFSET, %esp
-
-	# jump to higher half
-	leal higher_half, %ecx
-	jmp *%ecx
-
-higher_half:
-	# call global constuctors
-	call _init
-
-	# call to the kernel itself (clear ebp for stacktrace)
-	xorl %ebp, %ebp
-	movl V2P(bootloader_magic), %edi
-	movl V2P(bootloader_info), %esi
-	call kernel_main
-
-	# call global destructors
-	call _fini
-
-system_halt:
-	xchgw %bx, %bx
-	cli
-1:	hlt
-	jmp 1b
-
-.section .ap_init, "ax"
-
-.code16
-.global ap_trampoline
-ap_trampoline:
-	jmp 1f
-
-.align 8
-ap_stack_ptr:
-	.skip 4
-
-1:
-	cli; cld
-	ljmpl $0x00, $ap_cs_clear
-ap_cs_clear:
-	xorw %ax, %ax
-	movw %ax, %ds
-
-	# load ap gdt and enter protected mode
-	lgdt ap_gdtr
-	movl %cr0, %eax
-	orb $1, %al
-	movl %eax, %cr0
-	ljmpl $0x08, $ap_protected_mode
-
-.code32
-ap_protected_mode:
-	movw $0x10, %ax
-	movw %ax, %ds
-	movw %ax, %ss
-	movw %ax, %es
-
-	movl ap_stack_ptr, %esp
-	movb $1, V2P(g_ap_stack_loaded)
-
-	call V2P(enable_sse)
-	call V2P(initialize_paging)
-
-	# load boot gdt and enter long mode
-	lgdt V2P(boot_gdtr)
-	ljmpl $0x08, $ap_flush_gdt
-
-ap_flush_gdt:
-	# move stack pointer to higher half
-	movl %esp, %esp
-	addl $KERNEL_OFFSET, %esp
-
-	# jump to higher half
-	leal ap_higher_half, %ecx
-	jmp *%ecx
-
-ap_higher_half:
-	# clear rbp for stacktrace
-	xorl %ebp, %ebp
-1:	pause
-	cmpb $0, g_ap_startup_done
-	jz 1b
-	lock incb g_ap_running_count
-	call ap_main
-	jmp system_halt
-
-ap_gdt:
-	.quad 0x0000000000000000 # null descriptor
-	.quad 0x00CF9A000000FFFF # 32 bit code
-	.quad 0x00CF92000000FFFF # 32 bit data
-ap_gdtr:
-	.short . - ap_gdt - 1
-	.long ap_gdt
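
The AP-startup handshake in the deleted boot.S above is easy to miss in the assembly: each application processor spins on g_ap_startup_done, then atomically increments g_ap_running_count before entering ap_main. A rough C++ equivalent of that protocol (illustrative only; the real flags are plain bytes touched from both 16-bit and 32-bit code, and the names below are stand-ins):

    #include <atomic>
    #include <cstdint>

    std::atomic<uint8_t> ap_startup_done{0};  // set by the BSP when APs may proceed
    std::atomic<uint8_t> ap_running_count{0}; // counts APs that reached ap_main

    void ap_entry()
    {
        // "1: pause; cmpb $0, g_ap_startup_done; jz 1b"
        while (ap_startup_done.load(std::memory_order_acquire) == 0)
            __builtin_ia32_pause();

        // "lock incb g_ap_running_count"
        ap_running_count.fetch_add(1, std::memory_order_acq_rel);

        // ap_main(); never returns
    }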


@@ -1,45 +0,0 @@
-ENTRY (_start)
-
-KERNEL_OFFSET = 0xC0000000;
-
-SECTIONS
-{
-	. = 0xF000;
-	.ap_init ALIGN(4K) : AT(ADDR(.ap_init))
-	{
-		g_ap_init_addr = .;
-		*(.ap_init)
-	}
-
-	. = 0x00100000 + KERNEL_OFFSET;
-	g_kernel_start = .;
-
-	.text ALIGN(4K) : AT(ADDR(.text) - KERNEL_OFFSET)
-	{
-		g_kernel_execute_start = .;
-		*(.multiboot)
-		*(.bananboot)
-		*(.text.*)
-	}
-	.userspace ALIGN(4K) : AT(ADDR(.userspace) - KERNEL_OFFSET)
-	{
-		g_userspace_start = .;
-		*(.userspace)
-		g_userspace_end = .;
-		g_kernel_execute_end = .;
-	}
-	.rodata ALIGN(4K) : AT(ADDR(.rodata) - KERNEL_OFFSET)
-	{
-		*(.rodata.*)
-	}
-	.data ALIGN(4K) : AT(ADDR(.data) - KERNEL_OFFSET)
-	{
-		*(.data)
-	}
-	.bss ALIGN(4K) : AT(ADDR(.bss) - KERNEL_OFFSET)
-	{
-		*(COMMON)
-		*(.bss)
-	}
-
-	g_kernel_end = .;
-}


@@ -1,4 +1,4 @@
-# uint64_t read_ip()
+# uint64_t read_()
 .global read_ip
 read_ip:
 	popq %rax


@@ -1,41 +1,115 @@
+#include <BAN/Atomic.h>
 #include <kernel/Panic.h>

 #define ATEXIT_MAX_FUNCS 128

+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef unsigned uarch_t;
+
 struct atexit_func_entry_t
 {
-	void(*func)(void*);
-	void* arg;
+	/*
+	* Each member is at least 4 bytes large. Such that each entry is 12bytes.
+	* 128 * 12 = 1.5KB exact.
+	**/
+	void (*destructor_func)(void *);
+	void *obj_ptr;
 	void *dso_handle;
 };

-static atexit_func_entry_t __atexit_funcs[ATEXIT_MAX_FUNCS];
-static size_t __atexit_func_count = 0;
+atexit_func_entry_t __atexit_funcs[ATEXIT_MAX_FUNCS];
+uarch_t __atexit_func_count = 0;

-extern "C" int __cxa_atexit(void(*func)(void*), void* arg, void* dso_handle)
+int __cxa_atexit(void (*f)(void *), void *objptr, void *dso)
 {
-	if (__atexit_func_count >= ATEXIT_MAX_FUNCS)
-		return -1;
-	auto& atexit_func = __atexit_funcs[__atexit_func_count++];
-	atexit_func.func = func;
-	atexit_func.arg = arg;
-	atexit_func.dso_handle = dso_handle;
-	return 0;
+	if (__atexit_func_count >= ATEXIT_MAX_FUNCS) {return -1;};
+	__atexit_funcs[__atexit_func_count].destructor_func = f;
+	__atexit_funcs[__atexit_func_count].obj_ptr = objptr;
+	__atexit_funcs[__atexit_func_count].dso_handle = dso;
+	__atexit_func_count++;
+	return 0; /*I would prefer if functions returned 1 on success, but the ABI says...*/
 };

-extern "C" void __cxa_finalize(void* f)
+void __cxa_finalize(void *f)
 {
-	for (size_t i = __atexit_func_count; i > 0; i--)
-	{
-		auto& atexit_func = __atexit_funcs[i - 1];
-		if (atexit_func.func == nullptr)
-			continue;
-		if (f == nullptr || f == atexit_func.func)
-		{
-			atexit_func.func(atexit_func.arg);
-			atexit_func.func = nullptr;
-		}
-	}
+	uarch_t i = __atexit_func_count;
+	if (!f)
+	{
+		/*
+		* According to the Itanium C++ ABI, if __cxa_finalize is called without a
+		* function ptr, then it means that we should destroy EVERYTHING MUAHAHAHA!!
+		*
+		* TODO:
+		* Note well, however, that deleting a function from here that contains a __dso_handle
+		* means that one link to a shared object file has been terminated. In other words,
+		* We should monitor this list (optional, of course), since it tells us how many links to
+		* an object file exist at runtime in a particular application. This can be used to tell
+		* when a shared object is no longer in use. It is one of many methods, however.
+		**/
+		//You may insert a prinf() here to tell you whether or not the function gets called. Testing
+		//is CRITICAL!
+		while (i--)
+		{
+			if (__atexit_funcs[i].destructor_func)
+			{
+				/* ^^^ That if statement is a safeguard...
+				* To make sure we don't call any entries that have already been called and unset at runtime.
+				* Those will contain a value of 0, and calling a function with value 0
+				* will cause undefined behaviour. Remember that linear address 0,
+				* in a non-virtual address space (physical) contains the IVT and BDA.
+				*
+				* In a virtual environment, the kernel will receive a page fault, and then probably
+				* map in some trash, or a blank page, or something stupid like that.
+				* This will result in the processor executing trash, and...we don't want that.
+				**/
+				(*__atexit_funcs[i].destructor_func)(__atexit_funcs[i].obj_ptr);
+			};
+		};
+		return;
+	};
+
+	while (i--)
+	{
+		/*
+		* The ABI states that multiple calls to the __cxa_finalize(destructor_func_ptr) function
+		* should not destroy objects multiple times. Only one call is needed to eliminate multiple
+		* entries with the same address.
+		*
+		* FIXME:
+		* This presents the obvious problem: all destructors must be stored in the order they
+		* were placed in the list. I.e: the last initialized object's destructor must be first
+		* in the list of destructors to be called. But removing a destructor from the list at runtime
+		* creates holes in the table with unfilled entries.
+		* Remember that the insertion algorithm in __cxa_atexit simply inserts the next destructor
+		* at the end of the table. So, we have holes with our current algorithm
+		* This function should be modified to move all the destructors above the one currently
+		* being called and removed one place down in the list, so as to cover up the hole.
+		* Otherwise, whenever a destructor is called and removed, an entire space in the table is wasted.
+		**/
+		if (__atexit_funcs[i].destructor_func == f)
+		{
+			/*
+			* Note that in the next line, not every destructor function is a class destructor.
+			* It is perfectly legal to register a non class destructor function as a simple cleanup
+			* function to be called on program termination, in which case, it would not NEED an
+			* object This pointer. A smart programmer may even take advantage of this and register
+			* a C function in the table with the address of some structure containing data about
+			* what to clean up on exit.
+			* In the case of a function that takes no arguments, it will simply be ignore within the
+			* function itself. No worries.
+			**/
+			(*__atexit_funcs[i].destructor_func)(__atexit_funcs[i].obj_ptr);
+			__atexit_funcs[i].destructor_func = 0;
+			/*
+			* Notice that we didn't decrement __atexit_func_count: this is because this algorithm
+			* requires patching to deal with the FIXME outlined above.
+			**/
+		};
+	};
 };
@@ -44,19 +118,23 @@ namespace __cxxabiv1
 	int __cxa_guard_acquire (__guard* g)
 	{
-		uint8_t* byte = reinterpret_cast<uint8_t*>(g);
-		uint8_t zero = 0;
-		return __atomic_compare_exchange_n(byte, &zero, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+		auto& atomic = *reinterpret_cast<BAN::Atomic<__guard>*>(g);
+		return atomic == 0;
 	}

 	void __cxa_guard_release (__guard* g)
 	{
-		uint8_t* byte = reinterpret_cast<uint8_t*>(g);
-		__atomic_store_n(byte, 0, __ATOMIC_RELEASE);
+		auto& atomic = *reinterpret_cast<BAN::Atomic<__guard>*>(g);
+		atomic = 1;
 	}

 	void __cxa_guard_abort (__guard*)
 	{
 		Kernel::panic("__cxa_guard_abort");
+		__builtin_unreachable();
 	}
 }
+
+#ifdef __cplusplus
+};
+#endif
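
Both hunks exist to support C++ statics with nontrivial destructors: for each function-local static, the compiler emits a guard sequence around one-time construction and registers the destructor through __cxa_atexit. Roughly what that generated code looks like (a hand-written sketch of the Itanium ABI calling sequence under assumed signatures; Foo and get_foo are made-up names, not repository code):

    #include <cstdint>
    #include <new>

    // Assumed ABI entry points, as provided by the file above.
    extern "C" int   __cxa_guard_acquire(uint64_t*);
    extern "C" void  __cxa_guard_release(uint64_t*);
    extern "C" int   __cxa_atexit(void (*)(void*), void*, void*);
    extern "C" void* __dso_handle;

    struct Foo { Foo(); ~Foo(); };

    Foo& get_foo()
    {
        alignas(Foo) static unsigned char storage[sizeof(Foo)];
        static uint64_t guard; // low byte acts as the "constructed" flag
        if (__cxa_guard_acquire(&guard)) // nonzero: we must construct
        {
            new (storage) Foo(); // construct exactly once
            __cxa_atexit([](void* p) { static_cast<Foo*>(p)->~Foo(); },
                         storage, &__dso_handle); // destroy at shutdown
            __cxa_guard_release(&guard);
        }
        return *reinterpret_cast<Foo*>(storage);
    }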


@@ -1,11 +1,11 @@
 #pragma once

 #define x86_64 1
-#define i686 2
+#define i386 2
 #define ARCH(arch) (__arch == arch)

-#if !defined(__arch) || (__arch != x86_64 && __arch != i686)
+#if !defined(__arch) || (__arch != x86_64 && __arch != i386)
 	#error "Unsupported architecture"
 #endif
@@ -13,12 +13,10 @@
 #define read_rsp(rsp) asm volatile("movq %%rsp, %0" : "=r"(rsp))
 #define push_callee_saved() asm volatile("pushq %rbx; pushq %rbp; pushq %r12; pushq %r13; pushq %r14; pushq %r15")
 #define pop_callee_saved() asm volatile("popq %r15; popq %r14; popq %r13; popq %r12; popq %rbp; popq %rbx")
-#elif ARCH(i686)
+#else
 #define read_rsp(rsp) asm volatile("movl %%esp, %0" : "=r"(rsp))
 #define push_callee_saved() asm volatile("pushal")
 #define pop_callee_saved() asm volatile("popal")
-#else
-#error
 #endif

 #include <stdint.h>


@@ -28,7 +28,7 @@ namespace Kernel
 			// 1x triply indirect
 			BAN::Array<paddr_t, 5> block;
 			static constexpr size_t direct_block_count = 2;
-#elif ARCH(i686)
+#elif ARCH(i386)
 			// 14x direct blocks
 			// 1x singly indirect
 			// 1x doubly indirect


@@ -4,7 +4,7 @@
 #if ARCH(x86_64)
 	#define KERNEL_OFFSET 0xFFFFFFFF80000000
-#elif ARCH(i686)
+#elif ARCH(i386)
 	#define KERNEL_OFFSET 0xC0000000
 #else
 	#error


@@ -199,33 +199,6 @@ namespace Kernel
 		BAN::ErrorOr<void> validate_string_access(const char*);
 		BAN::ErrorOr<void> validate_pointer_access(const void*, size_t);

-		uint64_t signal_pending_mask() const
-		{
-			return ((uint64_t)m_signal_pending_mask[1].load() << 32) | m_signal_pending_mask[0].load();
-		}
-
-		void add_pending_signal(uint8_t signal)
-		{
-			ASSERT(signal >= _SIGMIN);
-			ASSERT(signal <= _SIGMAX);
-			ASSERT(signal < 64);
-			if (signal < 32)
-				m_signal_pending_mask[0] |= (uint32_t)1 << signal;
-			else
-				m_signal_pending_mask[1] |= (uint32_t)1 << (signal - 32);
-		}
-
-		void remove_pending_signal(uint8_t signal)
-		{
-			ASSERT(signal >= _SIGMIN);
-			ASSERT(signal <= _SIGMAX);
-			ASSERT(signal < 64);
-			if (signal < 32)
-				m_signal_pending_mask[0] &= ~((uint32_t)1 << signal);
-			else
-				m_signal_pending_mask[1] &= ~((uint32_t)1 << (signal - 32));
-		}
-
 	private:
 		struct ExitStatus
 		{
@@ -253,8 +226,7 @@ namespace Kernel
 		BAN::Vector<Thread*> m_threads;

 		BAN::Atomic<vaddr_t> m_signal_handlers[_SIGMAX + 1] { };
-		// This is 2 32 bit values to allow atomicity on 32 targets
-		BAN::Atomic<uint32_t> m_signal_pending_mask[2] { 0, 0 };
+		BAN::Atomic<uint64_t> m_signal_pending_mask { 0 };

 		BAN::Vector<BAN::String> m_cmdline;
 		BAN::Vector<BAN::String> m_environ;
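
The helpers removed in the first hunk and the two-element array removed here are one scheme: a 32-bit x86 target has no cheap lock-free 64-bit atomic, so the pending-signal mask is kept as two 32-bit atomics and recombined on read. A condensed sketch of that scheme with std::atomic standing in for the BAN::Atomic members (illustrative only):

    #include <atomic>
    #include <cstdint>

    std::atomic<uint32_t> pending_lo{0}, pending_hi{0};

    void add_pending_signal(uint8_t signal) // signal < 64
    {
        if (signal < 32)
            pending_lo.fetch_or(uint32_t(1) << signal);
        else
            pending_hi.fetch_or(uint32_t(1) << (signal - 32));
    }

    uint64_t signal_pending_mask()
    {
        // Each half is read atomically; the combined value may interleave
        // with concurrent writers, which is acceptable for a pending mask.
        return (uint64_t(pending_hi.load()) << 32) | pending_lo.load();
    }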


@@ -19,7 +19,7 @@ namespace Kernel
 	using ProcessorID = uint32_t;
 	constexpr ProcessorID PROCESSOR_NONE = 0xFFFFFFFF;

-#if ARCH(x86_64) || ARCH(i686)
+#if ARCH(x86_64) || ARCH(i386)
 	class Processor
 	{
 		BAN_NON_COPYABLE(Processor);


@@ -33,11 +33,9 @@ namespace Kernel
 		Semaphore m_semaphore;
 		SpinLock m_lock;

-		BAN::Atomic<size_t> m_used_mask { 0 };
-		BAN::Atomic<size_t> m_done_mask { 0 };
+		BAN::Atomic<uint64_t> m_used_mask { 0 };
+		BAN::Atomic<uint64_t> m_done_mask { 0 };
 		volatile uint16_t m_status_codes[64] { };
-
-		static constexpr size_t m_mask_bits = sizeof(size_t) * 8;
 	};

 }


@@ -79,7 +79,6 @@ namespace Kernel
 		static pid_t current_tid();

 		Process& process();
-		const Process& process() const;
 		bool has_process() const { return m_process; }

 		bool is_userspace() const { return m_is_userspace; }


@@ -1488,7 +1488,7 @@ namespace Kernel
 		if (pid == m_pid)
 		{
-			add_pending_signal(signal);
+			m_signal_pending_mask |= 1 << signal;
 			return 0;
 		}
@@ -1501,7 +1501,7 @@ namespace Kernel
 				found = true;
 				if (signal)
 				{
-					process.add_pending_signal(signal);
+					process.m_signal_pending_mask |= 1 << signal;
 					// FIXME: This feels hacky
 					Scheduler::get().unblock_thread(process.m_threads.front()->tid());
 				}


@@ -22,7 +22,7 @@ namespace Kernel
 	{
 #if ARCH(x86_64)
 		asm volatile("rdrand %0" : "=r"(s_rand_seed));
-#elif ARCH(i686)
+#elif ARCH(i386)
 		uint32_t lo, hi;
 		asm volatile(
 			"rdrand %[lo];"


@@ -21,7 +21,7 @@ namespace Kernel
 	{
 #if ARCH(x86_64)
 		asm volatile("movq %0, %%rsp" :: "rm"(Processor::current_stack_top()));
-#elif ARCH(i686)
+#elif ARCH(i386)
 		asm volatile("movl %0, %%esp" :: "rm"(Processor::current_stack_top()));
 #else
 		#error
@@ -221,7 +221,7 @@ namespace Kernel
 			"orq $(1 << 3), %rax;"
 			"movq %rax, %cr0"
 		);
-#elif ARCH(i686)
+#elif ARCH(i386)
 		asm volatile(
 			"movl %cr0, %eax;"
 			"orl $(1 << 3), %eax;"


@@ -15,8 +15,8 @@ namespace Kernel
 		, m_doorbell(db)
 		, m_qdepth(qdepth)
 	{
-		for (uint32_t i = qdepth; i < m_mask_bits; i++)
-			m_used_mask |= (size_t)1 << i;
+		for (uint32_t i = qdepth; i < 64; i++)
+			m_used_mask |= (uint64_t)1 << i;
 		set_irq(irq);
 		enable_interrupt();
 	}
@@ -29,8 +29,8 @@ namespace Kernel
 		{
 			uint16_t sts = cq_ptr[m_cq_head].sts >> 1;
 			uint16_t cid = cq_ptr[m_cq_head].cid;
-			size_t cid_mask = (size_t)1 << cid;
-			ASSERT(cid < m_mask_bits);
+			uint64_t cid_mask = (uint64_t)1 << cid;
+			ASSERT(cid < 64);

 			ASSERT((m_done_mask & cid_mask) == 0);
@@ -50,7 +50,7 @@ namespace Kernel
 	uint16_t NVMeQueue::submit_command(NVMe::SubmissionQueueEntry& sqe)
 	{
 		uint16_t cid = reserve_cid();
-		size_t cid_mask = (size_t)1 << cid;
+		uint64_t cid_mask = (uint64_t)1 << cid;

 		{
 			SpinLockGuard _(m_lock);
@@ -98,13 +98,13 @@ namespace Kernel
 		}

 		uint16_t cid = 0;
-		for (; cid < m_mask_bits; cid++)
-			if ((m_used_mask & ((size_t)1 << cid)) == 0)
+		for (; cid < 64; cid++)
+			if ((m_used_mask & ((uint64_t)1 << cid)) == 0)
 				break;
-		ASSERT(cid < m_mask_bits);
+		ASSERT(cid < 64);
 		ASSERT(cid < m_qdepth);

-		m_used_mask |= (size_t)1 << cid;
+		m_used_mask |= (uint64_t)1 << cid;

 		m_lock.unlock(state);
 		return cid;
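
The size_t to uint64_t change matters on a 32-bit target, where size_t is 32 bits and a size_t mask cannot track 64 command IDs. The bitmap logic itself, condensed into a sketch (the real code guards this with m_lock and a BAN::Atomic mask; names below are stand-ins):

    #include <cstdint>

    // used_mask is a 64-bit bitmap: bit i set means command ID i is in
    // flight. Pre-setting bits >= qdepth in the constructor makes IDs
    // beyond the queue depth permanently unavailable.
    struct CidBitmap
    {
        uint64_t used_mask = 0;

        explicit CidBitmap(uint32_t qdepth)
        {
            for (uint32_t i = qdepth; i < 64; i++)
                used_mask |= uint64_t(1) << i; // out-of-range cids look used
        }

        uint16_t reserve_cid()
        {
            uint16_t cid = 0;
            while (cid < 64 && (used_mask & (uint64_t(1) << cid)))
                cid++; // first clear bit
            // caller must ensure a free cid exists (cid < qdepth)
            used_mask |= uint64_t(1) << cid;
            return cid;
        }

        void release_cid(uint16_t cid) { used_mask &= ~(uint64_t(1) << cid); }
    };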


@@ -111,7 +111,7 @@ namespace Kernel
 		);
 		save_sse();
 		asm volatile("movq %0, %%cr0" :: "r"(cr0));
-#elif ARCH(i686)
+#elif ARCH(i386)
 		uintptr_t cr0;
 		asm volatile(
 			"movl %%cr0, %%eax;"
@@ -140,12 +140,6 @@ namespace Kernel
 		return *m_process;
 	}

-	const Process& Thread::process() const
-	{
-		ASSERT(m_process);
-		return *m_process;
-	}
-
 	Thread::~Thread()
 	{
 	}
@@ -247,7 +241,7 @@ namespace Kernel
 		auto& interrupt_stack = *reinterpret_cast<InterruptStack*>(interrupt_stack_base() + interrupt_stack_size() - sizeof(InterruptStack));
 		if (!GDT::is_user_segment(interrupt_stack.cs))
 			return false;
-		uint64_t full_pending_mask = m_signal_pending_mask | process().signal_pending_mask();
+		uint64_t full_pending_mask = m_signal_pending_mask | m_process->m_signal_pending_mask;
 		return full_pending_mask & ~m_signal_block_mask;
 	}
@@ -271,7 +265,7 @@ namespace Kernel
 		if (signal == 0)
 		{
-			uint64_t full_pending_mask = m_signal_pending_mask | process().signal_pending_mask();
+			uint64_t full_pending_mask = m_signal_pending_mask | process().m_signal_pending_mask;
 			for (signal = _SIGMIN; signal <= _SIGMAX; signal++)
 			{
 				uint64_t mask = 1ull << signal;
@@ -289,7 +283,7 @@ namespace Kernel
 		vaddr_t signal_handler = process().m_signal_handlers[signal];

 		m_signal_pending_mask &= ~(1ull << signal);
-		process().remove_pending_signal(signal);
+		process().m_signal_pending_mask &= ~(1ull << signal);

 		if (signal_handler == (vaddr_t)SIG_IGN)
 			;


@@ -1,4 +1,4 @@
-/* i686 crti.s */
+/* i386 crti.s */
 .section .init
 .global _init
 .type _init, @function


@@ -1,4 +1,4 @@
-/* i686 crtn.s */
+/* i386 crtn.s */
 .section .init
 /* gcc will nicely put the contents of crtend.o's .init section here. */
 	popl %ebp


@@ -1,41 +1,37 @@
-#include <BAN/Assert.h>
+#include <icxxabi.h>
+
+#include <stdint.h>
+#include <stddef.h>

 #define ATEXIT_MAX_FUNCS 128

 struct atexit_func_entry_t
 {
-	void(*func)(void*);
-	void* arg;
+	void (*destructor)(void*);
+	void* data;
 	void* dso_handle;
 };

 static atexit_func_entry_t __atexit_funcs[ATEXIT_MAX_FUNCS];
-static size_t __atexit_func_count = 0;
+static int __atexit_func_count = 0;

-extern "C" int __cxa_atexit(void(*func)(void*), void* arg, void* dso_handle)
+int __cxa_atexit(void (*func)(void*), void* data, void* dso_handle)
 {
 	if (__atexit_func_count >= ATEXIT_MAX_FUNCS)
-		return -1;
-	auto& atexit_func = __atexit_funcs[__atexit_func_count++];
-	atexit_func.func = func;
-	atexit_func.arg = arg;
-	atexit_func.dso_handle = dso_handle;
+		return -1;;
+	__atexit_funcs[__atexit_func_count].destructor = func;
+	__atexit_funcs[__atexit_func_count].data = data;
+	__atexit_funcs[__atexit_func_count].dso_handle = dso_handle;
+	__atexit_func_count++;
 	return 0;
 };

-extern "C" void __cxa_finalize(void* f)
+void __cxa_finalize(void* func)
 {
-	for (size_t i = __atexit_func_count; i > 0; i--)
+	for (int i = __atexit_func_count - 1; i >= 0; i--)
 	{
-		auto& atexit_func = __atexit_funcs[i - 1];
-		if (atexit_func.func == nullptr)
+		if (func && func != __atexit_funcs[i].destructor)
 			continue;
-		if (f == nullptr || f == atexit_func.func)
-		{
-			atexit_func.func(atexit_func.arg);
-			atexit_func.func = nullptr;
-		}
+		if (__atexit_funcs[i].destructor == nullptr)
+			continue;
+		__atexit_funcs[i].destructor(__atexit_funcs[i].data);
+		__atexit_funcs[i].destructor = nullptr;
 	}
 }
-};