Merge pull request 'Add back x86_32 support' (#5) from x86_32 into main
Reviewed-on: #5
This commit is contained in:
commit
731330c6b5
|
@ -106,6 +106,36 @@ namespace BAN
|
||||||
template<typename T> inline constexpr bool is_unsigned_v = is_unsigned<T>::value;
|
template<typename T> inline constexpr bool is_unsigned_v = is_unsigned<T>::value;
|
||||||
template<typename T> concept unsigned_integral = is_unsigned_v<T> && is_integral_v<T>;
|
template<typename T> concept unsigned_integral = is_unsigned_v<T> && is_integral_v<T>;
|
||||||
|
|
||||||
|
#define __BAN_TRAITS_MAKE_UNSIGNED_CV(__type) \
|
||||||
|
template<> struct make_unsigned<__type> { using type = unsigned __type; }; \
|
||||||
|
template<> struct make_unsigned<const __type> { using type = unsigned const __type; }; \
|
||||||
|
template<> struct make_unsigned<volatile __type> { using type = unsigned volatile __type; }; \
|
||||||
|
template<> struct make_unsigned<const volatile __type> { using type = unsigned const volatile __type; };
|
||||||
|
|
||||||
|
template<typename T> requires is_arithmetic_v<T> struct make_unsigned { using type = T; };
|
||||||
|
__BAN_TRAITS_MAKE_UNSIGNED_CV(char)
|
||||||
|
__BAN_TRAITS_MAKE_UNSIGNED_CV(short)
|
||||||
|
__BAN_TRAITS_MAKE_UNSIGNED_CV(int)
|
||||||
|
__BAN_TRAITS_MAKE_UNSIGNED_CV(long)
|
||||||
|
__BAN_TRAITS_MAKE_UNSIGNED_CV(long long)
|
||||||
|
template<typename T> using make_unsigned_t = typename make_unsigned<T>::type;
|
||||||
|
#undef __BAN_TRAITS_MAKE_UNSIGNED_CV
|
||||||
|
|
||||||
|
#define __BAN_TRAITS_MAKE_SIGNED_CV(__type) \
|
||||||
|
template<> struct make_signed<unsigned __type> { using type = __type; }; \
|
||||||
|
template<> struct make_signed<unsigned const __type> { using type = const __type; }; \
|
||||||
|
template<> struct make_signed<unsigned volatile __type> { using type = volatile __type; }; \
|
||||||
|
template<> struct make_signed<unsigned const volatile __type> { using type = const volatile __type; };
|
||||||
|
|
||||||
|
template<typename T> requires is_arithmetic_v<T> struct make_signed { using type = T; };
|
||||||
|
__BAN_TRAITS_MAKE_SIGNED_CV(char)
|
||||||
|
__BAN_TRAITS_MAKE_SIGNED_CV(short)
|
||||||
|
__BAN_TRAITS_MAKE_SIGNED_CV(int)
|
||||||
|
__BAN_TRAITS_MAKE_SIGNED_CV(long)
|
||||||
|
__BAN_TRAITS_MAKE_SIGNED_CV(long long)
|
||||||
|
template<typename T> using make_signed_t = typename make_signed<T>::type;
|
||||||
|
#undef __BAN_TRAITS_MAKE_SIGNED_CV
|
||||||
|
|
||||||
template<typename T> struct less { constexpr bool operator()(const T& lhs, const T& rhs) const { return lhs < rhs; } };
|
template<typename T> struct less { constexpr bool operator()(const T& lhs, const T& rhs) const { return lhs < rhs; } };
|
||||||
template<typename T> struct equal { constexpr bool operator()(const T& lhs, const T& rhs) const { return lhs == rhs; } };
|
template<typename T> struct equal { constexpr bool operator()(const T& lhs, const T& rhs) const { return lhs == rhs; } };
|
||||||
template<typename T> struct greater { constexpr bool operator()(const T& lhs, const T& rhs) const { return lhs > rhs; } };
|
template<typename T> struct greater { constexpr bool operator()(const T& lhs, const T& rhs) const { return lhs > rhs; } };
|
||||||
|
|
|
@ -86,7 +86,7 @@ namespace LibELF
|
||||||
return BAN::Error::from_errno(ENOEXEC);
|
return BAN::Error::from_errno(ENOEXEC);
|
||||||
}
|
}
|
||||||
|
|
||||||
#if ARCH(i386)
|
#if ARCH(i686)
|
||||||
if (m_file_header.e_ident[EI_CLASS] != ELFCLASS32)
|
if (m_file_header.e_ident[EI_CLASS] != ELFCLASS32)
|
||||||
#elif ARCH(x86_64)
|
#elif ARCH(x86_64)
|
||||||
if (m_file_header.e_ident[EI_CLASS] != ELFCLASS64)
|
if (m_file_header.e_ident[EI_CLASS] != ELFCLASS64)
|
||||||
|
|
|
@ -42,7 +42,7 @@ namespace LibELF
|
||||||
const Elf32SectionHeader& section_header32(size_t) const;
|
const Elf32SectionHeader& section_header32(size_t) const;
|
||||||
const char* lookup_section_name32(uint32_t) const;
|
const char* lookup_section_name32(uint32_t) const;
|
||||||
const char* lookup_string32(size_t, uint32_t) const;
|
const char* lookup_string32(size_t, uint32_t) const;
|
||||||
#if ARCH(i386)
|
#if ARCH(i686)
|
||||||
const Elf32FileHeader& file_header_native() const { return file_header32(); }
|
const Elf32FileHeader& file_header_native() const { return file_header32(); }
|
||||||
const Elf32ProgramHeader& program_header_native(size_t index) const { return program_header32(index); }
|
const Elf32ProgramHeader& program_header_native(size_t index) const { return program_header32(index); }
|
||||||
const Elf32SectionHeader& section_header_native(size_t index) const { return section_header32(index); }
|
const Elf32SectionHeader& section_header_native(size_t index) const { return section_header32(index); }
|
||||||
|
|
|
@ -155,7 +155,7 @@ namespace LibELF
|
||||||
Elf64Xword p_align;
|
Elf64Xword p_align;
|
||||||
};
|
};
|
||||||
|
|
||||||
#if ARCH(i386)
|
#if ARCH(i686)
|
||||||
using ElfNativeAddr = Elf32Addr;
|
using ElfNativeAddr = Elf32Addr;
|
||||||
using ElfNativeOff = Elf32Off;
|
using ElfNativeOff = Elf32Off;
|
||||||
using ElfNativeHalf = Elf32Half;
|
using ElfNativeHalf = Elf32Half;
|
||||||
|
|
|
@ -161,7 +161,7 @@ gdt:
|
||||||
.quad 0x00CF9A000000FFFF # 32-bit code
|
.quad 0x00CF9A000000FFFF # 32-bit code
|
||||||
gdtr:
|
gdtr:
|
||||||
.short . - gdt - 1
|
.short . - gdt - 1
|
||||||
.quad gdt
|
.long gdt
|
||||||
|
|
||||||
banan_boot_info:
|
banan_boot_info:
|
||||||
boot_command_line:
|
boot_command_line:
|
||||||
|
|
|
@ -5,15 +5,26 @@
|
||||||
.set e_machine, 18
|
.set e_machine, 18
|
||||||
.set e_version, 20
|
.set e_version, 20
|
||||||
.set e_entry, 24
|
.set e_entry, 24
|
||||||
.set e_phoff, 32
|
|
||||||
.set e_shoff, 40
|
.set e32_phoff, 28
|
||||||
.set e_flags, 48
|
.set e32_shoff, 32
|
||||||
.set e_ehsize, 52
|
.set e32_flags, 36
|
||||||
.set e_phentsize, 54
|
.set e32_ehsize, 40
|
||||||
.set e_phnum, 56
|
.set e32_phentsize, 42
|
||||||
.set e_shentsize, 58
|
.set e32_phnum, 44
|
||||||
.set e_shnum, 60
|
.set e32_shentsize, 46
|
||||||
.set e_shstrndx, 62
|
.set e32_shnum, 48
|
||||||
|
.set e32_shstrndx, 50
|
||||||
|
|
||||||
|
.set e64_phoff, 32
|
||||||
|
.set e64_shoff, 40
|
||||||
|
.set e64_flags, 48
|
||||||
|
.set e64_ehsize, 52
|
||||||
|
.set e64_phentsize, 54
|
||||||
|
.set e64_phnum, 56
|
||||||
|
.set e64_shentsize, 58
|
||||||
|
.set e64_shnum, 60
|
||||||
|
.set e64_shstrndx, 62
|
||||||
|
|
||||||
# e_ident offsets
|
# e_ident offsets
|
||||||
.set EI_CLASS, 4
|
.set EI_CLASS, 4
|
||||||
|
@ -22,6 +33,7 @@
|
||||||
|
|
||||||
# e_ident constants
|
# e_ident constants
|
||||||
.set ELFMAGIC, 0x464C457F
|
.set ELFMAGIC, 0x464C457F
|
||||||
|
.set ELFCLASS32, 1
|
||||||
.set ELFCLASS64, 2
|
.set ELFCLASS64, 2
|
||||||
.set ELFDATA2LSB, 1
|
.set ELFDATA2LSB, 1
|
||||||
.set EV_CURRENT, 1
|
.set EV_CURRENT, 1
|
||||||
|
@ -31,18 +43,30 @@
|
||||||
|
|
||||||
# program header field offsets
|
# program header field offsets
|
||||||
.set p_type, 0
|
.set p_type, 0
|
||||||
.set p_flags, 4
|
|
||||||
.set p_offset, 8
|
.set p32_offset, 4
|
||||||
.set p_vaddr, 16
|
.set p32_vaddr, 8
|
||||||
.set p_paddr, 24
|
.set p32_paddr, 12
|
||||||
.set p_filesz, 32
|
.set p32_filesz, 16
|
||||||
.set p_memsz, 40
|
.set p32_memsz, 20
|
||||||
.set p_align, 48
|
.set p32_flags, 24
|
||||||
|
.set p32_align, 28
|
||||||
|
|
||||||
|
.set p64_flags, 4
|
||||||
|
.set p64_offset, 8
|
||||||
|
.set p64_vaddr, 16
|
||||||
|
.set p64_paddr, 24
|
||||||
|
.set p64_filesz, 32
|
||||||
|
.set p64_memsz, 40
|
||||||
|
.set p64_align, 48
|
||||||
|
|
||||||
# p_type constants
|
# p_type constants
|
||||||
.set PT_NULL, 0
|
.set PT_NULL, 0
|
||||||
.set PT_LOAD, 1
|
.set PT_LOAD, 1
|
||||||
|
|
||||||
|
# mask for entry point and segment loading
|
||||||
|
.set LOAD_MASK, 0x07FFFFFF
|
||||||
|
|
||||||
.code16
|
.code16
|
||||||
.section .stage2
|
.section .stage2
|
||||||
|
|
||||||
|
@ -52,8 +76,12 @@ elf_validate_file_header:
|
||||||
cmpl $ELFMAGIC, (elf_file_header)
|
cmpl $ELFMAGIC, (elf_file_header)
|
||||||
jne .elf_validate_file_header_invalid_magic
|
jne .elf_validate_file_header_invalid_magic
|
||||||
|
|
||||||
|
cmpb $ELFCLASS32, (elf_file_header + EI_CLASS)
|
||||||
|
je .elf_validate_file_header_class_valid
|
||||||
cmpb $ELFCLASS64, (elf_file_header + EI_CLASS)
|
cmpb $ELFCLASS64, (elf_file_header + EI_CLASS)
|
||||||
jne .elf_validate_file_header_only_64bit_supported
|
je .elf_validate_file_header_class_valid
|
||||||
|
jmp .elf_validate_file_header_invalid_class
|
||||||
|
.elf_validate_file_header_class_valid:
|
||||||
|
|
||||||
cmpb $ELFDATA2LSB, (elf_file_header + EI_DATA)
|
cmpb $ELFDATA2LSB, (elf_file_header + EI_DATA)
|
||||||
jne .elf_validate_file_header_only_little_endian_supported
|
jne .elf_validate_file_header_only_little_endian_supported
|
||||||
|
@ -72,8 +100,8 @@ elf_validate_file_header:
|
||||||
.elf_validate_file_header_invalid_magic:
|
.elf_validate_file_header_invalid_magic:
|
||||||
movw $elf_validate_file_header_invalid_magic_msg, %si
|
movw $elf_validate_file_header_invalid_magic_msg, %si
|
||||||
jmp print_and_halt
|
jmp print_and_halt
|
||||||
.elf_validate_file_header_only_64bit_supported:
|
.elf_validate_file_header_invalid_class:
|
||||||
movw $elf_validate_file_header_only_64bit_supported_msg, %si
|
movw $elf_validate_file_header_invalid_class_msg, %si
|
||||||
jmp print_and_halt
|
jmp print_and_halt
|
||||||
.elf_validate_file_header_only_little_endian_supported:
|
.elf_validate_file_header_only_little_endian_supported:
|
||||||
movw $elf_validate_file_header_only_little_endian_supported_msg, %si
|
movw $elf_validate_file_header_only_little_endian_supported_msg, %si
|
||||||
|
@ -86,6 +114,77 @@ elf_validate_file_header:
|
||||||
jmp print_and_halt
|
jmp print_and_halt
|
||||||
|
|
||||||
|
|
||||||
|
# sets memory to zero
|
||||||
|
# edi: start address
|
||||||
|
# ecx: byte count
|
||||||
|
# on return
|
||||||
|
# edi: start address + byte count
|
||||||
|
# ecx: 0
|
||||||
|
elf_memset_zero:
|
||||||
|
test %ecx, %ecx
|
||||||
|
jz .elf_memset_zero_done
|
||||||
|
.elf_memset_zero_loop:
|
||||||
|
movb $0, (%edi)
|
||||||
|
incl %edi
|
||||||
|
decl %ecx
|
||||||
|
jnz .elf_memset_zero_loop
|
||||||
|
.elf_memset_zero_done:
|
||||||
|
ret
|
||||||
|
|
||||||
|
# reads memory specified by 32 bit elf_program_header to memory
|
||||||
|
elf_read_program_header32_to_memory:
|
||||||
|
pushal
|
||||||
|
pushl %ebp
|
||||||
|
movl %esp, %ebp
|
||||||
|
|
||||||
|
# memset p_filesz -> p_memsz to 0
|
||||||
|
movl (elf_program_header + p32_filesz), %ebx
|
||||||
|
movl (elf_program_header + p32_vaddr), %edi
|
||||||
|
andl $LOAD_MASK, %edi
|
||||||
|
addl %ebx, %edi
|
||||||
|
movl (elf_program_header + p32_memsz), %ecx
|
||||||
|
subl %ebx, %ecx
|
||||||
|
call elf_memset_zero
|
||||||
|
|
||||||
|
# read file specified in program header to memory
|
||||||
|
movl (elf_program_header + p32_offset), %eax
|
||||||
|
movl (elf_program_header + p32_vaddr), %edi
|
||||||
|
andl $LOAD_MASK, %edi
|
||||||
|
movl (elf_program_header + p32_filesz), %ecx
|
||||||
|
call *%esi
|
||||||
|
|
||||||
|
leavel
|
||||||
|
popal
|
||||||
|
ret
|
||||||
|
|
||||||
|
|
||||||
|
# reads memory specified by 64 bit elf_program_header to memory
|
||||||
|
elf_read_program_header64_to_memory:
|
||||||
|
pushal
|
||||||
|
pushl %ebp
|
||||||
|
movl %esp, %ebp
|
||||||
|
|
||||||
|
# memset p_filesz -> p_memsz to 0
|
||||||
|
movl (elf_program_header + p64_filesz), %ebx
|
||||||
|
movl (elf_program_header + p64_vaddr), %edi
|
||||||
|
andl $LOAD_MASK, %edi
|
||||||
|
addl %ebx, %edi
|
||||||
|
movl (elf_program_header + p64_memsz), %ecx
|
||||||
|
subl %ebx, %ecx
|
||||||
|
call elf_memset_zero
|
||||||
|
|
||||||
|
# read file specified in program header to memory
|
||||||
|
movl (elf_program_header + p64_offset), %eax
|
||||||
|
movl (elf_program_header + p64_vaddr), %edi
|
||||||
|
andl $LOAD_MASK, %edi
|
||||||
|
movl (elf_program_header + p64_filesz), %ecx
|
||||||
|
call *%esi
|
||||||
|
|
||||||
|
leavel
|
||||||
|
popal
|
||||||
|
ret
|
||||||
|
|
||||||
|
|
||||||
# read callback format
|
# read callback format
|
||||||
# eax: first byte
|
# eax: first byte
|
||||||
# ecx: byte count
|
# ecx: byte count
|
||||||
|
@ -104,42 +203,72 @@ elf_read_kernel_to_memory:
|
||||||
movl %esp, %ebp
|
movl %esp, %ebp
|
||||||
subl $2, %esp
|
subl $2, %esp
|
||||||
|
|
||||||
# read file header
|
# read start of file header
|
||||||
movl $0, %eax
|
movl $0, %eax
|
||||||
movl $64, %ecx
|
movl $24, %ecx
|
||||||
movl $elf_file_header, %edi
|
movl $elf_file_header, %edi
|
||||||
call *%esi
|
call *%esi
|
||||||
|
|
||||||
call elf_validate_file_header
|
call elf_validate_file_header
|
||||||
|
|
||||||
cmpl $0, (elf_file_header + e_phoff + 4)
|
# determine file header size
|
||||||
|
movl $52, %ecx
|
||||||
|
movl $64, %edx
|
||||||
|
cmpb $ELFCLASS64, (elf_file_header + EI_CLASS)
|
||||||
|
cmovel %edx, %ecx
|
||||||
|
|
||||||
|
# read full file header
|
||||||
|
movl $0, %eax
|
||||||
|
movl $elf_file_header, %edi
|
||||||
|
call *%esi
|
||||||
|
|
||||||
|
# verify that e_phoff fits in 32 bits
|
||||||
|
cmpb $ELFCLASS64, (elf_file_header + EI_CLASS)
|
||||||
|
jne .elf_read_kernel_to_memory_valid_offset
|
||||||
|
cmpl $0, (elf_file_header + e64_phoff + 4)
|
||||||
jnz .elf_read_kernel_to_memory_unsupported_offset
|
jnz .elf_read_kernel_to_memory_unsupported_offset
|
||||||
|
.elf_read_kernel_to_memory_valid_offset:
|
||||||
|
|
||||||
|
# read architecture phentsize and phnum to fixed locations
|
||||||
|
movw (elf_file_header + e32_phentsize), %ax
|
||||||
|
movw (elf_file_header + e32_phnum), %bx
|
||||||
|
movl (elf_file_header + e32_phoff), %ecx
|
||||||
|
cmpb $ELFCLASS64, (elf_file_header + EI_CLASS)
|
||||||
|
cmovew (elf_file_header + e64_phentsize), %ax
|
||||||
|
cmovew (elf_file_header + e64_phnum), %bx
|
||||||
|
cmovel (elf_file_header + e64_phoff), %ecx
|
||||||
|
movw %ax, (elf_file_header_phentsize)
|
||||||
|
movw %bx, (elf_file_header_phnum)
|
||||||
|
movl %ecx, (elf_file_header_phoff)
|
||||||
|
|
||||||
# current program header
|
# current program header
|
||||||
movw $0, -2(%ebp)
|
movw $0, -2(%ebp)
|
||||||
|
|
||||||
.elf_read_kernel_to_memory_loop_program_headers:
|
.elf_read_kernel_to_memory_loop_program_headers:
|
||||||
movw -2(%ebp), %cx
|
movw -2(%ebp), %cx
|
||||||
cmpw (elf_file_header + e_phnum), %cx
|
cmpw (elf_file_header_phnum), %cx
|
||||||
jae .elf_read_kernel_to_memory_done
|
jae .elf_read_kernel_to_memory_done
|
||||||
|
|
||||||
# eax := program_header_index * e_phentsize + e_phoff
|
# eax := program_header_index * e_phentsize + e_phoff
|
||||||
xorl %eax, %eax
|
xorl %eax, %eax
|
||||||
movw %cx, %ax
|
movw %cx, %ax
|
||||||
xorl %ebx, %ebx
|
xorl %ebx, %ebx
|
||||||
movw (elf_file_header + e_phentsize), %bx
|
movw (elf_file_header_phentsize), %bx
|
||||||
mull %ebx
|
mull %ebx
|
||||||
addl (elf_file_header + e_phoff), %eax
|
addl (elf_file_header_phoff), %eax
|
||||||
jc .elf_read_kernel_to_memory_unsupported_offset
|
jc .elf_read_kernel_to_memory_unsupported_offset
|
||||||
|
|
||||||
# setup program header size and address
|
# determine program header size
|
||||||
movl $56, %ecx
|
movl $32, %ecx
|
||||||
movl $elf_program_header, %edi
|
movl $56, %edx
|
||||||
|
cmpb $ELFCLASS64, (elf_file_header + EI_CLASS)
|
||||||
|
cmovel %edx, %ecx
|
||||||
|
|
||||||
# read the program header
|
# read program header
|
||||||
|
movl $elf_program_header, %edi
|
||||||
call *%esi
|
call *%esi
|
||||||
|
|
||||||
# test if program header is empty
|
# test if program header is NULL header
|
||||||
cmpl $PT_NULL, (elf_program_header + p_type)
|
cmpl $PT_NULL, (elf_program_header + p_type)
|
||||||
je .elf_read_kernel_to_memory_null_program_header
|
je .elf_read_kernel_to_memory_null_program_header
|
||||||
|
|
||||||
|
@ -147,33 +276,12 @@ elf_read_kernel_to_memory:
|
||||||
cmpl $PT_LOAD, (elf_program_header + p_type)
|
cmpl $PT_LOAD, (elf_program_header + p_type)
|
||||||
jne .elf_read_kernel_to_memory_not_loadable_header
|
jne .elf_read_kernel_to_memory_not_loadable_header
|
||||||
|
|
||||||
# memset p_filesz -> p_memsz to 0
|
# read program header to memory
|
||||||
movl (elf_program_header + p_filesz), %ebx
|
movl $elf_read_program_header32_to_memory, %eax
|
||||||
|
movl $elf_read_program_header64_to_memory, %ebx
|
||||||
movl (elf_program_header + p_vaddr), %edi
|
cmpb $ELFCLASS64, (elf_file_header + EI_CLASS)
|
||||||
andl $0x7FFFFFFF, %edi
|
cmovel %ebx, %eax
|
||||||
addl %ebx, %edi
|
call *%eax
|
||||||
|
|
||||||
movl (elf_program_header + p_memsz), %ecx
|
|
||||||
subl %ebx, %ecx
|
|
||||||
jz .elf_read_kernel_to_memory_memset_done
|
|
||||||
|
|
||||||
.elf_read_kernel_to_memory_memset:
|
|
||||||
movb $0, (%edi)
|
|
||||||
incl %edi
|
|
||||||
decl %ecx
|
|
||||||
jnz .elf_read_kernel_to_memory_memset
|
|
||||||
.elf_read_kernel_to_memory_memset_done:
|
|
||||||
|
|
||||||
# read file specified in program header to memory
|
|
||||||
movl (elf_program_header + p_offset), %eax
|
|
||||||
movl (elf_program_header + p_vaddr), %edi
|
|
||||||
andl $0x7FFFFFFF, %edi
|
|
||||||
movl (elf_program_header + p_filesz), %ecx
|
|
||||||
|
|
||||||
#call print_hex32; call print_newline
|
|
||||||
|
|
||||||
call *%esi
|
|
||||||
|
|
||||||
.elf_read_kernel_to_memory_null_program_header:
|
.elf_read_kernel_to_memory_null_program_header:
|
||||||
incw -2(%ebp)
|
incw -2(%ebp)
|
||||||
|
@ -185,7 +293,7 @@ elf_read_kernel_to_memory:
|
||||||
|
|
||||||
# set kernel entry address
|
# set kernel entry address
|
||||||
movl (elf_file_header + e_entry), %eax
|
movl (elf_file_header + e_entry), %eax
|
||||||
andl $0x7FFFFF, %eax
|
andl $LOAD_MASK, %eax
|
||||||
|
|
||||||
ret
|
ret
|
||||||
|
|
||||||
|
@ -200,8 +308,8 @@ elf_read_kernel_to_memory:
|
||||||
|
|
||||||
elf_validate_file_header_invalid_magic_msg:
|
elf_validate_file_header_invalid_magic_msg:
|
||||||
.asciz "ELF: file has invalid ELF magic"
|
.asciz "ELF: file has invalid ELF magic"
|
||||||
elf_validate_file_header_only_64bit_supported_msg:
|
elf_validate_file_header_invalid_class_msg:
|
||||||
.asciz "ELF: file is not targettint 64 bit"
|
.asciz "ELF: file has invalid ELF class"
|
||||||
elf_validate_file_header_only_little_endian_supported_msg:
|
elf_validate_file_header_only_little_endian_supported_msg:
|
||||||
.asciz "ELF: file is not in little endian format"
|
.asciz "ELF: file is not in little endian format"
|
||||||
elf_validate_file_header_not_current_version_msg:
|
elf_validate_file_header_not_current_version_msg:
|
||||||
|
@ -219,5 +327,12 @@ elf_read_kernel_to_memory_not_loadable_header_msg:
|
||||||
elf_file_header:
|
elf_file_header:
|
||||||
.skip 64
|
.skip 64
|
||||||
|
|
||||||
|
elf_file_header_phentsize:
|
||||||
|
.skip 2
|
||||||
|
elf_file_header_phnum:
|
||||||
|
.skip 2
|
||||||
|
elf_file_header_phoff:
|
||||||
|
.skip 4 # NOTE: only 32 bit offsets are supported
|
||||||
|
|
||||||
elf_program_header:
|
elf_program_header:
|
||||||
.skip 56
|
.skip 56
|
||||||
|
|
|
@ -1,6 +1,11 @@
|
||||||
cmake_minimum_required(VERSION 3.26)
|
cmake_minimum_required(VERSION 3.26)
|
||||||
|
|
||||||
project(x86_64-banan_os-bootloader-installer CXX)
|
if (NOT DEFINED ENV{BANAN_ARCH})
|
||||||
|
message(FATAL_ERROR "environment variable BANAN_ARCH not defined")
|
||||||
|
endif ()
|
||||||
|
set(BANAN_ARCH $ENV{BANAN_ARCH})
|
||||||
|
|
||||||
|
project(banan_os-bootloader-installer CXX)
|
||||||
|
|
||||||
set(SOURCES
|
set(SOURCES
|
||||||
crc32.cpp
|
crc32.cpp
|
||||||
|
@ -10,8 +15,8 @@ set(SOURCES
|
||||||
main.cpp
|
main.cpp
|
||||||
)
|
)
|
||||||
|
|
||||||
add_executable(x86_64-banan_os-bootloader-installer ${SOURCES})
|
add_executable(banan_os-bootloader-installer ${SOURCES})
|
||||||
target_compile_options(x86_64-banan_os-bootloader-installer PRIVATE -O2 -std=c++20)
|
target_compile_options(banan_os-bootloader-installer PRIVATE -O2 -std=c++20)
|
||||||
target_compile_definitions(x86_64-banan_os-bootloader-installer PRIVATE __arch=x86_64)
|
target_compile_definitions(banan_os-bootloader-installer PRIVATE __arch=${BANAN_ARCH})
|
||||||
target_include_directories(x86_64-banan_os-bootloader-installer PRIVATE ${CMAKE_SOURCE_DIR}/../../LibELF/include)
|
target_include_directories(banan_os-bootloader-installer PRIVATE ${CMAKE_SOURCE_DIR}/../../LibELF/include)
|
||||||
target_include_directories(x86_64-banan_os-bootloader-installer PRIVATE ${CMAKE_SOURCE_DIR}/../../kernel/include)
|
target_include_directories(banan_os-bootloader-installer PRIVATE ${CMAKE_SOURCE_DIR}/../../kernel/include)
|
||||||
|
|
|
@ -81,7 +81,7 @@ bool ELFFile::validate_elf_header() const
|
||||||
|
|
||||||
#if ARCH(x86_64)
|
#if ARCH(x86_64)
|
||||||
if (elf_header.e_ident[EI_CLASS] != ELFCLASS64)
|
if (elf_header.e_ident[EI_CLASS] != ELFCLASS64)
|
||||||
#elif ARCH(i386)
|
#elif ARCH(i686)
|
||||||
if (elf_header.e_ident[EI_CLASS] != ELFCLASS32)
|
if (elf_header.e_ident[EI_CLASS] != ELFCLASS32)
|
||||||
#endif
|
#endif
|
||||||
{
|
{
|
||||||
|
|
|
@ -4,7 +4,7 @@ project(kernel CXX C ASM)
|
||||||
|
|
||||||
if("${BANAN_ARCH}" STREQUAL "x86_64")
|
if("${BANAN_ARCH}" STREQUAL "x86_64")
|
||||||
set(ELF_FORMAT elf64-x86-64)
|
set(ELF_FORMAT elf64-x86-64)
|
||||||
elseif("${BANAN_ARCH}" STREQUAL "i386")
|
elseif("${BANAN_ARCH}" STREQUAL "i686")
|
||||||
set(ELF_FORMAT elf32-i386)
|
set(ELF_FORMAT elf32-i386)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
@ -33,6 +33,8 @@ set(KERNEL_SOURCES
|
||||||
kernel/FS/TmpFS/FileSystem.cpp
|
kernel/FS/TmpFS/FileSystem.cpp
|
||||||
kernel/FS/TmpFS/Inode.cpp
|
kernel/FS/TmpFS/Inode.cpp
|
||||||
kernel/FS/VirtualFileSystem.cpp
|
kernel/FS/VirtualFileSystem.cpp
|
||||||
|
kernel/GDT.cpp
|
||||||
|
kernel/IDT.cpp
|
||||||
kernel/Input/KeyboardLayout.cpp
|
kernel/Input/KeyboardLayout.cpp
|
||||||
kernel/Input/KeyEvent.cpp
|
kernel/Input/KeyEvent.cpp
|
||||||
kernel/Input/PS2/Controller.cpp
|
kernel/Input/PS2/Controller.cpp
|
||||||
|
@ -83,7 +85,6 @@ set(KERNEL_SOURCES
|
||||||
kernel/Storage/Partition.cpp
|
kernel/Storage/Partition.cpp
|
||||||
kernel/Storage/StorageDevice.cpp
|
kernel/Storage/StorageDevice.cpp
|
||||||
kernel/Syscall.cpp
|
kernel/Syscall.cpp
|
||||||
kernel/Syscall.S
|
|
||||||
kernel/Terminal/FramebufferTerminal.cpp
|
kernel/Terminal/FramebufferTerminal.cpp
|
||||||
kernel/Terminal/Serial.cpp
|
kernel/Terminal/Serial.cpp
|
||||||
kernel/Terminal/TTY.cpp
|
kernel/Terminal/TTY.cpp
|
||||||
|
@ -106,35 +107,33 @@ if("${BANAN_ARCH}" STREQUAL "x86_64")
|
||||||
set(KERNEL_SOURCES
|
set(KERNEL_SOURCES
|
||||||
${KERNEL_SOURCES}
|
${KERNEL_SOURCES}
|
||||||
arch/x86_64/boot.S
|
arch/x86_64/boot.S
|
||||||
arch/x86_64/GDT.cpp
|
|
||||||
arch/x86_64/IDT.cpp
|
|
||||||
arch/x86_64/interrupts.S
|
arch/x86_64/interrupts.S
|
||||||
arch/x86_64/PageTable.cpp
|
arch/x86_64/PageTable.cpp
|
||||||
arch/x86_64/Signal.S
|
arch/x86_64/Signal.S
|
||||||
|
arch/x86_64/Syscall.S
|
||||||
arch/x86_64/Thread.S
|
arch/x86_64/Thread.S
|
||||||
)
|
)
|
||||||
elseif("${BANAN_ARCH}" STREQUAL "i386")
|
file(GLOB_RECURSE LAI_SOURCES
|
||||||
|
lai/*.c
|
||||||
|
)
|
||||||
|
set(LAI_SOURCES
|
||||||
|
${LAI_SOURCES}
|
||||||
|
kernel/lai_host.cpp
|
||||||
|
)
|
||||||
|
elseif("${BANAN_ARCH}" STREQUAL "i686")
|
||||||
set(KERNEL_SOURCES
|
set(KERNEL_SOURCES
|
||||||
${KERNEL_SOURCES}
|
${KERNEL_SOURCES}
|
||||||
arch/i386/boot.S
|
arch/i686/boot.S
|
||||||
arch/i386/GDT.cpp
|
arch/i686/interrupts.S
|
||||||
arch/i386/IDT.cpp
|
arch/i686/PageTable.cpp
|
||||||
arch/i386/MMU.cpp
|
arch/i686/Signal.S
|
||||||
arch/i386/SpinLock.S
|
arch/i686/Syscall.S
|
||||||
arch/i386/Thread.S
|
arch/i686/Thread.S
|
||||||
)
|
)
|
||||||
else()
|
else()
|
||||||
message(FATAL_ERROR "unsupported architecure ${BANAN_ARCH}")
|
message(FATAL_ERROR "unsupported architecure ${BANAN_ARCH}")
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
file(GLOB_RECURSE LAI_SOURCES
|
|
||||||
lai/*.c
|
|
||||||
)
|
|
||||||
set(LAI_SOURCES
|
|
||||||
${LAI_SOURCES}
|
|
||||||
kernel/lai_host.cpp
|
|
||||||
)
|
|
||||||
|
|
||||||
set(BAN_SOURCES
|
set(BAN_SOURCES
|
||||||
../BAN/BAN/Assert.cpp
|
../BAN/BAN/Assert.cpp
|
||||||
../BAN/BAN/New.cpp
|
../BAN/BAN/New.cpp
|
||||||
|
@ -179,11 +178,11 @@ if(ENABLE_KERNEL_UBSAN)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if("${BANAN_ARCH}" STREQUAL "x86_64")
|
if("${BANAN_ARCH}" STREQUAL "x86_64")
|
||||||
target_compile_options(kernel PUBLIC -mcmodel=kernel -mno-red-zone -mno-mmx)
|
target_compile_options(kernel PUBLIC -mcmodel=kernel -mno-red-zone)
|
||||||
target_link_options(kernel PUBLIC LINKER:-z,max-page-size=4096)
|
target_link_options(kernel PUBLIC LINKER:-z,max-page-size=4096)
|
||||||
target_link_options(kernel PUBLIC LINKER:-T,${CMAKE_CURRENT_SOURCE_DIR}/arch/x86_64/linker.ld)
|
target_link_options(kernel PUBLIC LINKER:-T,${CMAKE_CURRENT_SOURCE_DIR}/arch/x86_64/linker.ld)
|
||||||
elseif("${BANAN_ARCH}" STREQUAL "i386")
|
elseif("${BANAN_ARCH}" STREQUAL "i686")
|
||||||
target_link_options(kernel PUBLIC LINKER:-T,${CMAKE_CURRENT_SOURCE_DIR}/arch/i386/linker.ld)
|
target_link_options(kernel PUBLIC LINKER:-T,${CMAKE_CURRENT_SOURCE_DIR}/arch/i686/linker.ld)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
target_link_options(kernel PUBLIC -ffreestanding -nostdlib)
|
target_link_options(kernel PUBLIC -ffreestanding -nostdlib)
|
||||||
|
|
|
@ -1,147 +0,0 @@
|
||||||
#include <BAN/Assert.h>
|
|
||||||
#include <kernel/GDT.h>
|
|
||||||
|
|
||||||
#include <string.h>
|
|
||||||
|
|
||||||
extern "C" uintptr_t g_boot_stack_top[0];
|
|
||||||
|
|
||||||
namespace Kernel::GDT
|
|
||||||
{
|
|
||||||
|
|
||||||
struct TaskStateSegment
|
|
||||||
{
|
|
||||||
uint16_t link;
|
|
||||||
uint16_t reserved1;
|
|
||||||
uint32_t esp0;
|
|
||||||
uint16_t ss0;
|
|
||||||
uint16_t reserved2;
|
|
||||||
uint32_t esp1;
|
|
||||||
uint16_t ss1;
|
|
||||||
uint16_t reserved3;
|
|
||||||
uint32_t esp2;
|
|
||||||
uint16_t ss2;
|
|
||||||
uint16_t reserved4;
|
|
||||||
uint32_t cr3;
|
|
||||||
uint32_t eip;
|
|
||||||
uint32_t eflags;
|
|
||||||
uint32_t eax;
|
|
||||||
uint32_t ecx;
|
|
||||||
uint32_t edx;
|
|
||||||
uint32_t ebx;
|
|
||||||
uint32_t esp;
|
|
||||||
uint32_t ebp;
|
|
||||||
uint32_t esi;
|
|
||||||
uint32_t edi;
|
|
||||||
uint16_t es;
|
|
||||||
uint16_t reserved5;
|
|
||||||
uint16_t cs;
|
|
||||||
uint16_t reserved6;
|
|
||||||
uint16_t ss;
|
|
||||||
uint16_t reserved7;
|
|
||||||
uint16_t ds;
|
|
||||||
uint16_t reserved8;
|
|
||||||
uint16_t fs;
|
|
||||||
uint16_t reserved9;
|
|
||||||
uint16_t gs;
|
|
||||||
uint16_t reserved10;
|
|
||||||
uint16_t ldtr;
|
|
||||||
uint16_t reserved11;
|
|
||||||
uint16_t reserved12;
|
|
||||||
uint16_t iopb;
|
|
||||||
uint32_t ssp;
|
|
||||||
} __attribute__((packed));
|
|
||||||
|
|
||||||
union SegmentDescriptor
|
|
||||||
{
|
|
||||||
struct
|
|
||||||
{
|
|
||||||
uint16_t limit1;
|
|
||||||
uint16_t base1;
|
|
||||||
uint8_t base2;
|
|
||||||
uint8_t access;
|
|
||||||
uint8_t limit2 : 4;
|
|
||||||
uint8_t flags : 4;
|
|
||||||
uint8_t base3;
|
|
||||||
} __attribute__((packed));
|
|
||||||
|
|
||||||
struct
|
|
||||||
{
|
|
||||||
uint32_t low;
|
|
||||||
uint32_t high;
|
|
||||||
} __attribute__((packed));
|
|
||||||
|
|
||||||
} __attribute__((packed));
|
|
||||||
|
|
||||||
struct GDTR
|
|
||||||
{
|
|
||||||
uint16_t size;
|
|
||||||
uint32_t address;
|
|
||||||
} __attribute__((packed));
|
|
||||||
|
|
||||||
static TaskStateSegment* s_tss = nullptr;
|
|
||||||
static SegmentDescriptor* s_gdt = nullptr;
|
|
||||||
static GDTR s_gdtr;
|
|
||||||
|
|
||||||
static void write_entry(uint8_t offset, uint32_t base, uint32_t limit, uint8_t access, uint8_t flags)
|
|
||||||
{
|
|
||||||
SegmentDescriptor& desc = *(SegmentDescriptor*)((uintptr_t)s_gdt + offset);
|
|
||||||
desc.base1 = base;
|
|
||||||
desc.base2 = base >> 16;
|
|
||||||
desc.base3 = base >> 24;
|
|
||||||
|
|
||||||
desc.limit1 = limit;
|
|
||||||
desc.limit2 = limit >> 16;
|
|
||||||
|
|
||||||
desc.access = access;
|
|
||||||
|
|
||||||
desc.flags = flags;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void write_tss(uint8_t offset)
|
|
||||||
{
|
|
||||||
s_tss = new TaskStateSegment();
|
|
||||||
ASSERT(s_tss);
|
|
||||||
|
|
||||||
memset(s_tss, 0x00, sizeof(TaskStateSegment));
|
|
||||||
s_tss->ss0 = 0x10;
|
|
||||||
s_tss->esp0 = (uintptr_t)g_boot_stack_top;
|
|
||||||
|
|
||||||
write_entry(offset, (uint32_t)s_tss, sizeof(TaskStateSegment), 0x89, 0x0);
|
|
||||||
}
|
|
||||||
|
|
||||||
void set_tss_stack(uintptr_t esp)
|
|
||||||
{
|
|
||||||
s_tss->esp0 = esp;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void flush_gdt()
|
|
||||||
{
|
|
||||||
asm volatile("lgdt %0" :: "m"(s_gdtr));
|
|
||||||
}
|
|
||||||
|
|
||||||
extern "C" void flush_tss(uint16_t offset)
|
|
||||||
{
|
|
||||||
asm volatile("ltr %0" :: "m"(offset));
|
|
||||||
}
|
|
||||||
|
|
||||||
void initialize()
|
|
||||||
{
|
|
||||||
constexpr uint32_t descriptor_count = 6;
|
|
||||||
s_gdt = new SegmentDescriptor[descriptor_count];
|
|
||||||
ASSERT(s_gdt);
|
|
||||||
|
|
||||||
s_gdtr.address = (uint64_t)s_gdt;
|
|
||||||
s_gdtr.size = descriptor_count * sizeof(SegmentDescriptor) - 1;
|
|
||||||
|
|
||||||
write_entry(0x00, 0x00000000, 0x00000, 0x00, 0x0); // null
|
|
||||||
write_entry(0x08, 0x00000000, 0xFFFFF, 0x9A, 0xC); // kernel code
|
|
||||||
write_entry(0x10, 0x00000000, 0xFFFFF, 0x92, 0xC); // kernel data
|
|
||||||
write_entry(0x18, 0x00000000, 0xFFFFF, 0xFA, 0xC); // user code
|
|
||||||
write_entry(0x20, 0x00000000, 0xFFFFF, 0xF2, 0xC); // user data
|
|
||||||
write_tss(0x28);
|
|
||||||
|
|
||||||
flush_gdt();
|
|
||||||
flush_tss(0x28);
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
|
@ -1,270 +0,0 @@
|
||||||
#include <BAN/Errors.h>
|
|
||||||
#include <kernel/IDT.h>
|
|
||||||
#include <kernel/InterruptController.h>
|
|
||||||
#include <kernel/Memory/kmalloc.h>
|
|
||||||
#include <kernel/Panic.h>
|
|
||||||
#include <kernel/Scheduler.h>
|
|
||||||
|
|
||||||
#define INTERRUPT_HANDLER____(i, msg) \
|
|
||||||
static void interrupt ## i () \
|
|
||||||
{ \
|
|
||||||
uint32_t eax, ebx, ecx, edx; \
|
|
||||||
uint32_t esp, ebp; \
|
|
||||||
uint32_t cr0, cr2, cr3, cr4; \
|
|
||||||
asm volatile("":"=a"(eax),"=b"(ebx),"=c"(ecx),"=d"(edx)); \
|
|
||||||
asm volatile("movl %%esp, %%eax":"=a"(esp)); \
|
|
||||||
asm volatile("movl %%ebp, %%eax":"=a"(ebp)); \
|
|
||||||
asm volatile("movl %%cr0, %%eax":"=a"(cr0)); \
|
|
||||||
asm volatile("movl %%cr2, %%eax":"=a"(cr2)); \
|
|
||||||
asm volatile("movl %%cr3, %%eax":"=a"(cr3)); \
|
|
||||||
asm volatile("movl %%cr4, %%eax":"=a"(cr4)); \
|
|
||||||
Kernel::panic(msg "\r\nRegister dump\r\n" \
|
|
||||||
"eax=0x{8H}, ebx=0x{8H}, ecx=0x{8H}, edx=0x{8H}\r\n" \
|
|
||||||
"esp=0x{8H}, ebp=0x{8H}\r\n" \
|
|
||||||
"CR0=0x{8H}, CR2=0x{8H}, CR3=0x{8H}, CR4=0x{8H}\r\n", \
|
|
||||||
eax, ebx, ecx, edx, esp, ebp, cr0, cr2, cr3, cr4); \
|
|
||||||
}
|
|
||||||
|
|
||||||
#define INTERRUPT_HANDLER_ERR(i, msg) \
|
|
||||||
static void interrupt ## i () \
|
|
||||||
{ \
|
|
||||||
uint32_t eax, ebx, ecx, edx; \
|
|
||||||
uint32_t esp, ebp; \
|
|
||||||
uint32_t cr0, cr2, cr3, cr4; \
|
|
||||||
uint32_t error_code; \
|
|
||||||
asm volatile("":"=a"(eax),"=b"(ebx),"=c"(ecx),"=d"(edx)); \
|
|
||||||
asm volatile("movl %%esp, %%eax":"=a"(esp)); \
|
|
||||||
asm volatile("movl %%ebp, %%eax":"=a"(ebp)); \
|
|
||||||
asm volatile("movl %%cr0, %%eax":"=a"(cr0)); \
|
|
||||||
asm volatile("movl %%cr2, %%eax":"=a"(cr2)); \
|
|
||||||
asm volatile("movl %%cr3, %%eax":"=a"(cr3)); \
|
|
||||||
asm volatile("movl %%cr4, %%eax":"=a"(cr4)); \
|
|
||||||
asm volatile("popl %%eax":"=a"(error_code)); \
|
|
||||||
Kernel::panic(msg " (error code: 0x{8H})\r\n" \
|
|
||||||
"Register dump\r\n" \
|
|
||||||
"eax=0x{8H}, ebx=0x{8H}, ecx=0x{8H}, edx=0x{8H}\r\n" \
|
|
||||||
"esp=0x{8H}, ebp=0x{8H}\r\n" \
|
|
||||||
"CR0=0x{8H}, CR2=0x{8H}, CR3=0x{8H}, CR4=0x{8H}\r\n", \
|
|
||||||
eax, ebx, ecx, edx, esp, ebp, cr0, cr2, cr3, cr4, error_code); \
|
|
||||||
}
|
|
||||||
|
|
||||||
// Installs the handler generated by INTERRUPT_HANDLER____/_ERR for vector 'i'.
#define REGISTER_HANDLER(i) register_interrupt_handler(i, interrupt ## i)
||||||
namespace IDT
|
|
||||||
{
|
|
||||||
|
|
||||||
	// One 8-byte 32-bit x86 IDT entry.
	struct GateDescriptor
	{
		uint16_t offset1;	// handler address bits 0..15
		uint16_t selector;	// code segment selector to run the handler in
		uint8_t reserved : 5;
		uint8_t zero1 : 3;
		uint8_t type : 4;	// gate type (0xE = 32-bit interrupt gate)
		uint8_t zero2 : 1;
		uint8_t DPL : 2;	// lowest privilege level allowed to invoke this vector
		uint8_t present : 1;
		uint16_t offset2;	// handler address bits 16..31
	} __attribute__((packed));
|
|
||||||
struct IDTR
|
|
||||||
{
|
|
||||||
uint16_t size;
|
|
||||||
void* offset;
|
|
||||||
} __attribute((packed));
|
|
||||||
|
|
||||||
	// IDTR value passed to 'lidt'; points at s_idt.
	static IDTR s_idtr;
	// The 256-entry IDT itself, allocated and zeroed in initialize().
	static GateDescriptor* s_idt = nullptr;

	// Callbacks for the 16 hardware IRQ lines, set by register_irq_handler().
	static void(*s_irq_handlers[16])() { nullptr };
|
||||||
INTERRUPT_HANDLER____(0x00, "Division Error")
|
|
||||||
INTERRUPT_HANDLER____(0x01, "Debug")
|
|
||||||
INTERRUPT_HANDLER____(0x02, "Non-maskable Interrupt")
|
|
||||||
INTERRUPT_HANDLER____(0x03, "Breakpoint")
|
|
||||||
INTERRUPT_HANDLER____(0x04, "Overflow")
|
|
||||||
INTERRUPT_HANDLER____(0x05, "Bound Range Exception")
|
|
||||||
INTERRUPT_HANDLER____(0x06, "Invalid Opcode")
|
|
||||||
INTERRUPT_HANDLER____(0x07, "Device Not Available")
|
|
||||||
INTERRUPT_HANDLER_ERR(0x08, "Double Fault")
|
|
||||||
INTERRUPT_HANDLER____(0x09, "Coprocessor Segment Overrun")
|
|
||||||
INTERRUPT_HANDLER_ERR(0x0A, "Invalid TSS")
|
|
||||||
INTERRUPT_HANDLER_ERR(0x0B, "Segment Not Present")
|
|
||||||
INTERRUPT_HANDLER_ERR(0x0C, "Stack-Segment Fault")
|
|
||||||
INTERRUPT_HANDLER_ERR(0x0D, "General Protection Fault")
|
|
||||||
INTERRUPT_HANDLER_ERR(0x0E, "Page Fault")
|
|
||||||
INTERRUPT_HANDLER____(0x0F, "Unknown Exception 0x0F")
|
|
||||||
INTERRUPT_HANDLER____(0x10, "x87 Floating-Point Exception")
|
|
||||||
INTERRUPT_HANDLER_ERR(0x11, "Alignment Check")
|
|
||||||
INTERRUPT_HANDLER____(0x12, "Machine Check")
|
|
||||||
INTERRUPT_HANDLER____(0x13, "SIMD Floating-Point Exception")
|
|
||||||
INTERRUPT_HANDLER____(0x14, "Virtualization Exception")
|
|
||||||
INTERRUPT_HANDLER_ERR(0x15, "Control Protection Exception")
|
|
||||||
INTERRUPT_HANDLER____(0x16, "Unknown Exception 0x16")
|
|
||||||
INTERRUPT_HANDLER____(0x17, "Unknown Exception 0x17")
|
|
||||||
INTERRUPT_HANDLER____(0x18, "Unknown Exception 0x18")
|
|
||||||
INTERRUPT_HANDLER____(0x19, "Unknown Exception 0x19")
|
|
||||||
INTERRUPT_HANDLER____(0x1A, "Unknown Exception 0x1A")
|
|
||||||
INTERRUPT_HANDLER____(0x1B, "Unknown Exception 0x1B")
|
|
||||||
INTERRUPT_HANDLER____(0x1C, "Hypervisor Injection Exception")
|
|
||||||
INTERRUPT_HANDLER_ERR(0x1D, "VMM Communication Exception")
|
|
||||||
INTERRUPT_HANDLER_ERR(0x1E, "Security Exception")
|
|
||||||
INTERRUPT_HANDLER____(0x1F, "Unkown Exception 0x1F")
|
|
||||||
|
|
||||||
extern "C" void handle_irq()
|
|
||||||
{
|
|
||||||
uint8_t irq;
|
|
||||||
for (uint32_t i = 0; i < 16; i++)
|
|
||||||
{
|
|
||||||
if (InterruptController::get().is_in_service(i))
|
|
||||||
{
|
|
||||||
irq = i;
|
|
||||||
goto found;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
dprintln("Spurious irq");
|
|
||||||
return;
|
|
||||||
found:
|
|
||||||
if (s_irq_handlers[irq])
|
|
||||||
s_irq_handlers[irq]();
|
|
||||||
else
|
|
||||||
dprintln("no handler for irq 0x{2H}\n", irq);
|
|
||||||
|
|
||||||
// NOTE: Scheduler sends PIT eoi's
|
|
||||||
if (irq != PIT_IRQ)
|
|
||||||
InterruptController::get().eoi(irq);
|
|
||||||
|
|
||||||
Kernel::Scheduler::get().reschedule_if_idling();
|
|
||||||
}
|
|
||||||
|
|
||||||
	extern "C" void handle_irq_common();
	// Assembly stub installed in the IDT for hardware interrupts: saves all
	// general registers, loads the kernel data segment into ds/es (copied
	// from ss, which the CPU already switched), calls handle_irq() and
	// restores everything before iret.
	asm(
		".globl handle_irq_common;"
		"handle_irq_common:"
		"pusha;"
		"pushw %ds;"
		"pushw %es;"
		"pushw %ss;"
		"pushw %ss;"
		"popw %ds;"
		"popw %es;"
		"call handle_irq;"
		"popw %es;"
		"popw %ds;"
		"popa;"
		"iret;"
	);

	extern "C" void syscall_asm();
	// Assembly stub for the syscall interrupt: saves registers, switches
	// ds/es to the kernel data segment, pushes eax/ebx/ecx/edx as cdecl
	// arguments for cpp_syscall_handler, then restores everything except
	// eax, which carries the handler's return value back to the caller.
	asm(
		".global syscall_asm;"
		"syscall_asm:"
		"pusha;"
		"pushw %ds;"
		"pushw %es;"
		"pushw %ss;"
		"pushw %ss;"
		"popw %ds;"
		"popw %es;"
		"pushl %edx;"
		"pushl %ecx;"
		"pushl %ebx;"
		"pushl %eax;"
		"call cpp_syscall_handler;"
		"addl $16, %esp;"	// pop the four argument dwords
		"popw %es;"
		"popw %ds;"

		// NOTE: following instructions are same as in 'popa', except we skip eax
		// since it holds the return value of the syscall.
		"popl %edi;"
		"popl %esi;"
		"popl %ebp;"
		"addl $4, %esp;"	// skip saved esp (popa ignores it too)
		"popl %ebx;"
		"popl %edx;"
		"popl %ecx;"
		"addl $4, %esp;"	// skip saved eax, keeping the return value

		"iret;"
	);
|
||||||
	// Loads s_idtr into the CPU's IDTR register, activating s_idt.
	static void flush_idt()
	{
		asm volatile("lidt %0"::"m"(s_idtr));
	}
|
||||||
static void register_interrupt_handler(uint8_t index, void(*f)())
|
|
||||||
{
|
|
||||||
GateDescriptor& descriptor = s_idt[index];
|
|
||||||
descriptor.offset1 = (uint32_t)f & 0xFFFF;
|
|
||||||
descriptor.selector = 0x08;
|
|
||||||
descriptor.type = 0xE;
|
|
||||||
descriptor.DPL = 0;
|
|
||||||
descriptor.present = 1;
|
|
||||||
descriptor.offset2 = (uint32_t)f >> 16;
|
|
||||||
}
|
|
||||||
|
|
||||||
	// Registers 'f' as the handler for hardware IRQ line 'irq' and installs
	// the shared assembly stub at the corresponding IDT vector.
	void register_irq_handler(uint8_t irq, void(*f)())
	{
		s_irq_handlers[irq] = f;
		register_interrupt_handler(IRQ_VECTOR_BASE + irq, handle_irq_common);
		flush_idt();
	}

	// Installs 'handler' at vector 'offset' and opens it to ring 3 (DPL 3)
	// so userspace can issue 'int $offset'. No flush_idt() is required:
	// the CPU reads IDT entries from memory on each interrupt.
	void register_syscall_handler(uint8_t offset, void(*handler)())
	{
		register_interrupt_handler(offset, handler);
		s_idt[offset].DPL = 3;
	}
|
||||||
	// Allocates and zeroes the 256-entry IDT, installs panic handlers for
	// the 32 CPU exception vectors and the userspace syscall gate at 0x80,
	// then loads the IDT into the CPU.
	void initialize()
	{
		constexpr size_t idt_size = 0x100 * sizeof(GateDescriptor);

		s_idt = (GateDescriptor*)kmalloc(idt_size);
		ASSERT(s_idt);
		memset(s_idt, 0x00, idt_size);

		s_idtr.offset = s_idt;
		s_idtr.size = idt_size - 1;	// IDTR limit is size in bytes minus one

		REGISTER_HANDLER(0x00);
		REGISTER_HANDLER(0x01);
		REGISTER_HANDLER(0x02);
		REGISTER_HANDLER(0x03);
		REGISTER_HANDLER(0x04);
		REGISTER_HANDLER(0x05);
		REGISTER_HANDLER(0x06);
		REGISTER_HANDLER(0x07);
		REGISTER_HANDLER(0x08);
		REGISTER_HANDLER(0x09);
		REGISTER_HANDLER(0x0A);
		REGISTER_HANDLER(0x0B);
		REGISTER_HANDLER(0x0C);
		REGISTER_HANDLER(0x0D);
		REGISTER_HANDLER(0x0E);
		REGISTER_HANDLER(0x0F);
		REGISTER_HANDLER(0x10);
		REGISTER_HANDLER(0x11);
		REGISTER_HANDLER(0x12);
		REGISTER_HANDLER(0x13);
		REGISTER_HANDLER(0x14);
		REGISTER_HANDLER(0x15);
		REGISTER_HANDLER(0x16);
		REGISTER_HANDLER(0x17);
		REGISTER_HANDLER(0x18);
		REGISTER_HANDLER(0x19);
		REGISTER_HANDLER(0x1A);
		REGISTER_HANDLER(0x1B);
		REGISTER_HANDLER(0x1C);
		REGISTER_HANDLER(0x1D);
		REGISTER_HANDLER(0x1E);
		REGISTER_HANDLER(0x1F);

		register_syscall_handler(0x80, syscall_asm);

		flush_idt();
	}
||||||
|
|
||||||
}
|
|
|
@ -1,227 +0,0 @@
|
||||||
#include <BAN/Errors.h>
|
|
||||||
#include <kernel/Debug.h>
|
|
||||||
#include <kernel/Memory/MMU.h>
|
|
||||||
#include <kernel/Memory/kmalloc.h>
|
|
||||||
|
|
||||||
#include <string.h>
|
|
||||||
|
|
||||||
#define MMU_DEBUG_PRINT 0
|
|
||||||
|
|
||||||
// bits 31-12 set
|
|
||||||
#define PAGE_MASK 0xfffff000
|
|
||||||
#define FLAGS_MASK 0x00000fff
|
|
||||||
|
|
||||||
namespace Kernel
|
|
||||||
{
|
|
||||||
|
|
||||||
	static MMU* s_instance = nullptr;

	// Creates the kernel's MMU singleton, builds the initial kernel paging
	// structures and loads them into CR3.
	void MMU::initialize()
	{
		ASSERT(s_instance == nullptr);
		s_instance = new MMU();
		ASSERT(s_instance);
		s_instance->initialize_kernel();
		s_instance->load();
	}

	// Returns the kernel MMU singleton; initialize() must have run first.
	MMU& MMU::get()
	{
		ASSERT(s_instance);
		return *s_instance;
	}

	// Allocates one zeroed, PAGE_SIZE-aligned page for use as a paging structure.
	static uint64_t* allocate_page_aligned_page()
	{
		uint64_t* page = (uint64_t*)kmalloc(PAGE_SIZE, PAGE_SIZE);
		ASSERT(page);
		ASSERT(((uintptr_t)page % PAGE_SIZE) == 0);
		memset(page, 0, PAGE_SIZE);
		return page;
	}
|
||||||
|
|
||||||
	// Builds the kernel's initial PAE paging structures: a 4-entry PDPT with
	// four page directories, identity maps the first 6 MiB, and unmaps the
	// first page so nullptr dereferences fault.
	void MMU::initialize_kernel()
	{
		// PAE PDPT: 4 entries, must be 32-byte aligned
		m_highest_paging_struct = (uint64_t*)kmalloc(sizeof(uint64_t) * 4, 32);
		ASSERT(m_highest_paging_struct);
		ASSERT(((uintptr_t)m_highest_paging_struct % 32) == 0);

		// allocate all page directories
		for (int i = 0; i < 4; i++)
		{
			uint64_t* page_directory = allocate_page_aligned_page();
			m_highest_paging_struct[i] = (uint64_t)page_directory | Flags::Present;
		}

		// FIXME: We should just identity map until g_kernel_end

		// create and identity map first 6 MiB
		uint64_t* page_directory1 = (uint64_t*)(m_highest_paging_struct[0] & PAGE_MASK);
		for (uint64_t i = 0; i < 3; i++)
		{
			uint64_t* page_table = allocate_page_aligned_page();
			for (uint64_t j = 0; j < 512; j++)
				page_table[j] = (i << 21) | (j << 12) | Flags::ReadWrite | Flags::Present;

			page_directory1[i] = (uint64_t)page_table | Flags::ReadWrite | Flags::Present;
		}

		// dont map first page (0 -> 4 KiB) so that nullptr dereference
		// causes page fault :)
		uint64_t* page_table1 = (uint64_t*)(page_directory1[0] & PAGE_MASK);
		page_table1[0] = 0;
	}
|
||||||
|
|
||||||
	// Constructs a new address space. For the very first MMU (the kernel's
	// own, while s_instance is still null) the paging structs are left for
	// initialize_kernel() to build; otherwise the kernel's global structures
	// are deep-copied so they appear in every process.
	MMU::MMU()
	{
		if (s_instance == nullptr)
			return;

		// Here we copy the s_instances paging structs since they are
		// global for every process

		uint64_t* global_pdpt = s_instance->m_highest_paging_struct;

		uint64_t* pdpt = (uint64_t*)kmalloc(sizeof(uint64_t) * 4, 32);
		ASSERT(pdpt);

		for (uint32_t pdpte = 0; pdpte < 4; pdpte++)
		{
			if (!(global_pdpt[pdpte] & Flags::Present))
				continue;

			uint64_t* global_pd = (uint64_t*)(global_pdpt[pdpte] & PAGE_MASK);

			// new page directory, keeping the original entry's flag bits
			uint64_t* pd = allocate_page_aligned_page();
			pdpt[pdpte] = (uint64_t)pd | (global_pdpt[pdpte] & ~PAGE_MASK);

			for (uint32_t pde = 0; pde < 512; pde++)
			{
				if (!(global_pd[pde] & Flags::Present))
					continue;

				uint64_t* global_pt = (uint64_t*)(global_pd[pde] & PAGE_MASK);

				// new page table, copied wholesale from the global one
				uint64_t* pt = allocate_page_aligned_page();
				pd[pde] = (uint64_t)pt | (global_pd[pde] & ~PAGE_MASK);

				memcpy(pt, global_pt, PAGE_SIZE);
			}
		}

		m_highest_paging_struct = pdpt;
	}
|
|
||||||
|
|
||||||
MMU::~MMU()
|
|
||||||
{
|
|
||||||
uint64_t* pdpt = m_highest_paging_struct;
|
|
||||||
for (uint32_t pdpte = 0; pdpte < 512; pdpte++)
|
|
||||||
{
|
|
||||||
if (!(pdpt[pdpte] & Flags::Present))
|
|
||||||
continue;
|
|
||||||
uint64_t* pd = (uint64_t*)(pdpt[pdpte] & PAGE_MASK);
|
|
||||||
for (uint32_t pde = 0; pde < 512; pde++)
|
|
||||||
{
|
|
||||||
if (!(pd[pde] & Flags::Present))
|
|
||||||
continue;
|
|
||||||
kfree((void*)(pd[pde] & PAGE_MASK));
|
|
||||||
}
|
|
||||||
kfree(pd);
|
|
||||||
}
|
|
||||||
kfree(pdpt);
|
|
||||||
}
|
|
||||||
|
|
||||||
	// Activates this address space by pointing CR3 at the PDPT.
	void MMU::load()
	{
		asm volatile("movl %0, %%cr3" :: "r"(m_highest_paging_struct));
	}
|
||||||
|
|
||||||
void MMU::map_page_at(paddr_t paddr, vaddr_t vaddr, uint8_t flags)
|
|
||||||
{
|
|
||||||
#if MMU_DEBUG_PRINT
|
|
||||||
dprintln("AllocatePage(0x{8H})", address);
|
|
||||||
#endif
|
|
||||||
ASSERT(flags & Flags::Present);
|
|
||||||
|
|
||||||
ASSERT(!(paddr & ~PAGE_MASK));
|
|
||||||
ASSERT(!(vaddr & ~PAGE_MASK));
|
|
||||||
|
|
||||||
uint32_t pdpte = (vaddr & 0xC0000000) >> 30;
|
|
||||||
uint32_t pde = (vaddr & 0x3FE00000) >> 21;
|
|
||||||
uint32_t pte = (vaddr & 0x001FF000) >> 12;
|
|
||||||
|
|
||||||
uint64_t* page_directory = (uint64_t*)(m_highest_paging_struct[pdpte] & PAGE_MASK);
|
|
||||||
if (!(page_directory[pde] & Flags::Present))
|
|
||||||
{
|
|
||||||
uint64_t* page_table = allocate_page_aligned_page();
|
|
||||||
page_directory[pde] = (uint64_t)page_table;
|
|
||||||
}
|
|
||||||
page_directory[pde] |= flags;
|
|
||||||
|
|
||||||
uint64_t* page_table = (uint64_t*)(page_directory[pde] & PAGE_MASK);
|
|
||||||
page_table[pte] = paddr | flags;
|
|
||||||
}
|
|
||||||
|
|
||||||
	// Maps the page containing 'address' so that virtual == physical.
	void MMU::identity_map_page(paddr_t address, uint8_t flags)
	{
		address &= PAGE_MASK;
		map_page_at(address, address, flags);
	}
|
||||||
|
|
||||||
void MMU::identity_map_range(paddr_t address, ptrdiff_t size, uint8_t flags)
|
|
||||||
{
|
|
||||||
paddr_t s_page = address & PAGE_MASK;
|
|
||||||
paddr_t e_page = (address + size - 1) & PAGE_MASK;
|
|
||||||
for (paddr_t page = s_page; page <= e_page; page += PAGE_SIZE)
|
|
||||||
identity_map_page(page, flags);
|
|
||||||
}
|
|
||||||
|
|
||||||
	// Removes the mapping of the page containing virtual 'address', if any.
	void MMU::unmap_page(vaddr_t address)
	{
#if MMU_DEBUG_PRINT
		dprintln("UnAllocatePage(0x{8H})", address & PAGE_MASK);
#endif

		uint32_t pdpte = (address & 0xC0000000) >> 30;
		uint32_t pde = (address & 0x3FE00000) >> 21;
		uint32_t pte = (address & 0x001FF000) >> 12;

		uint64_t* page_directory = (uint64_t*)(m_highest_paging_struct[pdpte] & PAGE_MASK);
		if (!(page_directory[pde] & Flags::Present))
			return;	// nothing mapped under this directory entry

		uint64_t* page_table = (uint64_t*)(page_directory[pde] & PAGE_MASK);
		if (!(page_table[pte] & Flags::Present))
			return;	// page itself not mapped

		page_table[pte] = 0;

		// TODO: Unallocate the page table if this was the only allocated page
	}
|
||||||
|
|
||||||
void MMU::unmap_range(vaddr_t address, ptrdiff_t size)
|
|
||||||
{
|
|
||||||
uintptr_t s_page = address & PAGE_MASK;
|
|
||||||
uintptr_t e_page = (address + size - 1) & PAGE_MASK;
|
|
||||||
for (uintptr_t page = s_page; page <= e_page; page += PAGE_SIZE)
|
|
||||||
unmap_page(page);
|
|
||||||
}
|
|
||||||
|
|
||||||
	// Returns the flag bits of the mapping covering 'address', or 0 if unmapped.
	uint8_t MMU::get_page_flags(vaddr_t address) const
	{
		uint32_t pdpte = (address & 0xC0000000) >> 30;
		uint32_t pde = (address & 0x3FE00000) >> 21;
		uint32_t pte = (address & 0x001FF000) >> 12;

		uint64_t* page_directory = (uint64_t*)(m_highest_paging_struct[pdpte] & PAGE_MASK);
		if (!(page_directory[pde] & Flags::Present))
			return 0;

		uint64_t* page_table = (uint64_t*)(page_directory[pde] & PAGE_MASK);
		if (!(page_table[pte] & Flags::Present))
			return 0;

		return page_table[pte] & FLAGS_MASK;
	}
|
|
||||||
|
|
||||||
}
|
|
|
@ -1,19 +0,0 @@
|
||||||
# void spinlock_lock_asm(uint32_t* lock)
# Spins until bit 0 of *lock is atomically acquired.
.global spinlock_lock_asm
spinlock_lock_asm:
	movl 4(%esp), %eax
	lock; btsl $0, (%eax)	# try to take the lock (atomic test-and-set)
	jnc .done		# bit was clear -> acquired
.retry:
	pause			# spin-wait hint for the CPU
	testl $1, (%eax)	# read-only wait until the lock looks free
	jne .retry
	lock; btsl $0, (%eax)	# race for it again
	jc .retry
.done:
	ret

# void spinlock_unlock_asm(uint32_t* lock)
# Releases the lock; a plain aligned store is atomic on x86.
.global spinlock_unlock_asm
spinlock_unlock_asm:
	movl 4(%esp), %eax
	movl $0, (%eax)
	ret
|
|
|
@ -1,47 +0,0 @@
|
||||||
# uint32_t read_rip()
# Returns the caller's return address (the instruction after the call) in eax.
.global read_rip
read_rip:
	popl %eax
	jmp *%eax

# Pushed as the fake return address for a new thread; presumably forwards
# to a thread-exit routine placed on the stack — TODO confirm against the
# Thread stack setup code.
exit_thread_trampoline:
	addl $4, %esp
	pushl (%esp)
	ret

# void start_thread(uint32_t esp, uint32_t eip)
# Switches to the new thread's stack, clears ebp for clean stack traces,
# arranges exit_thread_trampoline as the return address, enables interrupts
# and jumps to the entry point.
.global start_thread
start_thread:
	movl 8(%esp), %ecx
	movl 4(%esp), %esp
	movl $0, %ebp
	pushl $exit_thread_trampoline
	sti
	jmp *%ecx

# void continue_thread(uint32_t rsp, uint32_t rip)
# Resumes a previously saved context; eax is zeroed so the resumed code
# observes 0 (e.g. as a return value) — confirm against callers.
.global continue_thread
continue_thread:
	movl 8(%esp), %ecx
	movl 4(%esp), %esp
	movl $0, %eax
	jmp *%ecx

# void thread_jump_userspace(uint32_t rsp, uint32_t rip)
# Loads the userspace data selectors (0x23) and builds an iret frame that
# drops to ring 3 at 'rip' with user code selector 0x1B.
.global thread_jump_userspace
thread_jump_userspace:
	movl $0x23, %eax
	movw %ax, %ds
	movw %ax, %es
	movw %ax, %fs
	movw %ax, %gs

	movl 8(%esp), %ecx
	movl 4(%esp), %esp

	pushl $0x23
	# NOTE(review): 'pushl %esp' pushes the current esp, which already has
	# the 0x23 push applied — the esp restored by iret ends up 4 bytes
	# below the passed-in stack top. Verify this is what callers expect.
	pushl %esp
	pushfl
	pushl $0x1B
	pushl %ecx
	iret
|
|
|
@ -1,182 +0,0 @@
|
||||||
# Declare constants for the multiboot header
.set ALIGN, 1<<0 # align loaded modules on page boundaries
.set MEMINFO, 1<<1 # provide memory map
.set VIDEOINFO, 1<<2 # provide video info
.set MB_FLAGS, ALIGN | MEMINFO | VIDEOINFO # this is the Multiboot 'flag' field
.set MB_MAGIC, 0x1BADB002 # 'magic number' lets bootloader find the header
.set MB_CHECKSUM, -(MB_MAGIC + MB_FLAGS) #checksum of above, to prove we are multiboot

# Multiboot header
.section .multiboot, "aw"
	.align 4
	.long MB_MAGIC
	.long MB_FLAGS
	.long MB_CHECKSUM
	.skip 20	# unused load-address fields

	# requested video mode: mode type 0 (linear), 800x600, 32 bpp
	.long 0
	.long 800
	.long 600
	.long 32

.section .bss, "aw", @nobits
	# Create stack
	.global g_boot_stack_bottom
	g_boot_stack_bottom:
	.skip 16384
	.global g_boot_stack_top
	g_boot_stack_top:

	# 0 MiB -> 1 MiB: bootloader stuff
	# 1 MiB -> : kernel

	# early-boot PAE paging structures
	.align 32
	boot_page_directory_pointer_table:
	.skip 4 * 8
	.align 4096
	boot_page_directory1:
	.skip 512 * 8

	# copy of the multiboot command line
	.global g_kernel_cmdline
	g_kernel_cmdline:
	.skip 4096

	# raw multiboot registers saved at entry
	.global g_multiboot_info
	g_multiboot_info:
	.skip 4
	.global g_multiboot_magic
	g_multiboot_magic:
	.skip 4

.section .text

# Minimal flat GDT: null, kernel code (0x08), kernel data (0x10)
boot_gdt:
	.quad 0x0000000000000000 # null
	.quad 0x00CF9A000000FFFF # kernel code
	.quad 0x00CF92000000FFFF # kernel data
boot_gdtr:
	.short . - boot_gdt - 1
	.long boot_gdt

# Returns ZF clear iff CPUID is supported (EFLAGS.ID bit can be toggled).
has_cpuid:
	pushfl
	pushfl
	xorl $0x00200000, (%esp)
	popfl
	pushfl
	popl %eax
	xorl (%esp), %eax
	popfl
	testl $0x00200000, %eax
	ret
|
||||||
# Returns ZF clear iff the CPU supports PAE.
has_pae:
	# CPUID leaf 1 returns the feature flags in edx; the old code queried
	# leaf 0 (vendor id leaf), whose edx holds vendor string bytes rather
	# than a feature mask, so the PAE check tested garbage.
	movl $1, %eax
	cpuid
	testl $(1 << 6), %edx	# CPUID.01H:EDX bit 6 = PAE
	ret
||||||
# Returns ZF clear iff the CPU supports SSE (CPUID.01H:EDX bit 25).
has_sse:
	movl $1, %eax
	cpuid
	testl $(1 << 25), %edx
	ret

# Halts the machine unless CPUID, PAE and SSE are all available.
check_requirements:
	call has_cpuid
	jz .exit
	call has_pae
	jz .exit
	call has_sse
	jz .exit
	ret
.exit:
	jmp system_halt

# Copies the multiboot command line into g_kernel_cmdline.
# NOTE(review): copies a fixed 1024 dwords (4096 B) from the pointer at
# offset 16 of the multiboot info regardless of the string's real length
# or the cmdline flag bit — may read past the source buffer; confirm.
copy_kernel_commandline:
	pushl %esi
	pushl %edi
	movl g_multiboot_info, %esi
	addl $16, %esi
	movl (%esi), %esi
	movl $1024, %ecx
	movl $g_kernel_cmdline, %edi
	rep movsl
	popl %edi
	popl %esi
	ret

# Enables SSE: clear CR0.EM, set CR0.MP, set CR4.OSFXSR | CR4.OSXMMEXCPT.
enable_sse:
	movl %cr0, %eax
	andw $0xFFFB, %ax
	orw $0x0002, %ax
	movl %eax, %cr0
	movl %cr4, %eax
	orw $0x0600, %ax
	movl %eax, %cr4
	ret

# Builds the boot PAE paging structures and turns paging on.
initialize_paging:
	# identity map first 6 MiB using 2 MiB pages (0x83 = PS | RW | Present)
	movl $(0x00000000 + 0x83), boot_page_directory1 + 0
	movl $(0x00200000 + 0x83), boot_page_directory1 + 8
	movl $(0x00400000 + 0x83), boot_page_directory1 + 16
	movl $(boot_page_directory1 + 0x01), boot_page_directory_pointer_table

	# enable PAE (CR4 bit 5)
	movl %cr4, %ecx
	orl $0x20, %ecx
	movl %ecx, %cr4

	# set address of paging structures
	movl $boot_page_directory_pointer_table, %ecx
	movl %ecx, %cr3

	# enable paging (CR0.PG)
	movl %cr0, %ecx
	orl $0x80000000, %ecx
	movl %ecx, %cr0

	ret

# Loads the boot GDT and reloads every segment register.
initialize_gdt:
	lgdt boot_gdtr

	# flush gdt: data segments from 0x10, code segment via far jump to 0x08
	movw $0x10, %ax
	movw %ax, %ds
	movw %ax, %es
	movw %ax, %fs
	movw %ax, %gs
	movw %ax, %ss
	jmp $0x08, $flush
flush:
	ret

# Kernel entry point jumped to by the multiboot bootloader
# (eax = multiboot magic, ebx = multiboot info pointer).
.global _start
.type _start, @function
_start:
	# Initialize stack and multiboot info
	movl $g_boot_stack_top, %esp
	movl %eax, g_multiboot_magic
	movl %ebx, g_multiboot_info

	call copy_kernel_commandline
	call check_requirements
	call enable_sse

	call initialize_paging
	call initialize_gdt

	call _init

	# call to the kernel itself (clear ebp for stacktrace)
	xorl %ebp, %ebp
	call kernel_main

	call _fini

system_halt:
	xchgw %bx, %bx	# Bochs magic breakpoint
	cli
1:	hlt
	jmp 1b
|
|
@ -1,28 +0,0 @@
|
||||||
ENTRY (_start)

/* Kernel is linked (and here also loaded) at 1 MiB. */
SECTIONS
{
	. = 0x00100000;

	g_kernel_start = .;
	.text BLOCK(4K) : ALIGN(4K)
	{
		*(.multiboot)
		*(.text)
		*(.text.*)	/* also collect per-function sections */
	}
	.rodata BLOCK(4K) : ALIGN(4K)
	{
		*(.rodata)	/* plain .rodata was previously left as an orphan section */
		*(.rodata.*)
	}
	.data BLOCK(4K) : ALIGN(4K)
	{
		*(.data)
		*(.data.*)
	}
	.bss BLOCK(4K) : ALIGN(4K)
	{
		*(COMMON)
		*(.bss)
		*(.bss.*)
	}

	g_kernel_end = .;
}
|
|
@ -0,0 +1,620 @@
|
||||||
|
#include <kernel/CPUID.h>
|
||||||
|
#include <kernel/Lock/SpinLock.h>
|
||||||
|
#include <kernel/Memory/kmalloc.h>
|
||||||
|
#include <kernel/Memory/PageTable.h>
|
||||||
|
|
||||||
|
extern uint8_t g_kernel_start[];
|
||||||
|
extern uint8_t g_kernel_end[];
|
||||||
|
|
||||||
|
extern uint8_t g_kernel_execute_start[];
|
||||||
|
extern uint8_t g_kernel_execute_end[];
|
||||||
|
|
||||||
|
extern uint8_t g_userspace_start[];
|
||||||
|
extern uint8_t g_userspace_end[];
|
||||||
|
|
||||||
|
namespace Kernel
|
||||||
|
{
|
||||||
|
|
||||||
|
	RecursiveSpinLock PageTable::s_fast_page_lock;

	static PageTable* s_kernel = nullptr;
	static bool s_has_nxe = false;	// CPU supports the EFER.NXE no-execute bit
	static bool s_has_pge = false;	// CPU supports global pages (CR4.PGE)

	// Physical address of the PDPT entry shared by every address space;
	// maps the kernel region (set up once in initialize_kernel()).
	static paddr_t s_global_pdpte = 0;

	// Translates a raw 64-bit paging entry into PageTable::Flags.
	// The NX bit (bit 63) is inverted: a clear NX bit means executable.
	static inline PageTable::flags_t parse_flags(uint64_t entry)
	{
		using Flags = PageTable::Flags;

		PageTable::flags_t result = 0;
		// NOTE(review): when NXE is unsupported every mapping is executable,
		// yet Execute is never reported here — confirm callers expect that.
		if (s_has_nxe && !(entry & (1ull << 63)))
			result |= Flags::Execute;
		if (entry & Flags::Reserved)
			result |= Flags::Reserved;
		if (entry & Flags::CacheDisable)
			result |= Flags::CacheDisable;
		if (entry & Flags::UserSupervisor)
			result |= Flags::UserSupervisor;
		if (entry & Flags::ReadWrite)
			result |= Flags::ReadWrite;
		if (entry & Flags::Present)
			result |= Flags::Present;
		return result;
	}
||||||
|
|
||||||
|
	// Detects CPU paging features (NXE, PGE), creates the kernel page table
	// singleton, builds the kernel mappings and loads them.
	void PageTable::initialize()
	{
		if (CPUID::has_nxe())
			s_has_nxe = true;

		if (CPUID::has_pge())
			s_has_pge = true;

		ASSERT(s_kernel == nullptr);
		s_kernel = new PageTable();
		ASSERT(s_kernel);

		s_kernel->initialize_kernel();
		s_kernel->initial_load();
	}
||||||
|
|
||||||
|
void PageTable::initial_load()
|
||||||
|
{
|
||||||
|
if (s_has_nxe)
|
||||||
|
{
|
||||||
|
asm volatile(
|
||||||
|
"movl $0xC0000080, %%ecx;"
|
||||||
|
"rdmsr;"
|
||||||
|
"orl $0x800, %%eax;"
|
||||||
|
"wrmsr"
|
||||||
|
::: "eax", "ecx", "edx", "memory"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (s_has_pge)
|
||||||
|
{
|
||||||
|
asm volatile(
|
||||||
|
"movl %%cr4, %%eax;"
|
||||||
|
"orl $0x80, %%eax;"
|
||||||
|
"movl %%eax, %%cr4;"
|
||||||
|
::: "eax"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// enable write protect
|
||||||
|
asm volatile(
|
||||||
|
"movl %%cr0, %%eax;"
|
||||||
|
"orl $0x10000, %%eax;"
|
||||||
|
"movl %%eax, %%cr0;"
|
||||||
|
::: "rax"
|
||||||
|
);
|
||||||
|
|
||||||
|
load();
|
||||||
|
}
|
||||||
|
|
||||||
|
	// Returns the kernel's shared page table; initialize() must have run.
	PageTable& PageTable::kernel()
	{
		ASSERT(s_kernel);
		return *s_kernel;
	}

	// On i386 every 32-bit pointer value is addressable; no canonicality check.
	bool PageTable::is_valid_pointer(uintptr_t)
	{
		return true;
	}

	// Allocates one zeroed, PAGE_SIZE-aligned page for paging structures.
	static uint64_t* allocate_zeroed_page_aligned_page()
	{
		void* page = kmalloc(PAGE_SIZE, PAGE_SIZE, true);
		ASSERT(page);
		memset(page, 0, PAGE_SIZE);
		return (uint64_t*)page;
	}
||||||
|
|
||||||
|
	// Builds the kernel mappings shared by every address space: the global
	// PDPT entry, the fast-page slot, the BIOS area, the kernel image and
	// the userspace trampoline region.
	void PageTable::initialize_kernel()
	{
		ASSERT(s_global_pdpte == 0);
		s_global_pdpte = V2P(allocate_zeroed_page_aligned_page());

		map_kernel_memory();

		prepare_fast_page();

		// Map main bios area below 1 MiB
		map_range_at(
			0x000E0000,
			P2V(0x000E0000),
			0x00100000 - 0x000E0000,
			PageTable::Flags::Present
		);

		// Map (phys_kernel_start -> phys_kernel_end) to (virt_kernel_start -> virt_kernel_end)
		ASSERT((vaddr_t)g_kernel_start % PAGE_SIZE == 0);
		map_range_at(
			V2P(g_kernel_start),
			(vaddr_t)g_kernel_start,
			g_kernel_end - g_kernel_start,
			Flags::ReadWrite | Flags::Present
		);

		// Map executable kernel memory as executable
		map_range_at(
			V2P(g_kernel_execute_start),
			(vaddr_t)g_kernel_execute_start,
			g_kernel_execute_end - g_kernel_execute_start,
			Flags::Execute | Flags::Present
		);

		// Map userspace memory
		map_range_at(
			V2P(g_userspace_start),
			(vaddr_t)g_userspace_start,
			g_userspace_end - g_userspace_start,
			Flags::Execute | Flags::UserSupervisor | Flags::Present
		);
	}

	// Pre-allocates the page directory and page table backing the reserved
	// fast-page slot so map_fast_page() later only patches a single PTE.
	void PageTable::prepare_fast_page()
	{
		constexpr uint64_t pdpte = (fast_page() >> 30) & 0x1FF;
		constexpr uint64_t pde = (fast_page() >> 21) & 0x1FF;
		constexpr uint64_t pte = (fast_page() >> 12) & 0x1FF;

		uint64_t* pdpt = reinterpret_cast<uint64_t*>(P2V(m_highest_paging_struct));
		ASSERT(pdpt[pdpte] & Flags::Present);

		// NOTE(review): here the address mask is applied after P2V, while
		// map_fast_page() masks before P2V — equivalent only while P2V is a
		// plain offset add and the flag bits cannot carry; confirm intended.
		uint64_t* pd = reinterpret_cast<uint64_t*>(P2V(pdpt[pdpte]) & PAGE_ADDR_MASK);
		ASSERT(!(pd[pde] & Flags::Present));
		pd[pde] = V2P(allocate_zeroed_page_aligned_page()) | Flags::ReadWrite | Flags::Present;

		uint64_t* pt = reinterpret_cast<uint64_t*>(P2V(pd[pde]) & PAGE_ADDR_MASK);
		ASSERT(!(pt[pte] & Flags::Present));
		pt[pte] = V2P(allocate_zeroed_page_aligned_page());
	}
||||||
|
|
||||||
|
	// Maps physical page 'paddr' at the reserved fast-page slot of the
	// kernel page table. Must later be released with unmap_fast_page();
	// both take the recursive s_fast_page_lock.
	void PageTable::map_fast_page(paddr_t paddr)
	{
		ASSERT(s_kernel);
		ASSERT(paddr);

		SpinLockGuard _(s_fast_page_lock);

		constexpr uint64_t pdpte = (fast_page() >> 30) & 0x1FF;
		constexpr uint64_t pde = (fast_page() >> 21) & 0x1FF;
		constexpr uint64_t pte = (fast_page() >> 12) & 0x1FF;

		// walk the pre-built structures from prepare_fast_page()
		uint64_t* pdpt = reinterpret_cast<uint64_t*>(P2V(s_kernel->m_highest_paging_struct));
		uint64_t* pd = reinterpret_cast<uint64_t*>(P2V(pdpt[pdpte] & PAGE_ADDR_MASK));
		uint64_t* pt = reinterpret_cast<uint64_t*>(P2V(pd[pde] & PAGE_ADDR_MASK));

		ASSERT(!(pt[pte] & Flags::Present));
		pt[pte] = paddr | Flags::ReadWrite | Flags::Present;

		invalidate(fast_page());
	}

	// Releases the mapping installed by map_fast_page().
	void PageTable::unmap_fast_page()
	{
		ASSERT(s_kernel);

		SpinLockGuard _(s_fast_page_lock);

		constexpr uint64_t pdpte = (fast_page() >> 30) & 0x1FF;
		constexpr uint64_t pde = (fast_page() >> 21) & 0x1FF;
		constexpr uint64_t pte = (fast_page() >> 12) & 0x1FF;

		uint64_t* pdpt = reinterpret_cast<uint64_t*>(P2V(s_kernel->m_highest_paging_struct));
		uint64_t* pd = reinterpret_cast<uint64_t*>(P2V(pdpt[pdpte] & PAGE_ADDR_MASK));
		uint64_t* pt = reinterpret_cast<uint64_t*>(P2V(pd[pde] & PAGE_ADDR_MASK));

		ASSERT(pt[pte] & Flags::Present);
		pt[pte] = 0;

		invalidate(fast_page());
	}
|
||||||
|
|
||||||
|
	// Creates a fresh page table for a userspace process with kernel memory
	// already mapped via the shared global PDPT entry.
	BAN::ErrorOr<PageTable*> PageTable::create_userspace()
	{
		SpinLockGuard _(s_kernel->m_lock);
		PageTable* page_table = new PageTable;
		if (page_table == nullptr)
			return BAN::Error::from_errno(ENOMEM);
		page_table->map_kernel_memory();
		return page_table;
	}

	// Allocates this table's 4-entry PAE PDPT and points entry 3 (the top
	// 1 GiB, i.e. KERNEL_OFFSET = 0xC0000000) at the shared kernel PDPTE.
	void PageTable::map_kernel_memory()
	{
		ASSERT(s_kernel);
		ASSERT(s_global_pdpte);

		ASSERT(m_highest_paging_struct == 0);
		// PDPT: 4 * 8 bytes, 32-byte aligned, zero initialized
		m_highest_paging_struct = V2P(kmalloc(32, 32, true));
		ASSERT(m_highest_paging_struct);

		uint64_t* pdpt = reinterpret_cast<uint64_t*>(P2V(m_highest_paging_struct));
		pdpt[0] = 0;
		pdpt[1] = 0;
		pdpt[2] = 0;
		pdpt[3] = s_global_pdpte | Flags::Present;
		static_assert(KERNEL_OFFSET == 0xC0000000);
	}
|
||||||
|
|
||||||
|
	// Frees the per-process paging structures. PDPT entry 3 is the shared
	// kernel PDPTE and is deliberately not freed (loop stops at 3).
	PageTable::~PageTable()
	{
		uint64_t* pdpt = reinterpret_cast<uint64_t*>(P2V(m_highest_paging_struct));

		for (uint32_t pdpte = 0; pdpte < 3; pdpte++)
		{
			if (!(pdpt[pdpte] & Flags::Present))
				continue;
			uint64_t* pd = reinterpret_cast<uint64_t*>(P2V(pdpt[pdpte] & PAGE_ADDR_MASK));
			for (uint32_t pde = 0; pde < 512; pde++)
			{
				if (!(pd[pde] & Flags::Present))
					continue;
				kfree(reinterpret_cast<uint64_t*>(P2V(pd[pde] & PAGE_ADDR_MASK)));
			}
			kfree(pd);
		}
		kfree(pdpt);
	}

	// Switches the CPU to this address space (CR3 := physical PDPT address).
	void PageTable::load()
	{
		SpinLockGuard _(m_lock);
		ASSERT(m_highest_paging_struct < 0x100000000);	// CR3 holds a 32-bit address here
		const uint32_t pdpt_lo = m_highest_paging_struct;
		asm volatile("movl %0, %%cr3" :: "r"(pdpt_lo));
		Processor::set_current_page_table(this);
	}

	// Flushes the TLB entry of one page-aligned virtual address.
	void PageTable::invalidate(vaddr_t vaddr)
	{
		ASSERT(vaddr % PAGE_SIZE == 0);
		asm volatile("invlpg (%0)" :: "r"(vaddr) : "memory");
	}
|
||||||
|
|
||||||
|
void PageTable::unmap_page(vaddr_t vaddr)
|
||||||
|
{
|
||||||
|
ASSERT(vaddr);
|
||||||
|
ASSERT(vaddr % PAGE_SIZE == 0);
|
||||||
|
ASSERT(vaddr != fast_page());
|
||||||
|
if (vaddr >= KERNEL_OFFSET)
|
||||||
|
ASSERT(vaddr >= (vaddr_t)g_kernel_start);
|
||||||
|
if ((vaddr >= KERNEL_OFFSET) != (this == s_kernel))
|
||||||
|
Kernel::panic("unmapping {8H}, kernel: {}", vaddr, this == s_kernel);
|
||||||
|
|
||||||
|
const uint64_t pdpte = (vaddr >> 30) & 0x1FF;
|
||||||
|
const uint64_t pde = (vaddr >> 21) & 0x1FF;
|
||||||
|
const uint64_t pte = (vaddr >> 12) & 0x1FF;
|
||||||
|
|
||||||
|
SpinLockGuard _(m_lock);
|
||||||
|
|
||||||
|
if (is_page_free(vaddr))
|
||||||
|
{
|
||||||
|
dwarnln("unmapping unmapped page {8H}", vaddr);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
uint64_t* pdpt = reinterpret_cast<uint64_t*>(P2V(m_highest_paging_struct));
|
||||||
|
uint64_t* pd = reinterpret_cast<uint64_t*>(P2V(pdpt[pdpte] & PAGE_ADDR_MASK));
|
||||||
|
uint64_t* pt = reinterpret_cast<uint64_t*>(P2V(pd[pde] & PAGE_ADDR_MASK));
|
||||||
|
|
||||||
|
pt[pte] = 0;
|
||||||
|
invalidate(vaddr);
|
||||||
|
}
|
||||||
|
|
||||||
|
void PageTable::unmap_range(vaddr_t vaddr, size_t size)
|
||||||
|
{
|
||||||
|
vaddr_t s_page = vaddr / PAGE_SIZE;
|
||||||
|
vaddr_t e_page = BAN::Math::div_round_up<vaddr_t>(vaddr + size, PAGE_SIZE);
|
||||||
|
|
||||||
|
SpinLockGuard _(m_lock);
|
||||||
|
for (vaddr_t page = s_page; page < e_page; page++)
|
||||||
|
unmap_page(page * PAGE_SIZE);
|
||||||
|
}
|
||||||
|
|
||||||
|
void PageTable::map_page_at(paddr_t paddr, vaddr_t vaddr, flags_t flags)
|
||||||
|
{
|
||||||
|
ASSERT(vaddr);
|
||||||
|
ASSERT(vaddr != fast_page());
|
||||||
|
if ((vaddr >= KERNEL_OFFSET) != (this == s_kernel))
|
||||||
|
Kernel::panic("mapping {8H} to {8H}, kernel: {}", paddr, vaddr, this == s_kernel);
|
||||||
|
|
||||||
|
ASSERT(paddr % PAGE_SIZE == 0);
|
||||||
|
ASSERT(vaddr % PAGE_SIZE == 0);
|
||||||
|
ASSERT(flags & Flags::Used);
|
||||||
|
|
||||||
|
const uint64_t pdpte = (vaddr >> 30) & 0x1FF;
|
||||||
|
const uint64_t pde = (vaddr >> 21) & 0x1FF;
|
||||||
|
const uint64_t pte = (vaddr >> 12) & 0x1FF;
|
||||||
|
|
||||||
|
uint64_t extra_flags = 0;
|
||||||
|
if (s_has_pge && vaddr >= KERNEL_OFFSET) // Map kernel memory as global
|
||||||
|
extra_flags |= 1ull << 8;
|
||||||
|
if (s_has_nxe && !(flags & Flags::Execute))
|
||||||
|
extra_flags |= 1ull << 63;
|
||||||
|
if (flags & Flags::Reserved)
|
||||||
|
extra_flags |= Flags::Reserved;
|
||||||
|
if (flags & Flags::CacheDisable)
|
||||||
|
extra_flags |= Flags::CacheDisable;
|
||||||
|
|
||||||
|
// NOTE: we add present here, since it has to be available in higher level structures
|
||||||
|
flags_t uwr_flags = (flags & (Flags::UserSupervisor | Flags::ReadWrite)) | Flags::Present;
|
||||||
|
|
||||||
|
SpinLockGuard _(m_lock);
|
||||||
|
|
||||||
|
uint64_t* pdpt = reinterpret_cast<uint64_t*>(P2V(m_highest_paging_struct));
|
||||||
|
if (!(pdpt[pdpte] & Flags::Present))
|
||||||
|
pdpt[pdpte] = V2P(allocate_zeroed_page_aligned_page()) | Flags::Present;
|
||||||
|
|
||||||
|
uint64_t* pd = reinterpret_cast<uint64_t*>(P2V(pdpt[pdpte] & PAGE_ADDR_MASK));
|
||||||
|
if ((pd[pde] & uwr_flags) != uwr_flags)
|
||||||
|
{
|
||||||
|
if (!(pd[pde] & Flags::Present))
|
||||||
|
pd[pde] = V2P(allocate_zeroed_page_aligned_page());
|
||||||
|
pd[pde] |= uwr_flags;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!(flags & Flags::Present))
|
||||||
|
uwr_flags &= ~Flags::Present;
|
||||||
|
|
||||||
|
uint64_t* pt = reinterpret_cast<uint64_t*>(P2V(pd[pde] & PAGE_ADDR_MASK));
|
||||||
|
pt[pte] = paddr | uwr_flags | extra_flags;
|
||||||
|
|
||||||
|
invalidate(vaddr);
|
||||||
|
}
|
||||||
|
|
||||||
|
void PageTable::map_range_at(paddr_t paddr, vaddr_t vaddr, size_t size, flags_t flags)
|
||||||
|
{
|
||||||
|
ASSERT(vaddr);
|
||||||
|
ASSERT(paddr % PAGE_SIZE == 0);
|
||||||
|
ASSERT(vaddr % PAGE_SIZE == 0);
|
||||||
|
|
||||||
|
size_t page_count = range_page_count(vaddr, size);
|
||||||
|
|
||||||
|
SpinLockGuard _(m_lock);
|
||||||
|
for (size_t page = 0; page < page_count; page++)
|
||||||
|
map_page_at(paddr + page * PAGE_SIZE, vaddr + page * PAGE_SIZE, flags);
|
||||||
|
}
|
||||||
|
|
||||||
|
uint64_t PageTable::get_page_data(vaddr_t vaddr) const
|
||||||
|
{
|
||||||
|
ASSERT(vaddr % PAGE_SIZE == 0);
|
||||||
|
|
||||||
|
const uint64_t pdpte = (vaddr >> 30) & 0x1FF;
|
||||||
|
const uint64_t pde = (vaddr >> 21) & 0x1FF;
|
||||||
|
const uint64_t pte = (vaddr >> 12) & 0x1FF;
|
||||||
|
|
||||||
|
SpinLockGuard _(m_lock);
|
||||||
|
|
||||||
|
uint64_t* pdpt = (uint64_t*)P2V(m_highest_paging_struct);
|
||||||
|
if (!(pdpt[pdpte] & Flags::Present))
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
uint64_t* pd = (uint64_t*)P2V(pdpt[pdpte] & PAGE_ADDR_MASK);
|
||||||
|
if (!(pd[pde] & Flags::Present))
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
uint64_t* pt = (uint64_t*)P2V(pd[pde] & PAGE_ADDR_MASK);
|
||||||
|
if (!(pt[pte] & Flags::Used))
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
return pt[pte];
|
||||||
|
}
|
||||||
|
|
||||||
|
PageTable::flags_t PageTable::get_page_flags(vaddr_t vaddr) const
|
||||||
|
{
|
||||||
|
return parse_flags(get_page_data(vaddr));
|
||||||
|
}
|
||||||
|
|
||||||
|
paddr_t PageTable::physical_address_of(vaddr_t vaddr) const
|
||||||
|
{
|
||||||
|
uint64_t page_data = get_page_data(vaddr);
|
||||||
|
return (page_data & PAGE_ADDR_MASK) & ~(1ull << 63);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool PageTable::is_page_free(vaddr_t vaddr) const
|
||||||
|
{
|
||||||
|
ASSERT(vaddr % PAGE_SIZE == 0);
|
||||||
|
return !(get_page_flags(vaddr) & Flags::Used);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool PageTable::is_range_free(vaddr_t vaddr, size_t size) const
|
||||||
|
{
|
||||||
|
vaddr_t s_page = vaddr / PAGE_SIZE;
|
||||||
|
vaddr_t e_page = BAN::Math::div_round_up<vaddr_t>(vaddr + size, PAGE_SIZE);
|
||||||
|
|
||||||
|
SpinLockGuard _(m_lock);
|
||||||
|
for (vaddr_t page = s_page; page < e_page; page++)
|
||||||
|
if (!is_page_free(page * PAGE_SIZE))
|
||||||
|
return false;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool PageTable::reserve_page(vaddr_t vaddr, bool only_free)
|
||||||
|
{
|
||||||
|
SpinLockGuard _(m_lock);
|
||||||
|
ASSERT(vaddr % PAGE_SIZE == 0);
|
||||||
|
if (only_free && !is_page_free(vaddr))
|
||||||
|
return false;
|
||||||
|
map_page_at(0, vaddr, Flags::Reserved);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool PageTable::reserve_range(vaddr_t vaddr, size_t bytes, bool only_free)
|
||||||
|
{
|
||||||
|
if (size_t rem = bytes % PAGE_SIZE)
|
||||||
|
bytes += PAGE_SIZE - rem;
|
||||||
|
ASSERT(vaddr % PAGE_SIZE == 0);
|
||||||
|
|
||||||
|
SpinLockGuard _(m_lock);
|
||||||
|
if (only_free && !is_range_free(vaddr, bytes))
|
||||||
|
return false;
|
||||||
|
for (size_t offset = 0; offset < bytes; offset += PAGE_SIZE)
|
||||||
|
reserve_page(vaddr + offset);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
vaddr_t PageTable::reserve_free_page(vaddr_t first_address, vaddr_t last_address)
|
||||||
|
{
|
||||||
|
if (first_address >= KERNEL_OFFSET && first_address < (vaddr_t)g_kernel_end)
|
||||||
|
first_address = (vaddr_t)g_kernel_end;
|
||||||
|
if (size_t rem = first_address % PAGE_SIZE)
|
||||||
|
first_address += PAGE_SIZE - rem;
|
||||||
|
if (size_t rem = last_address % PAGE_SIZE)
|
||||||
|
last_address -= rem;
|
||||||
|
|
||||||
|
const uint32_t s_pdpte = (first_address >> 30) & 0x1FF;
|
||||||
|
const uint32_t s_pde = (first_address >> 21) & 0x1FF;
|
||||||
|
const uint32_t s_pte = (first_address >> 12) & 0x1FF;
|
||||||
|
|
||||||
|
const uint32_t e_pdpte = (last_address >> 30) & 0x1FF;
|
||||||
|
const uint32_t e_pde = (last_address >> 21) & 0x1FF;
|
||||||
|
const uint32_t e_pte = (last_address >> 12) & 0x1FF;
|
||||||
|
|
||||||
|
SpinLockGuard _(m_lock);
|
||||||
|
|
||||||
|
// Try to find free page that can be mapped without
|
||||||
|
// allocations (page table with unused entries)
|
||||||
|
uint64_t* pdpt = reinterpret_cast<uint64_t*>(P2V(m_highest_paging_struct));
|
||||||
|
for (uint32_t pdpte = s_pdpte; pdpte < 4; pdpte++)
|
||||||
|
{
|
||||||
|
if (pdpte > e_pdpte)
|
||||||
|
break;
|
||||||
|
if (!(pdpt[pdpte] & Flags::Present))
|
||||||
|
continue;
|
||||||
|
uint64_t* pd = reinterpret_cast<uint64_t*>(P2V(pdpt[pdpte] & PAGE_ADDR_MASK));
|
||||||
|
for (uint32_t pde = s_pde; pde < 512; pde++)
|
||||||
|
{
|
||||||
|
if (pdpte == e_pdpte && pde > e_pde)
|
||||||
|
break;
|
||||||
|
if (!(pd[pde] & Flags::Present))
|
||||||
|
continue;
|
||||||
|
uint64_t* pt = (uint64_t*)P2V(pd[pde] & PAGE_ADDR_MASK);
|
||||||
|
for (uint32_t pte = s_pte; pte < 512; pte++)
|
||||||
|
{
|
||||||
|
if (pdpte == e_pdpte && pde == e_pde && pte >= e_pte)
|
||||||
|
break;
|
||||||
|
if (!(pt[pte] & Flags::Used))
|
||||||
|
{
|
||||||
|
vaddr_t vaddr = 0;
|
||||||
|
vaddr |= (vaddr_t)pdpte << 30;
|
||||||
|
vaddr |= (vaddr_t)pde << 21;
|
||||||
|
vaddr |= (vaddr_t)pte << 12;
|
||||||
|
ASSERT(reserve_page(vaddr));
|
||||||
|
return vaddr;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find any free page
|
||||||
|
for (vaddr_t vaddr = first_address; vaddr < last_address; vaddr += PAGE_SIZE)
|
||||||
|
{
|
||||||
|
if (is_page_free(vaddr))
|
||||||
|
{
|
||||||
|
ASSERT(reserve_page(vaddr));
|
||||||
|
return vaddr;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ASSERT_NOT_REACHED();
|
||||||
|
}
|
||||||
|
|
||||||
|
vaddr_t PageTable::reserve_free_contiguous_pages(size_t page_count, vaddr_t first_address, vaddr_t last_address)
|
||||||
|
{
|
||||||
|
if (first_address >= KERNEL_OFFSET && first_address < (vaddr_t)g_kernel_start)
|
||||||
|
first_address = (vaddr_t)g_kernel_start;
|
||||||
|
if (size_t rem = first_address % PAGE_SIZE)
|
||||||
|
first_address += PAGE_SIZE - rem;
|
||||||
|
if (size_t rem = last_address % PAGE_SIZE)
|
||||||
|
last_address -= rem;
|
||||||
|
|
||||||
|
SpinLockGuard _(m_lock);
|
||||||
|
|
||||||
|
for (vaddr_t vaddr = first_address; vaddr < last_address;)
|
||||||
|
{
|
||||||
|
bool valid { true };
|
||||||
|
for (size_t page = 0; page < page_count; page++)
|
||||||
|
{
|
||||||
|
if (!is_page_free(vaddr + page * PAGE_SIZE))
|
||||||
|
{
|
||||||
|
vaddr += (page + 1) * PAGE_SIZE;
|
||||||
|
valid = false;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (valid)
|
||||||
|
{
|
||||||
|
ASSERT(reserve_range(vaddr, page_count * PAGE_SIZE));
|
||||||
|
return vaddr;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ASSERT_NOT_REACHED();
|
||||||
|
}
|
||||||
|
|
||||||
|
static void dump_range(vaddr_t start, vaddr_t end, PageTable::flags_t flags)
|
||||||
|
{
|
||||||
|
if (start == 0)
|
||||||
|
return;
|
||||||
|
dprintln("{}-{}: {}{}{}{}",
|
||||||
|
(void*)(start), (void*)(end - 1),
|
||||||
|
flags & PageTable::Flags::Execute ? 'x' : '-',
|
||||||
|
flags & PageTable::Flags::UserSupervisor ? 'u' : '-',
|
||||||
|
flags & PageTable::Flags::ReadWrite ? 'w' : '-',
|
||||||
|
flags & PageTable::Flags::Present ? 'r' : '-'
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
void PageTable::debug_dump()
|
||||||
|
{
|
||||||
|
SpinLockGuard _(m_lock);
|
||||||
|
|
||||||
|
flags_t flags = 0;
|
||||||
|
vaddr_t start = 0;
|
||||||
|
|
||||||
|
uint64_t* pdpt = reinterpret_cast<uint64_t*>(P2V(m_highest_paging_struct));
|
||||||
|
for (uint32_t pdpte = 0; pdpte < 4; pdpte++)
|
||||||
|
{
|
||||||
|
if (!(pdpt[pdpte] & Flags::Present))
|
||||||
|
{
|
||||||
|
dump_range(start, (pdpte << 30), flags);
|
||||||
|
start = 0;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
uint64_t* pd = (uint64_t*)P2V(pdpt[pdpte] & PAGE_ADDR_MASK);
|
||||||
|
for (uint64_t pde = 0; pde < 512; pde++)
|
||||||
|
{
|
||||||
|
if (!(pd[pde] & Flags::Present))
|
||||||
|
{
|
||||||
|
dump_range(start, (pdpte << 30) | (pde << 21), flags);
|
||||||
|
start = 0;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
uint64_t* pt = (uint64_t*)P2V(pd[pde] & PAGE_ADDR_MASK);
|
||||||
|
for (uint64_t pte = 0; pte < 512; pte++)
|
||||||
|
{
|
||||||
|
if (parse_flags(pt[pte]) != flags)
|
||||||
|
{
|
||||||
|
dump_range(start, (pdpte << 30) | (pde << 21) | (pte << 12), flags);
|
||||||
|
start = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!(pt[pte] & Flags::Used))
|
||||||
|
continue;
|
||||||
|
|
||||||
|
if (start == 0)
|
||||||
|
{
|
||||||
|
flags = parse_flags(pt[pte]);
|
||||||
|
start = (pdpte << 30) | (pde << 21) | (pte << 12);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,31 @@
|
||||||
|
.section .userspace, "aw"
|
||||||
|
|
||||||
|
// stack contains
|
||||||
|
// return address
|
||||||
|
// signal number
|
||||||
|
// signal handler
|
||||||
|
|
||||||
|
.global signal_trampoline
|
||||||
|
signal_trampoline:
|
||||||
|
ud2
|
||||||
|
|
||||||
|
pushl %ebp
|
||||||
|
movl %esp, %ebp
|
||||||
|
subl $8, %esp
|
||||||
|
|
||||||
|
pusha
|
||||||
|
|
||||||
|
movl 40(%esp), %edi
|
||||||
|
movl 36(%esp), %eax
|
||||||
|
|
||||||
|
subl $12, %esp
|
||||||
|
pushl %edi
|
||||||
|
call *%eax
|
||||||
|
addl $16, %esp
|
||||||
|
|
||||||
|
popa
|
||||||
|
|
||||||
|
leave
|
||||||
|
addl $8, %esp
|
||||||
|
|
||||||
|
ret
|
|
@ -0,0 +1,31 @@
|
||||||
|
.global sys_fork_trampoline
|
||||||
|
sys_fork_trampoline:
|
||||||
|
pushl %ebp
|
||||||
|
pushl %ebx
|
||||||
|
pushl %esi
|
||||||
|
pushl %edi
|
||||||
|
|
||||||
|
call read_ip
|
||||||
|
testl %eax, %eax
|
||||||
|
jz .reload_stack
|
||||||
|
|
||||||
|
movl %esp, %ebx
|
||||||
|
|
||||||
|
subl $8, %esp
|
||||||
|
pushl %eax
|
||||||
|
pushl %ebx
|
||||||
|
call sys_fork
|
||||||
|
addl $16, %esp
|
||||||
|
|
||||||
|
.done:
|
||||||
|
popl %edi
|
||||||
|
popl %esi
|
||||||
|
popl %ebx
|
||||||
|
popl %ebp
|
||||||
|
ret
|
||||||
|
|
||||||
|
.reload_stack:
|
||||||
|
call get_thread_start_sp
|
||||||
|
movl %eax, %esp
|
||||||
|
xorl %eax, %eax
|
||||||
|
jmp .done
|
|
@ -0,0 +1,68 @@
|
||||||
|
# uint32_t read_ip()
|
||||||
|
.global read_ip
|
||||||
|
read_ip:
|
||||||
|
popl %eax
|
||||||
|
jmp *%eax
|
||||||
|
|
||||||
|
# void start_kernel_thread()
|
||||||
|
.global start_kernel_thread
|
||||||
|
start_kernel_thread:
|
||||||
|
call get_thread_start_sp
|
||||||
|
movl %eax, %esp
|
||||||
|
|
||||||
|
# STACK LAYOUT
|
||||||
|
# on_exit arg
|
||||||
|
# on_exit func
|
||||||
|
# entry arg
|
||||||
|
# entry func
|
||||||
|
|
||||||
|
movl 4(%esp), %edi
|
||||||
|
movl 0(%esp), %esi
|
||||||
|
|
||||||
|
subl $12, %esp
|
||||||
|
pushl %edi
|
||||||
|
sti
|
||||||
|
call *%esi
|
||||||
|
addl $16, %esp
|
||||||
|
|
||||||
|
movl 12(%esp), %edi
|
||||||
|
movl 8(%esp), %esi
|
||||||
|
|
||||||
|
subl $12, %esp
|
||||||
|
pushl %edi
|
||||||
|
call *%esi
|
||||||
|
addl $16, %esp
|
||||||
|
|
||||||
|
|
||||||
|
.global start_userspace_thread
|
||||||
|
start_userspace_thread:
|
||||||
|
call get_thread_start_sp
|
||||||
|
movl %eax, %esp
|
||||||
|
|
||||||
|
# STACK LAYOUT
|
||||||
|
# entry
|
||||||
|
# argc
|
||||||
|
# argv
|
||||||
|
# envp
|
||||||
|
# userspace stack
|
||||||
|
|
||||||
|
call get_userspace_thread_stack_top
|
||||||
|
|
||||||
|
movw $(0x20 | 3), %bx
|
||||||
|
movw %bx, %ds
|
||||||
|
movw %bx, %es
|
||||||
|
movw %bx, %fs
|
||||||
|
movw %bx, %gs
|
||||||
|
xorw %bx, %bx
|
||||||
|
|
||||||
|
popl %edx
|
||||||
|
popl %esi
|
||||||
|
popl %edi
|
||||||
|
popl %ecx
|
||||||
|
|
||||||
|
pushl $(0x20 | 3)
|
||||||
|
pushl %eax
|
||||||
|
pushl $0x202
|
||||||
|
pushl $(0x18 | 3)
|
||||||
|
pushl %ecx
|
||||||
|
iret
|
|
@ -0,0 +1,296 @@
|
||||||
|
.set PG_PRESENT, 1<<0
|
||||||
|
.set PG_READ_WRITE, 1<<1
|
||||||
|
.set PG_PAGE_SIZE, 1<<7
|
||||||
|
|
||||||
|
.set FB_WIDTH, 800
|
||||||
|
.set FB_HEIGHT, 600
|
||||||
|
.set FB_BPP, 32
|
||||||
|
|
||||||
|
#define KERNEL_OFFSET 0xC0000000
|
||||||
|
#define V2P(vaddr) ((vaddr) - KERNEL_OFFSET)
|
||||||
|
|
||||||
|
.code32
|
||||||
|
|
||||||
|
# multiboot2 header
|
||||||
|
.section .multiboot, "aw"
|
||||||
|
.align 8
|
||||||
|
multiboot2_start:
|
||||||
|
.long 0xE85250D6
|
||||||
|
.long 0
|
||||||
|
.long multiboot2_end - multiboot2_start
|
||||||
|
.long -(0xE85250D6 + (multiboot2_end - multiboot2_start))
|
||||||
|
|
||||||
|
# framebuffer tag
|
||||||
|
.align 8
|
||||||
|
.short 5
|
||||||
|
.short 0
|
||||||
|
.long 20
|
||||||
|
.long FB_WIDTH
|
||||||
|
.long FB_HEIGHT
|
||||||
|
.long FB_BPP
|
||||||
|
|
||||||
|
# legacy start
|
||||||
|
.align 8
|
||||||
|
.short 3
|
||||||
|
.short 0
|
||||||
|
.long 12
|
||||||
|
.long V2P(_start)
|
||||||
|
|
||||||
|
.align 8
|
||||||
|
.short 0
|
||||||
|
.short 0
|
||||||
|
.long 8
|
||||||
|
multiboot2_end:
|
||||||
|
|
||||||
|
.section .bananboot, "aw"
|
||||||
|
.align 8
|
||||||
|
bananboot_start:
|
||||||
|
.long 0xBABAB007
|
||||||
|
.long -(0xBABAB007 + FB_WIDTH + FB_HEIGHT + FB_BPP)
|
||||||
|
.long FB_WIDTH
|
||||||
|
.long FB_HEIGHT
|
||||||
|
.long FB_BPP
|
||||||
|
bananboot_end:
|
||||||
|
|
||||||
|
.section .bss, "aw", @nobits
|
||||||
|
.align 4096
|
||||||
|
boot_stack_bottom:
|
||||||
|
.skip 4096 * 4
|
||||||
|
boot_stack_top:
|
||||||
|
|
||||||
|
.global g_kernel_cmdline
|
||||||
|
g_kernel_cmdline:
|
||||||
|
.skip 4096
|
||||||
|
|
||||||
|
bootloader_magic:
|
||||||
|
.skip 8
|
||||||
|
bootloader_info:
|
||||||
|
.skip 8
|
||||||
|
|
||||||
|
.section .data
|
||||||
|
|
||||||
|
# Map first GiB to 0x00000000 and 0xC0000000
|
||||||
|
.align 32
|
||||||
|
boot_pdpt:
|
||||||
|
.long V2P(boot_pd) + (PG_PRESENT)
|
||||||
|
.long 0
|
||||||
|
.quad 0
|
||||||
|
.quad 0
|
||||||
|
.long V2P(boot_pd) + (PG_PRESENT)
|
||||||
|
.long 0
|
||||||
|
.align 4096
|
||||||
|
boot_pd:
|
||||||
|
.set i, 0
|
||||||
|
.rept 512
|
||||||
|
.long i + (PG_PAGE_SIZE | PG_READ_WRITE | PG_PRESENT)
|
||||||
|
.long 0
|
||||||
|
.set i, i + 0x200000
|
||||||
|
.endr
|
||||||
|
|
||||||
|
boot_gdt:
|
||||||
|
.quad 0x0000000000000000 # null descriptor
|
||||||
|
.quad 0x00CF9A000000FFFF # kernel code
|
||||||
|
.quad 0x00CF92000000FFFF # kernel data
|
||||||
|
boot_gdtr:
|
||||||
|
.short . - boot_gdt - 1
|
||||||
|
.long V2P(boot_gdt)
|
||||||
|
|
||||||
|
.global g_ap_startup_done
|
||||||
|
g_ap_startup_done:
|
||||||
|
.byte 0
|
||||||
|
.global g_ap_running_count
|
||||||
|
g_ap_running_count:
|
||||||
|
.byte 0
|
||||||
|
.global g_ap_stack_loaded
|
||||||
|
g_ap_stack_loaded:
|
||||||
|
.byte 0
|
||||||
|
|
||||||
|
.section .text
|
||||||
|
|
||||||
|
has_cpuid:
|
||||||
|
pushfl
|
||||||
|
pushfl
|
||||||
|
xorl $0x00200000, (%esp)
|
||||||
|
popfl
|
||||||
|
pushfl
|
||||||
|
popl %eax
|
||||||
|
xorl (%esp), %eax
|
||||||
|
popfl
|
||||||
|
testl $0x00200000, %eax
|
||||||
|
ret
|
||||||
|
|
||||||
|
has_pae:
|
||||||
|
movl $0, %eax
|
||||||
|
cpuid
|
||||||
|
testl $(1 << 6), %edx
|
||||||
|
ret
|
||||||
|
|
||||||
|
has_sse:
|
||||||
|
movl $1, %eax
|
||||||
|
cpuid
|
||||||
|
testl $(1 << 25), %edx
|
||||||
|
ret
|
||||||
|
|
||||||
|
check_requirements:
|
||||||
|
call has_cpuid
|
||||||
|
jz .exit
|
||||||
|
call has_pae
|
||||||
|
jz .exit
|
||||||
|
call has_sse
|
||||||
|
jz .exit
|
||||||
|
ret
|
||||||
|
.exit:
|
||||||
|
jmp system_halt
|
||||||
|
|
||||||
|
enable_sse:
|
||||||
|
movl %cr0, %eax
|
||||||
|
andw $0xFFFB, %ax
|
||||||
|
orw $0x0002, %ax
|
||||||
|
movl %eax, %cr0
|
||||||
|
movl %cr4, %eax
|
||||||
|
orw $0x0600, %ax
|
||||||
|
movl %eax, %cr4
|
||||||
|
ret
|
||||||
|
|
||||||
|
initialize_paging:
|
||||||
|
# enable PAE
|
||||||
|
movl %cr4, %ecx
|
||||||
|
orl $(1 << 5), %ecx
|
||||||
|
movl %ecx, %cr4
|
||||||
|
|
||||||
|
# load page tables
|
||||||
|
movl $V2P(boot_pdpt), %ecx
|
||||||
|
movl %ecx, %cr3
|
||||||
|
|
||||||
|
# enable paging
|
||||||
|
movl %cr0, %ecx
|
||||||
|
orl $(1 << 31), %ecx
|
||||||
|
movl %ecx, %cr0
|
||||||
|
|
||||||
|
ret
|
||||||
|
|
||||||
|
.global _start
|
||||||
|
.type _start, @function
|
||||||
|
_start:
|
||||||
|
cli; cld
|
||||||
|
|
||||||
|
# save bootloader magic and info
|
||||||
|
movl %eax, V2P(bootloader_magic)
|
||||||
|
movl %ebx, V2P(bootloader_info)
|
||||||
|
|
||||||
|
# load boot stack
|
||||||
|
movl $V2P(boot_stack_top), %esp
|
||||||
|
|
||||||
|
# load boot GDT
|
||||||
|
lgdt V2P(boot_gdtr)
|
||||||
|
ljmpl $0x08, $V2P(gdt_flush)
|
||||||
|
gdt_flush:
|
||||||
|
# set correct segment registers
|
||||||
|
movw $0x10, %ax
|
||||||
|
movw %ax, %ds
|
||||||
|
movw %ax, %ss
|
||||||
|
movw %ax, %es
|
||||||
|
|
||||||
|
# do processor initialization
|
||||||
|
call check_requirements
|
||||||
|
call enable_sse
|
||||||
|
call initialize_paging
|
||||||
|
|
||||||
|
# load higher half stack pointer
|
||||||
|
movl $boot_stack_top, %esp
|
||||||
|
|
||||||
|
# jump to higher half
|
||||||
|
leal higher_half, %ecx
|
||||||
|
jmp *%ecx
|
||||||
|
|
||||||
|
higher_half:
|
||||||
|
# call global constuctors
|
||||||
|
call _init
|
||||||
|
|
||||||
|
# call to the kernel itself (clear ebp for stacktrace)
|
||||||
|
xorl %ebp, %ebp
|
||||||
|
|
||||||
|
subl $8, %esp
|
||||||
|
pushl bootloader_info
|
||||||
|
pushl bootloader_magic
|
||||||
|
call kernel_main
|
||||||
|
addl $16, %esp
|
||||||
|
|
||||||
|
# call global destructors
|
||||||
|
call _fini
|
||||||
|
|
||||||
|
system_halt:
|
||||||
|
xchgw %bx, %bx
|
||||||
|
cli
|
||||||
|
1: hlt
|
||||||
|
jmp 1b
|
||||||
|
|
||||||
|
|
||||||
|
.section .ap_init, "ax"
|
||||||
|
|
||||||
|
.code16
|
||||||
|
.global ap_trampoline
|
||||||
|
ap_trampoline:
|
||||||
|
jmp 1f
|
||||||
|
.align 8
|
||||||
|
ap_stack_ptr:
|
||||||
|
.skip 4
|
||||||
|
1:
|
||||||
|
cli; cld
|
||||||
|
ljmpl $0x00, $ap_cs_clear
|
||||||
|
ap_cs_clear:
|
||||||
|
|
||||||
|
# load ap gdt and enter protected mode
|
||||||
|
lgdt ap_gdtr
|
||||||
|
movl %cr0, %eax
|
||||||
|
orb $1, %al
|
||||||
|
movl %eax, %cr0
|
||||||
|
ljmpl $0x08, $ap_protected_mode
|
||||||
|
|
||||||
|
.code32
|
||||||
|
ap_protected_mode:
|
||||||
|
movw $0x10, %ax
|
||||||
|
movw %ax, %ds
|
||||||
|
movw %ax, %ss
|
||||||
|
movw %ax, %es
|
||||||
|
|
||||||
|
movl ap_stack_ptr, %esp
|
||||||
|
movb $1, V2P(g_ap_stack_loaded)
|
||||||
|
|
||||||
|
call V2P(enable_sse)
|
||||||
|
|
||||||
|
call V2P(initialize_paging)
|
||||||
|
|
||||||
|
# load boot gdt and enter long mode
|
||||||
|
lgdt V2P(boot_gdtr)
|
||||||
|
ljmpl $0x08, $ap_flush_gdt
|
||||||
|
|
||||||
|
ap_flush_gdt:
|
||||||
|
# move stack pointer to higher half
|
||||||
|
movl %esp, %esp
|
||||||
|
addl $KERNEL_OFFSET, %esp
|
||||||
|
|
||||||
|
# jump to higher half
|
||||||
|
leal ap_higher_half, %ecx
|
||||||
|
jmp *%ecx
|
||||||
|
|
||||||
|
ap_higher_half:
|
||||||
|
# clear rbp for stacktrace
|
||||||
|
xorl %ebp, %ebp
|
||||||
|
|
||||||
|
1: pause
|
||||||
|
cmpb $0, g_ap_startup_done
|
||||||
|
jz 1b
|
||||||
|
|
||||||
|
lock incb g_ap_running_count
|
||||||
|
|
||||||
|
call ap_main
|
||||||
|
jmp system_halt
|
||||||
|
|
||||||
|
ap_gdt:
|
||||||
|
.quad 0x0000000000000000 # null descriptor
|
||||||
|
.quad 0x00CF9A000000FFFF # 32 bit code
|
||||||
|
.quad 0x00CF92000000FFFF # 32 bit data
|
||||||
|
ap_gdtr:
|
||||||
|
.short . - ap_gdt - 1
|
||||||
|
.long ap_gdt
|
|
@ -0,0 +1,213 @@
|
||||||
|
.macro push_userspace
|
||||||
|
pushw %gs
|
||||||
|
pushw %fs
|
||||||
|
pushw %es
|
||||||
|
pushw %ds
|
||||||
|
pushal
|
||||||
|
.endm
|
||||||
|
|
||||||
|
.macro load_kernel_segments
|
||||||
|
movw $0x10, %ax
|
||||||
|
movw %ax, %ds
|
||||||
|
movw %ax, %es
|
||||||
|
movw %ax, %fs
|
||||||
|
|
||||||
|
movw $0x28, %ax
|
||||||
|
movw %ax, %gs
|
||||||
|
.endm
|
||||||
|
|
||||||
|
.macro pop_userspace
|
||||||
|
popal
|
||||||
|
popw %ds
|
||||||
|
popw %es
|
||||||
|
popw %fs
|
||||||
|
popw %gs
|
||||||
|
.endm
|
||||||
|
|
||||||
|
.macro pop_userspace_skip_eax
|
||||||
|
popl %edi
|
||||||
|
popl %esi
|
||||||
|
popl %ebp
|
||||||
|
addl $4, %esp
|
||||||
|
popl %ebx
|
||||||
|
popl %edx
|
||||||
|
popl %ecx
|
||||||
|
addl $4, %esp
|
||||||
|
|
||||||
|
popw %ds
|
||||||
|
popw %es
|
||||||
|
popw %fs
|
||||||
|
popw %gs
|
||||||
|
.endm
|
||||||
|
|
||||||
|
isr_stub:
|
||||||
|
push_userspace
|
||||||
|
load_kernel_segments
|
||||||
|
|
||||||
|
movl %cr0, %eax; pushl %eax
|
||||||
|
movl %cr2, %eax; pushl %eax
|
||||||
|
movl %cr3, %eax; pushl %eax
|
||||||
|
movl %cr4, %eax; pushl %eax
|
||||||
|
|
||||||
|
movl %esp, %eax // register ptr
|
||||||
|
leal 64(%esp), %ebx // interrupt stack ptr
|
||||||
|
movl 60(%esp), %ecx // error code
|
||||||
|
movl 56(%esp), %edx // isr number
|
||||||
|
|
||||||
|
subl $12, %esp
|
||||||
|
pushl %eax
|
||||||
|
pushl %ebx
|
||||||
|
pushl %ecx
|
||||||
|
pushl %edx
|
||||||
|
call cpp_isr_handler
|
||||||
|
addl $44, %esp
|
||||||
|
|
||||||
|
pop_userspace
|
||||||
|
addl $8, %esp
|
||||||
|
iret
|
||||||
|
|
||||||
|
irq_stub:
|
||||||
|
push_userspace
|
||||||
|
load_kernel_segments
|
||||||
|
|
||||||
|
movl 40(%esp), %eax # interrupt number
|
||||||
|
|
||||||
|
subl $12, %esp
|
||||||
|
pushl %eax
|
||||||
|
call cpp_irq_handler
|
||||||
|
addl $16, %esp
|
||||||
|
|
||||||
|
pop_userspace
|
||||||
|
addl $8, %esp
|
||||||
|
iret
|
||||||
|
|
||||||
|
.global asm_reschedule_handler
|
||||||
|
asm_reschedule_handler:
|
||||||
|
push_userspace
|
||||||
|
load_kernel_segments
|
||||||
|
|
||||||
|
movl %esp, %eax # interrupt registers ptr
|
||||||
|
leal 40(%esp), %ebx # interrupt stack ptr
|
||||||
|
|
||||||
|
subl $12, %esp
|
||||||
|
pushl %eax
|
||||||
|
pushl %ebx
|
||||||
|
call cpp_reschedule_handler
|
||||||
|
addl $20, %esp
|
||||||
|
|
||||||
|
pop_userspace
|
||||||
|
iret
|
||||||
|
|
||||||
|
// arguments in EAX, EBX, ECX, EDX, ESI, EDI
|
||||||
|
.global syscall_asm
|
||||||
|
syscall_asm:
|
||||||
|
push_userspace
|
||||||
|
|
||||||
|
subl $8, %esp
|
||||||
|
|
||||||
|
pushl %esp
|
||||||
|
addl $48, (%esp)
|
||||||
|
|
||||||
|
pushl %edi
|
||||||
|
pushl %esi
|
||||||
|
pushl %edx
|
||||||
|
pushl %ecx
|
||||||
|
pushl %ebx
|
||||||
|
pushl %eax
|
||||||
|
|
||||||
|
load_kernel_segments
|
||||||
|
|
||||||
|
call cpp_syscall_handler
|
||||||
|
addl $36, %esp
|
||||||
|
|
||||||
|
pop_userspace_skip_eax
|
||||||
|
iret
|
||||||
|
|
||||||
|
.macro isr n
|
||||||
|
.global isr\n
|
||||||
|
isr\n:
|
||||||
|
pushl $0
|
||||||
|
pushl $\n
|
||||||
|
jmp isr_stub
|
||||||
|
.endm
|
||||||
|
|
||||||
|
.macro isr_err n
|
||||||
|
.global isr\n
|
||||||
|
isr\n:
|
||||||
|
pushl $\n
|
||||||
|
jmp isr_stub
|
||||||
|
.endm
|
||||||
|
|
||||||
|
.macro irq n
|
||||||
|
.global irq\n
|
||||||
|
irq\n:
|
||||||
|
pushl $0
|
||||||
|
pushl $\n
|
||||||
|
jmp irq_stub
|
||||||
|
.endm
|
||||||
|
|
||||||
|
isr 0
|
||||||
|
isr 1
|
||||||
|
isr 2
|
||||||
|
isr 3
|
||||||
|
isr 4
|
||||||
|
isr 5
|
||||||
|
isr 6
|
||||||
|
isr 7
|
||||||
|
isr_err 8
|
||||||
|
isr 9
|
||||||
|
isr_err 10
|
||||||
|
isr_err 11
|
||||||
|
isr_err 12
|
||||||
|
isr_err 13
|
||||||
|
isr_err 14
|
||||||
|
isr 15
|
||||||
|
isr 16
|
||||||
|
isr_err 17
|
||||||
|
isr 18
|
||||||
|
isr 19
|
||||||
|
isr 20
|
||||||
|
isr 21
|
||||||
|
isr 22
|
||||||
|
isr 23
|
||||||
|
isr 24
|
||||||
|
isr 25
|
||||||
|
isr 26
|
||||||
|
isr 27
|
||||||
|
isr 28
|
||||||
|
isr 29
|
||||||
|
isr 30
|
||||||
|
isr 31
|
||||||
|
|
||||||
|
irq 0
|
||||||
|
irq 1
|
||||||
|
irq 2
|
||||||
|
irq 3
|
||||||
|
irq 4
|
||||||
|
irq 5
|
||||||
|
irq 6
|
||||||
|
irq 7
|
||||||
|
irq 8
|
||||||
|
irq 9
|
||||||
|
irq 10
|
||||||
|
irq 11
|
||||||
|
irq 12
|
||||||
|
irq 13
|
||||||
|
irq 14
|
||||||
|
irq 15
|
||||||
|
irq 16
|
||||||
|
irq 17
|
||||||
|
irq 18
|
||||||
|
irq 19
|
||||||
|
irq 20
|
||||||
|
irq 21
|
||||||
|
irq 22
|
||||||
|
irq 23
|
||||||
|
irq 24
|
||||||
|
irq 25
|
||||||
|
irq 26
|
||||||
|
irq 27
|
||||||
|
irq 28
|
||||||
|
irq 29
|
||||||
|
irq 30
|
||||||
|
irq 31
|
|
@ -0,0 +1,45 @@
|
||||||
|
ENTRY (_start)
|
||||||
|
|
||||||
|
KERNEL_OFFSET = 0xC0000000;
|
||||||
|
|
||||||
|
SECTIONS
|
||||||
|
{
|
||||||
|
. = 0xF000;
|
||||||
|
.ap_init ALIGN(4K) : AT(ADDR(.ap_init))
|
||||||
|
{
|
||||||
|
g_ap_init_addr = .;
|
||||||
|
*(.ap_init)
|
||||||
|
}
|
||||||
|
|
||||||
|
. = 0x00100000 + KERNEL_OFFSET;
|
||||||
|
|
||||||
|
g_kernel_start = .;
|
||||||
|
.text ALIGN(4K) : AT(ADDR(.text) - KERNEL_OFFSET)
|
||||||
|
{
|
||||||
|
g_kernel_execute_start = .;
|
||||||
|
*(.multiboot)
|
||||||
|
*(.bananboot)
|
||||||
|
*(.text.*)
|
||||||
|
}
|
||||||
|
.userspace ALIGN(4K) : AT(ADDR(.userspace) - KERNEL_OFFSET)
|
||||||
|
{
|
||||||
|
g_userspace_start = .;
|
||||||
|
*(.userspace)
|
||||||
|
g_userspace_end = .;
|
||||||
|
g_kernel_execute_end = .;
|
||||||
|
}
|
||||||
|
.rodata ALIGN(4K) : AT(ADDR(.rodata) - KERNEL_OFFSET)
|
||||||
|
{
|
||||||
|
*(.rodata.*)
|
||||||
|
}
|
||||||
|
.data ALIGN(4K) : AT(ADDR(.data) - KERNEL_OFFSET)
|
||||||
|
{
|
||||||
|
*(.data)
|
||||||
|
}
|
||||||
|
.bss ALIGN(4K) : AT(ADDR(.bss) - KERNEL_OFFSET)
|
||||||
|
{
|
||||||
|
*(COMMON)
|
||||||
|
*(.bss)
|
||||||
|
}
|
||||||
|
g_kernel_end = .;
|
||||||
|
}
|
|
@ -1,56 +0,0 @@
|
||||||
#include <kernel/GDT.h>
|
|
||||||
#include <kernel/Debug.h>
|
|
||||||
|
|
||||||
#include <string.h>
|
|
||||||
|
|
||||||
namespace Kernel
|
|
||||||
{
|
|
||||||
|
|
||||||
GDT* GDT::create()
|
|
||||||
{
|
|
||||||
auto* gdt = new GDT();
|
|
||||||
ASSERT(gdt);
|
|
||||||
|
|
||||||
gdt->write_entry(0x00, 0x00000000, 0x00000, 0x00, 0x0); // null
|
|
||||||
gdt->write_entry(0x08, 0x00000000, 0xFFFFF, 0x9A, 0xA); // kernel code
|
|
||||||
gdt->write_entry(0x10, 0x00000000, 0xFFFFF, 0x92, 0xC); // kernel data
|
|
||||||
gdt->write_entry(0x18, 0x00000000, 0xFFFFF, 0xFA, 0xA); // user code
|
|
||||||
gdt->write_entry(0x20, 0x00000000, 0xFFFFF, 0xF2, 0xC); // user data
|
|
||||||
gdt->write_tss();
|
|
||||||
|
|
||||||
return gdt;
|
|
||||||
}
|
|
||||||
|
|
||||||
void GDT::write_entry(uint8_t offset, uint32_t base, uint32_t limit, uint8_t access, uint8_t flags)
|
|
||||||
{
|
|
||||||
ASSERT(offset % sizeof(SegmentDescriptor) == 0);
|
|
||||||
uint8_t idx = offset / sizeof(SegmentDescriptor);
|
|
||||||
|
|
||||||
auto& desc = m_gdt[idx];
|
|
||||||
desc.base1 = (base >> 0) & 0xFFFF;
|
|
||||||
desc.base2 = (base >> 16) & 0xFF;
|
|
||||||
desc.base3 = (base >> 24) & 0xFF;
|
|
||||||
|
|
||||||
desc.limit1 = (limit >> 0) & 0xFFFF;
|
|
||||||
desc.limit2 = (limit >> 16) & 0x0F;
|
|
||||||
|
|
||||||
desc.access = access & 0xFF;
|
|
||||||
|
|
||||||
desc.flags = flags & 0x0F;
|
|
||||||
}
|
|
||||||
|
|
||||||
void GDT::write_tss()
|
|
||||||
{
|
|
||||||
memset(&m_tss, 0x00, sizeof(TaskStateSegment));
|
|
||||||
m_tss.iopb = sizeof(TaskStateSegment);
|
|
||||||
|
|
||||||
uint64_t base = reinterpret_cast<uint64_t>(&m_tss);
|
|
||||||
|
|
||||||
write_entry(0x28, (uint32_t)base, sizeof(TaskStateSegment), 0x89, 0x0);
|
|
||||||
|
|
||||||
auto& desc = m_gdt[0x30 / sizeof(SegmentDescriptor)];
|
|
||||||
desc.low = base >> 32;
|
|
||||||
desc.high = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
|
@ -6,13 +6,16 @@ sys_fork_trampoline:
|
||||||
pushq %r13
|
pushq %r13
|
||||||
pushq %r14
|
pushq %r14
|
||||||
pushq %r15
|
pushq %r15
|
||||||
call read_rip
|
|
||||||
|
call read_ip
|
||||||
testq %rax, %rax
|
testq %rax, %rax
|
||||||
je .done
|
je .reload_stack
|
||||||
|
|
||||||
movq %rax, %rsi
|
movq %rax, %rsi
|
||||||
movq %rsp, %rdi
|
movq %rsp, %rdi
|
||||||
call sys_fork
|
call sys_fork
|
||||||
.done:
|
|
||||||
|
.done:
|
||||||
popq %r15
|
popq %r15
|
||||||
popq %r14
|
popq %r14
|
||||||
popq %r13
|
popq %r13
|
||||||
|
@ -20,3 +23,9 @@ sys_fork_trampoline:
|
||||||
popq %rbp
|
popq %rbp
|
||||||
popq %rbx
|
popq %rbx
|
||||||
ret
|
ret
|
||||||
|
|
||||||
|
.reload_stack:
|
||||||
|
call get_thread_start_sp
|
||||||
|
movq %rax, %rsp
|
||||||
|
xorq %rax, %rax
|
||||||
|
jmp .done
|
|
@ -1,39 +1,52 @@
|
||||||
# uint64_t read_rip()
|
# uint64_t read_ip()
|
||||||
.global read_rip
|
.global read_ip
|
||||||
read_rip:
|
read_ip:
|
||||||
popq %rax
|
popq %rax
|
||||||
jmp *%rax
|
jmp *%rax
|
||||||
|
|
||||||
exit_thread_trampoline:
|
# void start_kernel_thread()
|
||||||
|
.global start_kernel_thread
|
||||||
|
start_kernel_thread:
|
||||||
|
call get_thread_start_sp
|
||||||
|
movq %rax, %rsp
|
||||||
|
|
||||||
|
# STACK LAYOUT
|
||||||
|
# on_exit arg
|
||||||
|
# on_exit func
|
||||||
|
# entry arg
|
||||||
|
# entry func
|
||||||
|
|
||||||
movq 8(%rsp), %rdi
|
movq 8(%rsp), %rdi
|
||||||
ret
|
movq 0(%rsp), %rsi
|
||||||
|
|
||||||
# void start_thread(uint64_t rsp, uint64_t rip)
|
|
||||||
.global start_thread
|
|
||||||
start_thread:
|
|
||||||
movq %rdi, %rsp
|
|
||||||
popq %rdi
|
|
||||||
movq $0, %rbp
|
|
||||||
pushq $exit_thread_trampoline
|
|
||||||
sti
|
sti
|
||||||
jmp *%rsi
|
call *%rsi
|
||||||
|
|
||||||
# void continue_thread(uint64_t rsp, uint64_t rip)
|
movq 24(%rsp), %rdi
|
||||||
.global continue_thread
|
movq 16(%rsp), %rsi
|
||||||
continue_thread:
|
call *%rsi
|
||||||
movq %rdi, %rsp
|
|
||||||
movq $0, %rax
|
|
||||||
jmp *%rsi
|
|
||||||
|
|
||||||
# void thread_userspace_trampoline(uint64_t rsp, uint64_t rip, int argc, char** argv, char** envp)
|
.global start_userspace_thread
|
||||||
.global thread_userspace_trampoline
|
start_userspace_thread:
|
||||||
thread_userspace_trampoline:
|
call get_thread_start_sp
|
||||||
pushq $0x23
|
movq %rax, %rsp
|
||||||
pushq %rdi
|
|
||||||
pushfq
|
# STACK LAYOUT
|
||||||
pushq $0x1B
|
# entry
|
||||||
pushq %rsi
|
# argc
|
||||||
movq %rdx, %rdi
|
# argv
|
||||||
movq %rcx, %rsi
|
# envp
|
||||||
movq %r8, %rdx
|
# userspace stack
|
||||||
|
|
||||||
|
call get_userspace_thread_stack_top
|
||||||
|
|
||||||
|
popq %rdx
|
||||||
|
popq %rsi
|
||||||
|
popq %rdi
|
||||||
|
popq %rcx
|
||||||
|
|
||||||
|
pushq $(0x20 | 3)
|
||||||
|
pushq %rax
|
||||||
|
pushq $0x202
|
||||||
|
pushq $(0x18 | 3)
|
||||||
|
pushq %rcx
|
||||||
iretq
|
iretq
|
||||||
|
|
|
@ -249,10 +249,7 @@ ap_stack_ptr:
|
||||||
1:
|
1:
|
||||||
cli; cld
|
cli; cld
|
||||||
ljmpl $0x00, $ap_cs_clear
|
ljmpl $0x00, $ap_cs_clear
|
||||||
|
|
||||||
ap_cs_clear:
|
ap_cs_clear:
|
||||||
xorw %ax, %ax
|
|
||||||
movw %ax, %ds
|
|
||||||
|
|
||||||
# load ap gdt and enter protected mode
|
# load ap gdt and enter protected mode
|
||||||
lgdt ap_gdtr
|
lgdt ap_gdtr
|
||||||
|
|
|
@ -1,11 +1,11 @@
|
||||||
.macro pushaq
|
.macro pushaq
|
||||||
pushq %rax
|
pushq %rax
|
||||||
pushq %rbx
|
|
||||||
pushq %rcx
|
pushq %rcx
|
||||||
pushq %rdx
|
pushq %rdx
|
||||||
|
pushq %rbx
|
||||||
pushq %rbp
|
pushq %rbp
|
||||||
pushq %rdi
|
|
||||||
pushq %rsi
|
pushq %rsi
|
||||||
|
pushq %rdi
|
||||||
pushq %r8
|
pushq %r8
|
||||||
pushq %r9
|
pushq %r9
|
||||||
pushq %r10
|
pushq %r10
|
||||||
|
@ -25,12 +25,12 @@
|
||||||
popq %r10
|
popq %r10
|
||||||
popq %r9
|
popq %r9
|
||||||
popq %r8
|
popq %r8
|
||||||
popq %rsi
|
|
||||||
popq %rdi
|
popq %rdi
|
||||||
|
popq %rsi
|
||||||
popq %rbp
|
popq %rbp
|
||||||
|
popq %rbx
|
||||||
popq %rdx
|
popq %rdx
|
||||||
popq %rcx
|
popq %rcx
|
||||||
popq %rbx
|
|
||||||
popq %rax
|
popq %rax
|
||||||
.endm
|
.endm
|
||||||
|
|
||||||
|
@ -43,44 +43,36 @@
|
||||||
popq %r10
|
popq %r10
|
||||||
popq %r9
|
popq %r9
|
||||||
popq %r8
|
popq %r8
|
||||||
popq %rsi
|
|
||||||
popq %rdi
|
popq %rdi
|
||||||
|
popq %rsi
|
||||||
popq %rbp
|
popq %rbp
|
||||||
|
popq %rbx
|
||||||
popq %rdx
|
popq %rdx
|
||||||
popq %rcx
|
popq %rcx
|
||||||
popq %rbx
|
|
||||||
.endm
|
.endm
|
||||||
|
|
||||||
isr_stub:
|
isr_stub:
|
||||||
pushaq
|
pushaq
|
||||||
|
|
||||||
movq %cr0, %rax; pushq %rax
|
movq %cr0, %rax; pushq %rax
|
||||||
movq %cr2, %rax; pushq %rax
|
movq %cr2, %rax; pushq %rax
|
||||||
movq %cr3, %rax; pushq %rax
|
movq %cr3, %rax; pushq %rax
|
||||||
movq %cr4, %rax; pushq %rax
|
movq %cr4, %rax; pushq %rax
|
||||||
movq 184(%rsp), %rax; pushq %rax
|
|
||||||
movq 176(%rsp), %rax; pushq %rax
|
|
||||||
movq 208(%rsp), %rax; pushq %rax
|
|
||||||
|
|
||||||
movq 176(%rsp), %rdi
|
|
||||||
movq 184(%rsp), %rsi
|
|
||||||
|
|
||||||
movq %rsp, %rdx
|
|
||||||
addq $192, %rdx
|
|
||||||
|
|
||||||
movq %rsp, %rcx
|
|
||||||
|
|
||||||
|
movq 152(%rsp), %rdi // isr number
|
||||||
|
movq 160(%rsp), %rsi // error code
|
||||||
|
leaq 168(%rsp), %rdx // interrupt stack ptr
|
||||||
|
movq %rsp, %rcx // register ptr
|
||||||
call cpp_isr_handler
|
call cpp_isr_handler
|
||||||
addq $56, %rsp
|
addq $32, %rsp
|
||||||
|
|
||||||
popaq
|
popaq
|
||||||
addq $16, %rsp
|
addq $16, %rsp
|
||||||
iretq
|
iretq
|
||||||
|
|
||||||
irq_stub:
|
irq_stub:
|
||||||
pushaq
|
pushaq
|
||||||
movq 0x78(%rsp), %rdi # irq number
|
movq 120(%rsp), %rdi # irq number
|
||||||
movq %rsp, %rsi
|
|
||||||
addq $136, %rsi
|
|
||||||
call cpp_irq_handler
|
call cpp_irq_handler
|
||||||
popaq
|
popaq
|
||||||
addq $16, %rsp
|
addq $16, %rsp
|
||||||
|
@ -174,7 +166,15 @@ irq 28
|
||||||
irq 29
|
irq 29
|
||||||
irq 30
|
irq 30
|
||||||
irq 31
|
irq 31
|
||||||
irq 32
|
|
||||||
|
.global asm_reschedule_handler
|
||||||
|
asm_reschedule_handler:
|
||||||
|
pushaq
|
||||||
|
leaq 120(%rsp), %rdi # interrupt stack ptr
|
||||||
|
movq %rsp, %rsi # interrupt register ptr
|
||||||
|
call cpp_reschedule_handler
|
||||||
|
popaq
|
||||||
|
iretq
|
||||||
|
|
||||||
// arguments in RAX, RBX, RCX, RDX, RSI, RDI
|
// arguments in RAX, RBX, RCX, RDX, RSI, RDI
|
||||||
// System V ABI: RDI, RSI, RDX, RCX, R8, R9
|
// System V ABI: RDI, RSI, RDX, RCX, R8, R9
|
||||||
|
|
|
@ -1,115 +1,41 @@
|
||||||
#include <BAN/Atomic.h>
|
|
||||||
#include <kernel/Panic.h>
|
#include <kernel/Panic.h>
|
||||||
|
|
||||||
#define ATEXIT_MAX_FUNCS 128
|
#define ATEXIT_MAX_FUNCS 128
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
extern "C" {
|
|
||||||
#endif
|
|
||||||
|
|
||||||
typedef unsigned uarch_t;
|
|
||||||
|
|
||||||
struct atexit_func_entry_t
|
struct atexit_func_entry_t
|
||||||
{
|
{
|
||||||
/*
|
void(*func)(void*);
|
||||||
* Each member is at least 4 bytes large. Such that each entry is 12bytes.
|
void* arg;
|
||||||
* 128 * 12 = 1.5KB exact.
|
void* dso_handle;
|
||||||
**/
|
|
||||||
void (*destructor_func)(void *);
|
|
||||||
void *obj_ptr;
|
|
||||||
void *dso_handle;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
atexit_func_entry_t __atexit_funcs[ATEXIT_MAX_FUNCS];
|
static atexit_func_entry_t __atexit_funcs[ATEXIT_MAX_FUNCS];
|
||||||
uarch_t __atexit_func_count = 0;
|
static size_t __atexit_func_count = 0;
|
||||||
|
|
||||||
int __cxa_atexit(void (*f)(void *), void *objptr, void *dso)
|
extern "C" int __cxa_atexit(void(*func)(void*), void* arg, void* dso_handle)
|
||||||
{
|
{
|
||||||
if (__atexit_func_count >= ATEXIT_MAX_FUNCS) {return -1;};
|
if (__atexit_func_count >= ATEXIT_MAX_FUNCS)
|
||||||
__atexit_funcs[__atexit_func_count].destructor_func = f;
|
return -1;
|
||||||
__atexit_funcs[__atexit_func_count].obj_ptr = objptr;
|
auto& atexit_func = __atexit_funcs[__atexit_func_count++];
|
||||||
__atexit_funcs[__atexit_func_count].dso_handle = dso;
|
atexit_func.func = func;
|
||||||
__atexit_func_count++;
|
atexit_func.arg = arg;
|
||||||
return 0; /*I would prefer if functions returned 1 on success, but the ABI says...*/
|
atexit_func.dso_handle = dso_handle;
|
||||||
|
return 0;
|
||||||
};
|
};
|
||||||
|
|
||||||
void __cxa_finalize(void *f)
|
extern "C" void __cxa_finalize(void* f)
|
||||||
{
|
{
|
||||||
uarch_t i = __atexit_func_count;
|
for (size_t i = __atexit_func_count; i > 0; i--)
|
||||||
if (!f)
|
|
||||||
{
|
{
|
||||||
/*
|
auto& atexit_func = __atexit_funcs[i - 1];
|
||||||
* According to the Itanium C++ ABI, if __cxa_finalize is called without a
|
if (atexit_func.func == nullptr)
|
||||||
* function ptr, then it means that we should destroy EVERYTHING MUAHAHAHA!!
|
continue;
|
||||||
*
|
if (f == nullptr || f == atexit_func.func)
|
||||||
* TODO:
|
|
||||||
* Note well, however, that deleting a function from here that contains a __dso_handle
|
|
||||||
* means that one link to a shared object file has been terminated. In other words,
|
|
||||||
* We should monitor this list (optional, of course), since it tells us how many links to
|
|
||||||
* an object file exist at runtime in a particular application. This can be used to tell
|
|
||||||
* when a shared object is no longer in use. It is one of many methods, however.
|
|
||||||
**/
|
|
||||||
//You may insert a prinf() here to tell you whether or not the function gets called. Testing
|
|
||||||
//is CRITICAL!
|
|
||||||
while (i--)
|
|
||||||
{
|
{
|
||||||
if (__atexit_funcs[i].destructor_func)
|
atexit_func.func(atexit_func.arg);
|
||||||
{
|
atexit_func.func = nullptr;
|
||||||
/* ^^^ That if statement is a safeguard...
|
}
|
||||||
* To make sure we don't call any entries that have already been called and unset at runtime.
|
}
|
||||||
* Those will contain a value of 0, and calling a function with value 0
|
|
||||||
* will cause undefined behaviour. Remember that linear address 0,
|
|
||||||
* in a non-virtual address space (physical) contains the IVT and BDA.
|
|
||||||
*
|
|
||||||
* In a virtual environment, the kernel will receive a page fault, and then probably
|
|
||||||
* map in some trash, or a blank page, or something stupid like that.
|
|
||||||
* This will result in the processor executing trash, and...we don't want that.
|
|
||||||
**/
|
|
||||||
(*__atexit_funcs[i].destructor_func)(__atexit_funcs[i].obj_ptr);
|
|
||||||
};
|
|
||||||
};
|
|
||||||
return;
|
|
||||||
};
|
|
||||||
|
|
||||||
while (i--)
|
|
||||||
{
|
|
||||||
/*
|
|
||||||
* The ABI states that multiple calls to the __cxa_finalize(destructor_func_ptr) function
|
|
||||||
* should not destroy objects multiple times. Only one call is needed to eliminate multiple
|
|
||||||
* entries with the same address.
|
|
||||||
*
|
|
||||||
* FIXME:
|
|
||||||
* This presents the obvious problem: all destructors must be stored in the order they
|
|
||||||
* were placed in the list. I.e: the last initialized object's destructor must be first
|
|
||||||
* in the list of destructors to be called. But removing a destructor from the list at runtime
|
|
||||||
* creates holes in the table with unfilled entries.
|
|
||||||
* Remember that the insertion algorithm in __cxa_atexit simply inserts the next destructor
|
|
||||||
* at the end of the table. So, we have holes with our current algorithm
|
|
||||||
* This function should be modified to move all the destructors above the one currently
|
|
||||||
* being called and removed one place down in the list, so as to cover up the hole.
|
|
||||||
* Otherwise, whenever a destructor is called and removed, an entire space in the table is wasted.
|
|
||||||
**/
|
|
||||||
if (__atexit_funcs[i].destructor_func == f)
|
|
||||||
{
|
|
||||||
/*
|
|
||||||
* Note that in the next line, not every destructor function is a class destructor.
|
|
||||||
* It is perfectly legal to register a non class destructor function as a simple cleanup
|
|
||||||
* function to be called on program termination, in which case, it would not NEED an
|
|
||||||
* object This pointer. A smart programmer may even take advantage of this and register
|
|
||||||
* a C function in the table with the address of some structure containing data about
|
|
||||||
* what to clean up on exit.
|
|
||||||
* In the case of a function that takes no arguments, it will simply be ignore within the
|
|
||||||
* function itself. No worries.
|
|
||||||
**/
|
|
||||||
(*__atexit_funcs[i].destructor_func)(__atexit_funcs[i].obj_ptr);
|
|
||||||
__atexit_funcs[i].destructor_func = 0;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Notice that we didn't decrement __atexit_func_count: this is because this algorithm
|
|
||||||
* requires patching to deal with the FIXME outlined above.
|
|
||||||
**/
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
};
|
||||||
|
|
||||||
namespace __cxxabiv1
|
namespace __cxxabiv1
|
||||||
|
@ -118,23 +44,19 @@ namespace __cxxabiv1
|
||||||
|
|
||||||
int __cxa_guard_acquire (__guard* g)
|
int __cxa_guard_acquire (__guard* g)
|
||||||
{
|
{
|
||||||
auto& atomic = *reinterpret_cast<BAN::Atomic<__guard>*>(g);
|
uint8_t* byte = reinterpret_cast<uint8_t*>(g);
|
||||||
return atomic == 0;
|
uint8_t zero = 0;
|
||||||
|
return __atomic_compare_exchange_n(byte, &zero, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
|
||||||
}
|
}
|
||||||
|
|
||||||
void __cxa_guard_release (__guard* g)
|
void __cxa_guard_release (__guard* g)
|
||||||
{
|
{
|
||||||
auto& atomic = *reinterpret_cast<BAN::Atomic<__guard>*>(g);
|
uint8_t* byte = reinterpret_cast<uint8_t*>(g);
|
||||||
atomic = 1;
|
__atomic_store_n(byte, 0, __ATOMIC_RELEASE);
|
||||||
}
|
}
|
||||||
|
|
||||||
void __cxa_guard_abort (__guard*)
|
void __cxa_guard_abort (__guard*)
|
||||||
{
|
{
|
||||||
Kernel::panic("__cxa_guard_abort");
|
Kernel::panic("__cxa_guard_abort");
|
||||||
__builtin_unreachable();
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
};
|
|
||||||
#endif
|
|
||||||
|
|
|
@ -1,11 +1,11 @@
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#define x86_64 1
|
#define x86_64 1
|
||||||
#define i386 2
|
#define i686 2
|
||||||
|
|
||||||
#define ARCH(arch) (__arch == arch)
|
#define ARCH(arch) (__arch == arch)
|
||||||
|
|
||||||
#if !defined(__arch) || (__arch != x86_64 && __arch != i386)
|
#if !defined(__arch) || (__arch != x86_64 && __arch != i686)
|
||||||
#error "Unsupported architecture"
|
#error "Unsupported architecture"
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
@ -13,16 +13,14 @@
|
||||||
#define read_rsp(rsp) asm volatile("movq %%rsp, %0" : "=r"(rsp))
|
#define read_rsp(rsp) asm volatile("movq %%rsp, %0" : "=r"(rsp))
|
||||||
#define push_callee_saved() asm volatile("pushq %rbx; pushq %rbp; pushq %r12; pushq %r13; pushq %r14; pushq %r15")
|
#define push_callee_saved() asm volatile("pushq %rbx; pushq %rbp; pushq %r12; pushq %r13; pushq %r14; pushq %r15")
|
||||||
#define pop_callee_saved() asm volatile("popq %r15; popq %r14; popq %r13; popq %r12; popq %rbp; popq %rbx")
|
#define pop_callee_saved() asm volatile("popq %r15; popq %r14; popq %r13; popq %r12; popq %rbp; popq %rbx")
|
||||||
#else
|
#elif ARCH(i686)
|
||||||
#define read_rsp(rsp) asm volatile("movl %%esp, %0" : "=r"(rsp))
|
#define read_rsp(rsp) asm volatile("movl %%esp, %0" : "=r"(rsp))
|
||||||
#define push_callee_saved() asm volatile("pushal")
|
#define push_callee_saved() asm volatile("pushal")
|
||||||
#define pop_callee_saved() asm volatile("popal")
|
#define pop_callee_saved() asm volatile("popal")
|
||||||
|
#else
|
||||||
|
#error
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#include <stdint.h>
|
#include <stdint.h>
|
||||||
|
|
||||||
#ifdef __cplusplus
|
extern "C" uintptr_t read_ip();
|
||||||
extern "C" uintptr_t read_rip();
|
|
||||||
#else
|
|
||||||
extern uintptr_t read_rip();
|
|
||||||
#endif
|
|
||||||
|
|
|
@ -21,12 +21,24 @@ namespace Kernel
|
||||||
size_t size { 0 };
|
size_t size { 0 };
|
||||||
blkcnt_t blocks { 0 };
|
blkcnt_t blocks { 0 };
|
||||||
|
|
||||||
|
#if ARCH(x86_64)
|
||||||
// 2x direct blocks
|
// 2x direct blocks
|
||||||
// 1x singly indirect
|
// 1x singly indirect
|
||||||
// 1x doubly indirect
|
// 1x doubly indirect
|
||||||
// 1x triply indirect
|
// 1x triply indirect
|
||||||
BAN::Array<paddr_t, 5> block;
|
BAN::Array<paddr_t, 5> block;
|
||||||
static constexpr size_t direct_block_count = 2;
|
static constexpr size_t direct_block_count = 2;
|
||||||
|
#elif ARCH(i686)
|
||||||
|
uint32_t __padding;
|
||||||
|
// 5x direct blocks
|
||||||
|
// 1x singly indirect
|
||||||
|
// 1x doubly indirect
|
||||||
|
// 1x triply indirect
|
||||||
|
BAN::Array<paddr_t, 8> block;
|
||||||
|
static constexpr size_t direct_block_count = 5;
|
||||||
|
#else
|
||||||
|
#error
|
||||||
|
#endif
|
||||||
static constexpr size_t max_size =
|
static constexpr size_t max_size =
|
||||||
direct_block_count * PAGE_SIZE +
|
direct_block_count * PAGE_SIZE +
|
||||||
(PAGE_SIZE / sizeof(paddr_t)) * PAGE_SIZE +
|
(PAGE_SIZE / sizeof(paddr_t)) * PAGE_SIZE +
|
||||||
|
|
|
@ -2,12 +2,14 @@
|
||||||
|
|
||||||
#include <BAN/Array.h>
|
#include <BAN/Array.h>
|
||||||
#include <BAN/NoCopyMove.h>
|
#include <BAN/NoCopyMove.h>
|
||||||
|
#include <kernel/Arch.h>
|
||||||
|
|
||||||
#include <stdint.h>
|
#include <stdint.h>
|
||||||
|
|
||||||
namespace Kernel
|
namespace Kernel
|
||||||
{
|
{
|
||||||
|
|
||||||
|
#if ARCH(x86_64)
|
||||||
struct TaskStateSegment
|
struct TaskStateSegment
|
||||||
{
|
{
|
||||||
uint32_t reserved1;
|
uint32_t reserved1;
|
||||||
|
@ -26,6 +28,54 @@ namespace Kernel
|
||||||
uint16_t reserved4;
|
uint16_t reserved4;
|
||||||
uint16_t iopb;
|
uint16_t iopb;
|
||||||
} __attribute__((packed));
|
} __attribute__((packed));
|
||||||
|
static_assert(sizeof(TaskStateSegment) == 104);
|
||||||
|
#elif ARCH(i686)
|
||||||
|
struct TaskStateSegment
|
||||||
|
{
|
||||||
|
uint16_t link;
|
||||||
|
uint16_t __reserved0;
|
||||||
|
uint32_t esp0;
|
||||||
|
uint16_t ss0;
|
||||||
|
uint16_t __reserved1;
|
||||||
|
uint32_t esp1;
|
||||||
|
uint16_t ss1;
|
||||||
|
uint16_t __reserved2;
|
||||||
|
uint32_t esp2;
|
||||||
|
uint16_t ss2;
|
||||||
|
uint16_t __reserved3;
|
||||||
|
uint32_t cr3;
|
||||||
|
uint32_t eip;
|
||||||
|
uint32_t eflags;
|
||||||
|
uint32_t eax;
|
||||||
|
uint32_t ecx;
|
||||||
|
uint32_t edx;
|
||||||
|
uint32_t ebx;
|
||||||
|
uint32_t esp;
|
||||||
|
uint32_t ebp;
|
||||||
|
uint32_t esi;
|
||||||
|
uint32_t edi;
|
||||||
|
uint16_t es;
|
||||||
|
uint16_t __reserved4;
|
||||||
|
uint16_t cs;
|
||||||
|
uint16_t __reserved5;
|
||||||
|
uint16_t ss;
|
||||||
|
uint16_t __reserved6;
|
||||||
|
uint16_t ds;
|
||||||
|
uint16_t __reserved7;
|
||||||
|
uint16_t fs;
|
||||||
|
uint16_t __reserved8;
|
||||||
|
uint16_t gs;
|
||||||
|
uint16_t __reserved9;
|
||||||
|
uint16_t ldtr;
|
||||||
|
uint16_t __reserved10;
|
||||||
|
uint16_t __reserved11;
|
||||||
|
uint16_t iopb;
|
||||||
|
uint32_t ssp;
|
||||||
|
};
|
||||||
|
static_assert(sizeof(TaskStateSegment) == 108);
|
||||||
|
#else
|
||||||
|
#error
|
||||||
|
#endif
|
||||||
|
|
||||||
union SegmentDescriptor
|
union SegmentDescriptor
|
||||||
{
|
{
|
||||||
|
@ -38,20 +88,20 @@ namespace Kernel
|
||||||
uint8_t limit2 : 4;
|
uint8_t limit2 : 4;
|
||||||
uint8_t flags : 4;
|
uint8_t flags : 4;
|
||||||
uint8_t base3;
|
uint8_t base3;
|
||||||
} __attribute__((packed));
|
};
|
||||||
|
|
||||||
struct
|
struct
|
||||||
{
|
{
|
||||||
uint32_t low;
|
uint32_t low;
|
||||||
uint32_t high;
|
uint32_t high;
|
||||||
} __attribute__((packed));
|
};
|
||||||
|
};
|
||||||
} __attribute__((packed));
|
static_assert(sizeof(SegmentDescriptor) == 8);
|
||||||
|
|
||||||
struct GDTR
|
struct GDTR
|
||||||
{
|
{
|
||||||
uint16_t size;
|
uint16_t size;
|
||||||
uint64_t address;
|
uintptr_t address;
|
||||||
} __attribute__((packed));
|
} __attribute__((packed));
|
||||||
|
|
||||||
class GDT
|
class GDT
|
||||||
|
@ -60,7 +110,7 @@ namespace Kernel
|
||||||
BAN_NON_MOVABLE(GDT);
|
BAN_NON_MOVABLE(GDT);
|
||||||
|
|
||||||
public:
|
public:
|
||||||
static GDT* create();
|
static GDT* create(void* processor);
|
||||||
void load() { flush_gdt(); flush_tss(); }
|
void load() { flush_gdt(); flush_tss(); }
|
||||||
|
|
||||||
static constexpr inline bool is_user_segment(uint8_t segment)
|
static constexpr inline bool is_user_segment(uint8_t segment)
|
||||||
|
@ -68,9 +118,14 @@ namespace Kernel
|
||||||
return (segment & 3) == 3;
|
return (segment & 3) == 3;
|
||||||
}
|
}
|
||||||
|
|
||||||
void set_tss_stack(uintptr_t rsp)
|
void set_tss_stack(uintptr_t sp)
|
||||||
{
|
{
|
||||||
m_tss.rsp0 = rsp;
|
#if ARCH(x86_64)
|
||||||
|
m_tss.rsp0 = sp;
|
||||||
|
#elif ARCH(i686)
|
||||||
|
m_tss.esp0 = sp;
|
||||||
|
m_tss.ss0 = 0x10;
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
@ -86,15 +141,21 @@ namespace Kernel
|
||||||
|
|
||||||
void flush_tss()
|
void flush_tss()
|
||||||
{
|
{
|
||||||
asm volatile("ltr %0" :: "rm"((uint16_t)0x28) : "memory");
|
asm volatile("ltr %0" :: "rm"(m_tss_offset) : "memory");
|
||||||
}
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
#if ARCH(x86_64)
|
||||||
BAN::Array<SegmentDescriptor, 7> m_gdt; // null, kernel code, kernel data, user code, user data, tss low, tss high
|
BAN::Array<SegmentDescriptor, 7> m_gdt; // null, kernel code, kernel data, user code, user data, tss low, tss high
|
||||||
|
static constexpr uint16_t m_tss_offset = 0x28;
|
||||||
|
#elif ARCH(i686)
|
||||||
|
BAN::Array<SegmentDescriptor, 7> m_gdt; // null, kernel code, kernel data, user code, user data, processor data, tss
|
||||||
|
static constexpr uint16_t m_tss_offset = 0x30;
|
||||||
|
#endif
|
||||||
TaskStateSegment m_tss;
|
TaskStateSegment m_tss;
|
||||||
const GDTR m_gdtr {
|
const GDTR m_gdtr {
|
||||||
.size = m_gdt.size() * sizeof(SegmentDescriptor) - 1,
|
.size = m_gdt.size() * sizeof(SegmentDescriptor) - 1,
|
||||||
.address = reinterpret_cast<uint64_t>(m_gdt.data())
|
.address = reinterpret_cast<uintptr_t>(m_gdt.data())
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -2,6 +2,7 @@
|
||||||
|
|
||||||
#include <BAN/Array.h>
|
#include <BAN/Array.h>
|
||||||
#include <BAN/NoCopyMove.h>
|
#include <BAN/NoCopyMove.h>
|
||||||
|
#include <kernel/Arch.h>
|
||||||
#include <kernel/Interruptable.h>
|
#include <kernel/Interruptable.h>
|
||||||
|
|
||||||
#include <stdint.h>
|
#include <stdint.h>
|
||||||
|
@ -12,21 +13,36 @@ constexpr uint8_t IRQ_IPI = 32;
|
||||||
namespace Kernel
|
namespace Kernel
|
||||||
{
|
{
|
||||||
|
|
||||||
|
#if ARCH(x86_64)
|
||||||
struct GateDescriptor
|
struct GateDescriptor
|
||||||
{
|
{
|
||||||
uint16_t offset1;
|
uint16_t offset0;
|
||||||
uint16_t selector;
|
uint16_t selector;
|
||||||
uint8_t IST;
|
uint8_t IST;
|
||||||
uint8_t flags;
|
uint8_t flags;
|
||||||
uint16_t offset2;
|
uint16_t offset1;
|
||||||
uint32_t offset3;
|
uint32_t offset2;
|
||||||
uint32_t reserved;
|
uint32_t reserved;
|
||||||
} __attribute__((packed));
|
};
|
||||||
|
static_assert(sizeof(GateDescriptor) == 16);
|
||||||
|
#elif ARCH(i686)
|
||||||
|
struct GateDescriptor
|
||||||
|
{
|
||||||
|
uint16_t offset0;
|
||||||
|
uint16_t selector;
|
||||||
|
uint8_t reserved;
|
||||||
|
uint8_t flags;
|
||||||
|
uint16_t offset1;
|
||||||
|
};
|
||||||
|
static_assert(sizeof(GateDescriptor) == 8);
|
||||||
|
#else
|
||||||
|
#error
|
||||||
|
#endif
|
||||||
|
|
||||||
struct IDTR
|
struct IDTR
|
||||||
{
|
{
|
||||||
uint16_t size;
|
uint16_t size;
|
||||||
uint64_t offset;
|
uintptr_t offset;
|
||||||
} __attribute__((packed));
|
} __attribute__((packed));
|
||||||
|
|
||||||
class IDT
|
class IDT
|
||||||
|
|
|
@ -18,6 +18,7 @@ namespace Kernel
|
||||||
virtual bool is_in_service(uint8_t) = 0;
|
virtual bool is_in_service(uint8_t) = 0;
|
||||||
|
|
||||||
static void initialize(bool force_pic);
|
static void initialize(bool force_pic);
|
||||||
|
static bool is_initialized();
|
||||||
static InterruptController& get();
|
static InterruptController& get();
|
||||||
|
|
||||||
virtual void initialize_multiprocessor() = 0;
|
virtual void initialize_multiprocessor() = 0;
|
||||||
|
|
|
@ -7,11 +7,45 @@ namespace Kernel
|
||||||
|
|
||||||
struct InterruptStack
|
struct InterruptStack
|
||||||
{
|
{
|
||||||
uint64_t rip;
|
uintptr_t ip;
|
||||||
uint64_t cs;
|
uintptr_t cs;
|
||||||
uint64_t flags;
|
uintptr_t flags;
|
||||||
uint64_t rsp;
|
uintptr_t sp;
|
||||||
uint64_t ss;
|
uintptr_t ss;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
#if ARCH(x86_64)
|
||||||
|
struct InterruptRegisters
|
||||||
|
{
|
||||||
|
uintptr_t r15;
|
||||||
|
uintptr_t r14;
|
||||||
|
uintptr_t r13;
|
||||||
|
uintptr_t r12;
|
||||||
|
uintptr_t r11;
|
||||||
|
uintptr_t r10;
|
||||||
|
uintptr_t r9;
|
||||||
|
uintptr_t r8;
|
||||||
|
|
||||||
|
uintptr_t rdi;
|
||||||
|
uintptr_t rsi;
|
||||||
|
uintptr_t rbp;
|
||||||
|
uintptr_t rbx;
|
||||||
|
uintptr_t rdx;
|
||||||
|
uintptr_t rcx;
|
||||||
|
uintptr_t rax;
|
||||||
|
};
|
||||||
|
#elif ARCH(i686)
|
||||||
|
struct InterruptRegisters
|
||||||
|
{
|
||||||
|
uintptr_t edi;
|
||||||
|
uintptr_t esi;
|
||||||
|
uintptr_t ebp;
|
||||||
|
uintptr_t unused;
|
||||||
|
uintptr_t ebx;
|
||||||
|
uintptr_t edx;
|
||||||
|
uintptr_t ecx;
|
||||||
|
uintptr_t eax;
|
||||||
|
};
|
||||||
|
#endif
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -25,7 +25,7 @@ namespace Kernel
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
while (!m_locker.compare_exchange(-1, tid))
|
while (!m_locker.compare_exchange(-1, tid))
|
||||||
Scheduler::get().reschedule();
|
Scheduler::get().yield();
|
||||||
ASSERT(m_lock_depth == 0);
|
ASSERT(m_lock_depth == 0);
|
||||||
}
|
}
|
||||||
m_lock_depth++;
|
m_lock_depth++;
|
||||||
|
@ -81,7 +81,7 @@ namespace Kernel
|
||||||
if (has_priority)
|
if (has_priority)
|
||||||
m_queue_length++;
|
m_queue_length++;
|
||||||
while (!(has_priority || m_queue_length == 0) || !m_locker.compare_exchange(-1, tid))
|
while (!(has_priority || m_queue_length == 0) || !m_locker.compare_exchange(-1, tid))
|
||||||
Scheduler::get().reschedule();
|
Scheduler::get().yield();
|
||||||
ASSERT(m_lock_depth == 0);
|
ASSERT(m_lock_depth == 0);
|
||||||
}
|
}
|
||||||
m_lock_depth++;
|
m_lock_depth++;
|
||||||
|
|
|
@ -4,12 +4,15 @@
|
||||||
|
|
||||||
#if ARCH(x86_64)
|
#if ARCH(x86_64)
|
||||||
#define KERNEL_OFFSET 0xFFFFFFFF80000000
|
#define KERNEL_OFFSET 0xFFFFFFFF80000000
|
||||||
#define V2P(vaddr) (((vaddr_t)(vaddr)) - KERNEL_OFFSET)
|
#elif ARCH(i686)
|
||||||
#define P2V(paddr) (((paddr_t)(paddr)) + KERNEL_OFFSET)
|
#define KERNEL_OFFSET 0xC0000000
|
||||||
#else
|
#else
|
||||||
#error
|
#error
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#define V2P(vaddr) (((vaddr_t)(vaddr)) - KERNEL_OFFSET)
|
||||||
|
#define P2V(paddr) (((paddr_t)(paddr)) + KERNEL_OFFSET)
|
||||||
|
|
||||||
#define PAGE_SIZE ((uintptr_t)4096)
|
#define PAGE_SIZE ((uintptr_t)4096)
|
||||||
#define PAGE_SIZE_SHIFT 12
|
#define PAGE_SIZE_SHIFT 12
|
||||||
#define PAGE_ADDR_MASK (~(uintptr_t)0xFFF)
|
#define PAGE_ADDR_MASK (~(uintptr_t)0xFFF)
|
||||||
|
@ -18,6 +21,6 @@ namespace Kernel
|
||||||
{
|
{
|
||||||
|
|
||||||
using vaddr_t = uintptr_t;
|
using vaddr_t = uintptr_t;
|
||||||
using paddr_t = uintptr_t;
|
using paddr_t = uint64_t;
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -199,6 +199,33 @@ namespace Kernel
|
||||||
BAN::ErrorOr<void> validate_string_access(const char*);
|
BAN::ErrorOr<void> validate_string_access(const char*);
|
||||||
BAN::ErrorOr<void> validate_pointer_access(const void*, size_t);
|
BAN::ErrorOr<void> validate_pointer_access(const void*, size_t);
|
||||||
|
|
||||||
|
uint64_t signal_pending_mask() const
|
||||||
|
{
|
||||||
|
return ((uint64_t)m_signal_pending_mask[1].load() << 32) | m_signal_pending_mask[0].load();
|
||||||
|
}
|
||||||
|
|
||||||
|
void add_pending_signal(uint8_t signal)
|
||||||
|
{
|
||||||
|
ASSERT(signal >= _SIGMIN);
|
||||||
|
ASSERT(signal <= _SIGMAX);
|
||||||
|
ASSERT(signal < 64);
|
||||||
|
if (signal < 32)
|
||||||
|
m_signal_pending_mask[0] |= (uint32_t)1 << signal;
|
||||||
|
else
|
||||||
|
m_signal_pending_mask[1] |= (uint32_t)1 << (signal - 32);
|
||||||
|
}
|
||||||
|
|
||||||
|
void remove_pending_signal(uint8_t signal)
|
||||||
|
{
|
||||||
|
ASSERT(signal >= _SIGMIN);
|
||||||
|
ASSERT(signal <= _SIGMAX);
|
||||||
|
ASSERT(signal < 64);
|
||||||
|
if (signal < 32)
|
||||||
|
m_signal_pending_mask[0] &= ~((uint32_t)1 << signal);
|
||||||
|
else
|
||||||
|
m_signal_pending_mask[1] &= ~((uint32_t)1 << (signal - 32));
|
||||||
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
struct ExitStatus
|
struct ExitStatus
|
||||||
{
|
{
|
||||||
|
@ -226,7 +253,8 @@ namespace Kernel
|
||||||
BAN::Vector<Thread*> m_threads;
|
BAN::Vector<Thread*> m_threads;
|
||||||
|
|
||||||
BAN::Atomic<vaddr_t> m_signal_handlers[_SIGMAX + 1] { };
|
BAN::Atomic<vaddr_t> m_signal_handlers[_SIGMAX + 1] { };
|
||||||
BAN::Atomic<uint64_t> m_signal_pending_mask { 0 };
|
// This is 2 32 bit values to allow atomicity on 32 targets
|
||||||
|
BAN::Atomic<uint32_t> m_signal_pending_mask[2] { 0, 0 };
|
||||||
|
|
||||||
BAN::Vector<BAN::String> m_cmdline;
|
BAN::Vector<BAN::String> m_cmdline;
|
||||||
BAN::Vector<BAN::String> m_environ;
|
BAN::Vector<BAN::String> m_environ;
|
||||||
|
|
|
@ -5,6 +5,7 @@
|
||||||
#include <kernel/Arch.h>
|
#include <kernel/Arch.h>
|
||||||
#include <kernel/GDT.h>
|
#include <kernel/GDT.h>
|
||||||
#include <kernel/IDT.h>
|
#include <kernel/IDT.h>
|
||||||
|
#include <kernel/InterruptStack.h>
|
||||||
#include <kernel/SchedulerQueue.h>
|
#include <kernel/SchedulerQueue.h>
|
||||||
|
|
||||||
namespace Kernel
|
namespace Kernel
|
||||||
|
@ -16,10 +17,10 @@ namespace Kernel
|
||||||
Enabled,
|
Enabled,
|
||||||
};
|
};
|
||||||
|
|
||||||
using ProcessorID = uint8_t;
|
using ProcessorID = uint32_t;
|
||||||
constexpr ProcessorID PROCESSOR_NONE = 0xFF;
|
constexpr ProcessorID PROCESSOR_NONE = 0xFFFFFFFF;
|
||||||
|
|
||||||
#if ARCH(x86_64)
|
#if ARCH(x86_64) || ARCH(i686)
|
||||||
class Processor
|
class Processor
|
||||||
{
|
{
|
||||||
BAN_NON_COPYABLE(Processor);
|
BAN_NON_COPYABLE(Processor);
|
||||||
|
@ -68,6 +69,11 @@ namespace Kernel
|
||||||
static SchedulerQueue::Node* get_current_thread() { return reinterpret_cast<SchedulerQueue::Node*>(read_gs_ptr(offsetof(Processor, m_current_thread))); }
|
static SchedulerQueue::Node* get_current_thread() { return reinterpret_cast<SchedulerQueue::Node*>(read_gs_ptr(offsetof(Processor, m_current_thread))); }
|
||||||
static void set_current_thread(SchedulerQueue::Node* thread) { write_gs_ptr(offsetof(Processor, m_current_thread), thread); }
|
static void set_current_thread(SchedulerQueue::Node* thread) { write_gs_ptr(offsetof(Processor, m_current_thread), thread); }
|
||||||
|
|
||||||
|
static void enter_interrupt(InterruptStack*, InterruptRegisters*);
|
||||||
|
static void leave_interrupt();
|
||||||
|
static InterruptStack& get_interrupt_stack();
|
||||||
|
static InterruptRegisters& get_interrupt_registers();
|
||||||
|
|
||||||
private:
|
private:
|
||||||
Processor() = default;
|
Processor() = default;
|
||||||
~Processor() { ASSERT_NOT_REACHED(); }
|
~Processor() { ASSERT_NOT_REACHED(); }
|
||||||
|
@ -75,16 +81,16 @@ namespace Kernel
|
||||||
template<typename T>
|
template<typename T>
|
||||||
static T read_gs_sized(uintptr_t offset) requires(sizeof(T) <= 8)
|
static T read_gs_sized(uintptr_t offset) requires(sizeof(T) <= 8)
|
||||||
{
|
{
|
||||||
#define __ASM_INPUT(operation) operation " %%gs:%a[offset], %[result]" : [result]"=r"(result) : [offset]"ir"(offset)
|
#define __ASM_INPUT(operation) asm volatile(operation " %%gs:%a[offset], %[result]" : [result]"=r"(result) : [offset]"ir"(offset))
|
||||||
T result;
|
T result;
|
||||||
if constexpr(sizeof(T) == 8)
|
if constexpr(sizeof(T) == 8)
|
||||||
asm volatile(__ASM_INPUT("movq"));
|
__ASM_INPUT("movq");
|
||||||
if constexpr(sizeof(T) == 4)
|
if constexpr(sizeof(T) == 4)
|
||||||
asm volatile(__ASM_INPUT("movl"));
|
__ASM_INPUT("movl");
|
||||||
if constexpr(sizeof(T) == 2)
|
if constexpr(sizeof(T) == 2)
|
||||||
asm volatile(__ASM_INPUT("movw"));
|
__ASM_INPUT("movw");
|
||||||
if constexpr(sizeof(T) == 1)
|
if constexpr(sizeof(T) == 1)
|
||||||
asm volatile(__ASM_INPUT("movb"));
|
__ASM_INPUT("movb");
|
||||||
return result;
|
return result;
|
||||||
#undef __ASM_INPUT
|
#undef __ASM_INPUT
|
||||||
}
|
}
|
||||||
|
@ -92,15 +98,15 @@ namespace Kernel
|
||||||
template<typename T>
|
template<typename T>
|
||||||
static void write_gs_sized(uintptr_t offset, T value) requires(sizeof(T) <= 8)
|
static void write_gs_sized(uintptr_t offset, T value) requires(sizeof(T) <= 8)
|
||||||
{
|
{
|
||||||
#define __ASM_INPUT(operation) operation " %[value], %%gs:%a[offset]" :: [value]"r"(value), [offset]"ir"(offset) : "memory"
|
#define __ASM_INPUT(operation) asm volatile(operation " %[value], %%gs:%a[offset]" :: [value]"r"(value), [offset]"ir"(offset) : "memory")
|
||||||
if constexpr(sizeof(T) == 8)
|
if constexpr(sizeof(T) == 8)
|
||||||
asm volatile(__ASM_INPUT("movq"));
|
__ASM_INPUT("movq");
|
||||||
if constexpr(sizeof(T) == 4)
|
if constexpr(sizeof(T) == 4)
|
||||||
asm volatile(__ASM_INPUT("movl"));
|
__ASM_INPUT("movl");
|
||||||
if constexpr(sizeof(T) == 2)
|
if constexpr(sizeof(T) == 2)
|
||||||
asm volatile(__ASM_INPUT("movw"));
|
__ASM_INPUT("movw");
|
||||||
if constexpr(sizeof(T) == 1)
|
if constexpr(sizeof(T) == 1)
|
||||||
asm volatile(__ASM_INPUT("movb"));
|
__ASM_INPUT("movb");
|
||||||
#undef __ASM_INPUT
|
#undef __ASM_INPUT
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -121,6 +127,9 @@ namespace Kernel
|
||||||
Thread* m_idle_thread { nullptr };
|
Thread* m_idle_thread { nullptr };
|
||||||
SchedulerQueue::Node* m_current_thread { nullptr };
|
SchedulerQueue::Node* m_current_thread { nullptr };
|
||||||
|
|
||||||
|
InterruptStack* m_interrupt_stack { nullptr };
|
||||||
|
InterruptRegisters* m_interrupt_registers { nullptr };
|
||||||
|
|
||||||
void* m_current_page_table { nullptr };
|
void* m_current_page_table { nullptr };
|
||||||
|
|
||||||
friend class BAN::Array<Processor, 0xFF>;
|
friend class BAN::Array<Processor, 0xFF>;
|
||||||
|
|
|
@ -16,8 +16,10 @@ namespace Kernel
|
||||||
|
|
||||||
[[noreturn]] void start();
|
[[noreturn]] void start();
|
||||||
|
|
||||||
|
void yield();
|
||||||
|
|
||||||
void timer_reschedule();
|
void timer_reschedule();
|
||||||
void reschedule();
|
void irq_reschedule();
|
||||||
void reschedule_if_idling();
|
void reschedule_if_idling();
|
||||||
|
|
||||||
void set_current_thread_sleeping(uint64_t wake_time);
|
void set_current_thread_sleeping(uint64_t wake_time);
|
||||||
|
@ -30,9 +32,6 @@ namespace Kernel
|
||||||
Thread& current_thread();
|
Thread& current_thread();
|
||||||
static pid_t current_tid();
|
static pid_t current_tid();
|
||||||
|
|
||||||
[[noreturn]] void execute_current_thread();
|
|
||||||
[[noreturn]] void delete_current_process_and_thread();
|
|
||||||
|
|
||||||
// This is no return if called on current thread
|
// This is no return if called on current thread
|
||||||
void terminate_thread(Thread*);
|
void terminate_thread(Thread*);
|
||||||
|
|
||||||
|
@ -41,11 +40,7 @@ namespace Kernel
|
||||||
|
|
||||||
void set_current_thread_sleeping_impl(Semaphore* semaphore, uint64_t wake_time);
|
void set_current_thread_sleeping_impl(Semaphore* semaphore, uint64_t wake_time);
|
||||||
|
|
||||||
[[nodiscard]] bool save_current_thread();
|
void setup_next_thread();
|
||||||
void advance_current_thread();
|
|
||||||
|
|
||||||
[[noreturn]] void execute_current_thread_locked();
|
|
||||||
[[noreturn]] void execute_current_thread_stack_loaded();
|
|
||||||
|
|
||||||
BAN::ErrorOr<void> add_thread(Thread*);
|
BAN::ErrorOr<void> add_thread(Thread*);
|
||||||
|
|
||||||
|
|
|
@ -26,6 +26,7 @@ namespace Kernel
|
||||||
Thread* thread;
|
Thread* thread;
|
||||||
uint64_t wake_time { 0 };
|
uint64_t wake_time { 0 };
|
||||||
Semaphore* semaphore { nullptr };
|
Semaphore* semaphore { nullptr };
|
||||||
|
bool should_block { false };
|
||||||
|
|
||||||
private:
|
private:
|
||||||
Node* next { nullptr };
|
Node* next { nullptr };
|
||||||
|
|
|
@ -31,11 +31,13 @@ namespace Kernel
|
||||||
uint32_t m_cq_head { 0 };
|
uint32_t m_cq_head { 0 };
|
||||||
uint16_t m_cq_valid_phase { 1 };
|
uint16_t m_cq_valid_phase { 1 };
|
||||||
|
|
||||||
Semaphore m_semaphore;
|
Semaphore m_semaphore;
|
||||||
SpinLock m_lock;
|
SpinLock m_lock;
|
||||||
BAN::Atomic<uint64_t> m_used_mask { 0 };
|
BAN::Atomic<size_t> m_used_mask { 0 };
|
||||||
BAN::Atomic<uint64_t> m_done_mask { 0 };
|
BAN::Atomic<size_t> m_done_mask { 0 };
|
||||||
volatile uint16_t m_status_codes[64] { };
|
volatile uint16_t m_status_codes[64] { };
|
||||||
|
|
||||||
|
static constexpr size_t m_mask_bits = sizeof(size_t) * 8;
|
||||||
};
|
};
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -4,6 +4,7 @@
|
||||||
#include <BAN/RefPtr.h>
|
#include <BAN/RefPtr.h>
|
||||||
#include <BAN/UniqPtr.h>
|
#include <BAN/UniqPtr.h>
|
||||||
#include <kernel/Memory/VirtualRange.h>
|
#include <kernel/Memory/VirtualRange.h>
|
||||||
|
#include <kernel/InterruptStack.h>
|
||||||
|
|
||||||
#include <signal.h>
|
#include <signal.h>
|
||||||
#include <sys/types.h>
|
#include <sys/types.h>
|
||||||
|
@ -25,7 +26,7 @@ namespace Kernel
|
||||||
{
|
{
|
||||||
NotStarted,
|
NotStarted,
|
||||||
Executing,
|
Executing,
|
||||||
Terminated
|
Terminated,
|
||||||
};
|
};
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
@ -33,7 +34,7 @@ namespace Kernel
|
||||||
static BAN::ErrorOr<Thread*> create_userspace(Process*);
|
static BAN::ErrorOr<Thread*> create_userspace(Process*);
|
||||||
~Thread();
|
~Thread();
|
||||||
|
|
||||||
BAN::ErrorOr<Thread*> clone(Process*, uintptr_t rsp, uintptr_t rip);
|
BAN::ErrorOr<Thread*> clone(Process*, uintptr_t sp, uintptr_t ip);
|
||||||
void setup_exec();
|
void setup_exec();
|
||||||
void setup_process_cleanup();
|
void setup_process_cleanup();
|
||||||
|
|
||||||
|
@ -52,40 +53,33 @@ namespace Kernel
|
||||||
BAN::ErrorOr<void> block_or_eintr_or_timeout(Semaphore& semaphore, uint64_t timeout_ms, bool etimedout);
|
BAN::ErrorOr<void> block_or_eintr_or_timeout(Semaphore& semaphore, uint64_t timeout_ms, bool etimedout);
|
||||||
BAN::ErrorOr<void> block_or_eintr_or_waketime(Semaphore& semaphore, uint64_t wake_time_ms, bool etimedout);
|
BAN::ErrorOr<void> block_or_eintr_or_waketime(Semaphore& semaphore, uint64_t wake_time_ms, bool etimedout);
|
||||||
|
|
||||||
void set_return_rsp(uintptr_t& rsp) { m_return_rsp = &rsp; }
|
|
||||||
void set_return_rip(uintptr_t& rip) { m_return_rip = &rip; }
|
|
||||||
uintptr_t return_rsp() { ASSERT(m_return_rsp); return *m_return_rsp; }
|
|
||||||
uintptr_t return_rip() { ASSERT(m_return_rip); return *m_return_rip; }
|
|
||||||
|
|
||||||
pid_t tid() const { return m_tid; }
|
pid_t tid() const { return m_tid; }
|
||||||
|
|
||||||
void set_rsp(uintptr_t rsp) { m_rsp = rsp; validate_stack(); }
|
|
||||||
void set_rip(uintptr_t rip) { m_rip = rip; }
|
|
||||||
uintptr_t rsp() const { return m_rsp; }
|
|
||||||
uintptr_t rip() const { return m_rip; }
|
|
||||||
|
|
||||||
void set_started() { ASSERT(m_state == State::NotStarted); m_state = State::Executing; }
|
|
||||||
State state() const { return m_state; }
|
State state() const { return m_state; }
|
||||||
|
|
||||||
vaddr_t stack_base() const { return m_stack->vaddr(); }
|
vaddr_t kernel_stack_bottom() const { return m_kernel_stack->vaddr(); }
|
||||||
size_t stack_size() const { return m_stack->size(); }
|
vaddr_t kernel_stack_top() const { return m_kernel_stack->vaddr() + m_kernel_stack->size(); }
|
||||||
VirtualRange& stack() { return *m_stack; }
|
VirtualRange& kernel_stack() { return *m_kernel_stack; }
|
||||||
VirtualRange& interrupt_stack() { return *m_interrupt_stack; }
|
|
||||||
|
|
||||||
vaddr_t interrupt_stack_base() const { return m_interrupt_stack ? m_interrupt_stack->vaddr() : 0; }
|
vaddr_t userspace_stack_bottom() const { return is_userspace() ? m_userspace_stack->vaddr() : 0; }
|
||||||
size_t interrupt_stack_size() const { return m_interrupt_stack ? m_interrupt_stack->size() : 0; }
|
vaddr_t userspace_stack_top() const { return is_userspace() ? m_userspace_stack->vaddr() + m_userspace_stack->size() : 0; }
|
||||||
|
VirtualRange& userspace_stack() { ASSERT(is_userspace()); return *m_userspace_stack; }
|
||||||
|
|
||||||
static Thread& current();
|
static Thread& current();
|
||||||
static pid_t current_tid();
|
static pid_t current_tid();
|
||||||
|
|
||||||
Process& process();
|
Process& process();
|
||||||
|
const Process& process() const;
|
||||||
bool has_process() const { return m_process; }
|
bool has_process() const { return m_process; }
|
||||||
|
|
||||||
bool is_userspace() const { return m_is_userspace; }
|
bool is_userspace() const { return m_is_userspace; }
|
||||||
|
|
||||||
size_t virtual_page_count() const { return m_stack->size() / PAGE_SIZE; }
|
size_t virtual_page_count() const { return (m_kernel_stack->size() / PAGE_SIZE) + (m_userspace_stack->size() / PAGE_SIZE); }
|
||||||
size_t physical_page_count() const { return virtual_page_count(); }
|
size_t physical_page_count() const { return virtual_page_count(); }
|
||||||
|
|
||||||
|
InterruptStack& interrupt_stack() { return m_interrupt_stack; }
|
||||||
|
InterruptRegisters& interrupt_registers() { return m_interrupt_registers; }
|
||||||
|
|
||||||
#if __enable_sse
|
#if __enable_sse
|
||||||
void save_sse();
|
void save_sse();
|
||||||
void load_sse();
|
void load_sse();
|
||||||
|
@ -94,25 +88,23 @@ namespace Kernel
|
||||||
|
|
||||||
private:
|
private:
|
||||||
Thread(pid_t tid, Process*);
|
Thread(pid_t tid, Process*);
|
||||||
void on_exit();
|
|
||||||
|
|
||||||
void validate_stack() const;
|
static void on_exit_trampoline(Thread*);
|
||||||
|
void on_exit();
|
||||||
|
|
||||||
private:
|
private:
|
||||||
static constexpr size_t m_kernel_stack_size = PAGE_SIZE * 4;
|
static constexpr size_t m_kernel_stack_size = PAGE_SIZE * 4;
|
||||||
static constexpr size_t m_userspace_stack_size = PAGE_SIZE * 2;
|
static constexpr size_t m_userspace_stack_size = PAGE_SIZE * 4;
|
||||||
static constexpr size_t m_interrupt_stack_size = PAGE_SIZE * 2;
|
BAN::UniqPtr<VirtualRange> m_kernel_stack;
|
||||||
BAN::UniqPtr<VirtualRange> m_interrupt_stack;
|
BAN::UniqPtr<VirtualRange> m_userspace_stack;
|
||||||
BAN::UniqPtr<VirtualRange> m_stack;
|
|
||||||
uintptr_t m_rip { 0 };
|
|
||||||
uintptr_t m_rsp { 0 };
|
|
||||||
const pid_t m_tid { 0 };
|
const pid_t m_tid { 0 };
|
||||||
State m_state { State::NotStarted };
|
State m_state { State::NotStarted };
|
||||||
Process* m_process { nullptr };
|
Process* m_process { nullptr };
|
||||||
bool m_is_userspace { false };
|
bool m_is_userspace { false };
|
||||||
|
bool m_delete_process { false };
|
||||||
|
|
||||||
uintptr_t* m_return_rsp { nullptr };
|
InterruptStack m_interrupt_stack { };
|
||||||
uintptr_t* m_return_rip { nullptr };
|
InterruptRegisters m_interrupt_registers { };
|
||||||
|
|
||||||
uint64_t m_signal_pending_mask { 0 };
|
uint64_t m_signal_pending_mask { 0 };
|
||||||
uint64_t m_signal_block_mask { 0 };
|
uint64_t m_signal_block_mask { 0 };
|
||||||
|
@ -123,6 +115,7 @@ namespace Kernel
|
||||||
alignas(16) uint8_t m_sse_storage[512] {};
|
alignas(16) uint8_t m_sse_storage[512] {};
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
friend class Process;
|
||||||
friend class Scheduler;
|
friend class Scheduler;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -31,7 +31,9 @@ namespace Kernel
|
||||||
if (s_instance == nullptr)
|
if (s_instance == nullptr)
|
||||||
return BAN::Error::from_errno(ENOMEM);
|
return BAN::Error::from_errno(ENOMEM);
|
||||||
TRY(s_instance->initialize_impl());
|
TRY(s_instance->initialize_impl());
|
||||||
|
#if ARCH(x86_64)
|
||||||
lai_create_namespace();
|
lai_create_namespace();
|
||||||
|
#endif
|
||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -93,7 +95,9 @@ namespace Kernel
|
||||||
const RSDP* rsdp = locate_rsdp();
|
const RSDP* rsdp = locate_rsdp();
|
||||||
if (rsdp == nullptr)
|
if (rsdp == nullptr)
|
||||||
return BAN::Error::from_error_code(ErrorCode::ACPI_NoRootSDT);
|
return BAN::Error::from_error_code(ErrorCode::ACPI_NoRootSDT);
|
||||||
|
#if ARCH(x86_64)
|
||||||
lai_set_acpi_revision(rsdp->revision);
|
lai_set_acpi_revision(rsdp->revision);
|
||||||
|
#endif
|
||||||
|
|
||||||
uint32_t root_entry_count = 0;
|
uint32_t root_entry_count = 0;
|
||||||
|
|
||||||
|
|
|
@ -21,8 +21,8 @@ namespace Debug
|
||||||
|
|
||||||
struct stackframe
|
struct stackframe
|
||||||
{
|
{
|
||||||
stackframe* rbp;
|
stackframe* bp;
|
||||||
uintptr_t rip;
|
uintptr_t ip;
|
||||||
};
|
};
|
||||||
|
|
||||||
SpinLockGuard _(s_debug_lock);
|
SpinLockGuard _(s_debug_lock);
|
||||||
|
@ -33,8 +33,8 @@ namespace Debug
|
||||||
dprintln("Could not get frame address");
|
dprintln("Could not get frame address");
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
uintptr_t first_rip = frame->rip;
|
uintptr_t first_ip = frame->ip;
|
||||||
uintptr_t last_rip = 0;
|
uintptr_t last_ip = 0;
|
||||||
bool first = true;
|
bool first = true;
|
||||||
|
|
||||||
BAN::Formatter::print(Debug::putchar, "\e[36mStack trace:\r\n");
|
BAN::Formatter::print(Debug::putchar, "\e[36mStack trace:\r\n");
|
||||||
|
@ -46,21 +46,21 @@ namespace Debug
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
BAN::Formatter::print(Debug::putchar, " {}\r\n", (void*)frame->rip);
|
BAN::Formatter::print(Debug::putchar, " {}\r\n", (void*)frame->ip);
|
||||||
|
|
||||||
if (!first && frame->rip == first_rip)
|
if (!first && frame->ip == first_ip)
|
||||||
{
|
{
|
||||||
derrorln("looping kernel panic :(");
|
derrorln("looping kernel panic :(");
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
else if (!first && frame->rip == last_rip)
|
else if (!first && frame->ip == last_ip)
|
||||||
{
|
{
|
||||||
derrorln("repeating stack trace");
|
derrorln("repeating stack trace");
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
last_rip = frame->rip;
|
last_ip = frame->ip;
|
||||||
frame = frame->rbp;
|
frame = frame->bp;
|
||||||
first = false;
|
first = false;
|
||||||
}
|
}
|
||||||
BAN::Formatter::print(Debug::putchar, "\e[m");
|
BAN::Formatter::print(Debug::putchar, "\e[m");
|
||||||
|
|
|
@ -240,7 +240,7 @@ namespace Kernel
|
||||||
return BAN::Error::from_errno(ENOMEM);
|
return BAN::Error::from_errno(ENOMEM);
|
||||||
auto region = BAN::UniqPtr<FramebufferMemoryRegion>::adopt(region_ptr);
|
auto region = BAN::UniqPtr<FramebufferMemoryRegion>::adopt(region_ptr);
|
||||||
|
|
||||||
TRY(region->initialize({ m_vaddr, m_vaddr + BAN::Math::div_round_up(m_size, PAGE_SIZE) * PAGE_SIZE }));
|
TRY(region->initialize({ m_vaddr, m_vaddr + BAN::Math::div_round_up<uintptr_t>(m_size, PAGE_SIZE) * PAGE_SIZE }));
|
||||||
|
|
||||||
return BAN::UniqPtr<MemoryRegion>(BAN::move(region));
|
return BAN::UniqPtr<MemoryRegion>(BAN::move(region));
|
||||||
}
|
}
|
||||||
|
|
|
@ -108,10 +108,10 @@ namespace Kernel
|
||||||
ASSERT(!mode().ifdir());
|
ASSERT(!mode().ifdir());
|
||||||
ASSERT(offset >= 0);
|
ASSERT(offset >= 0);
|
||||||
|
|
||||||
if (offset >= UINT32_MAX || buffer.size() >= UINT32_MAX || buffer.size() >= (size_t)(UINT32_MAX - offset))
|
if (static_cast<BAN::make_unsigned_t<decltype(offset)>>(offset) >= UINT32_MAX || buffer.size() >= UINT32_MAX || buffer.size() >= (size_t)(UINT32_MAX - offset))
|
||||||
return BAN::Error::from_errno(EOVERFLOW);
|
return BAN::Error::from_errno(EOVERFLOW);
|
||||||
|
|
||||||
if (offset >= m_inode.size)
|
if (static_cast<BAN::make_unsigned_t<decltype(offset)>>(offset) >= m_inode.size)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
uint32_t count = buffer.size();
|
uint32_t count = buffer.size();
|
||||||
|
@ -152,7 +152,7 @@ namespace Kernel
|
||||||
ASSERT(!mode().ifdir());
|
ASSERT(!mode().ifdir());
|
||||||
ASSERT(offset >= 0);
|
ASSERT(offset >= 0);
|
||||||
|
|
||||||
if (offset >= UINT32_MAX || buffer.size() >= UINT32_MAX || buffer.size() >= (size_t)(UINT32_MAX - offset))
|
if (static_cast<BAN::make_unsigned_t<decltype(offset)>>(offset) >= UINT32_MAX || buffer.size() >= UINT32_MAX || buffer.size() >= (size_t)(UINT32_MAX - offset))
|
||||||
return BAN::Error::from_errno(EOVERFLOW);
|
return BAN::Error::from_errno(EOVERFLOW);
|
||||||
|
|
||||||
if (m_inode.size < offset + buffer.size())
|
if (m_inode.size < offset + buffer.size())
|
||||||
|
@ -304,7 +304,7 @@ done:
|
||||||
ASSERT(mode().ifdir());
|
ASSERT(mode().ifdir());
|
||||||
ASSERT(offset >= 0);
|
ASSERT(offset >= 0);
|
||||||
|
|
||||||
if (offset >= max_used_data_block_count())
|
if (static_cast<BAN::make_unsigned_t<decltype(offset)>>(offset) >= max_used_data_block_count())
|
||||||
{
|
{
|
||||||
list->entry_count = 0;
|
list->entry_count = 0;
|
||||||
return {};
|
return {};
|
||||||
|
|
|
@ -0,0 +1,69 @@
|
||||||
|
#include <kernel/GDT.h>
|
||||||
|
#include <kernel/Processor.h>
|
||||||
|
|
||||||
|
#include <string.h>
|
||||||
|
|
||||||
|
namespace Kernel
|
||||||
|
{
|
||||||
|
|
||||||
|
GDT* GDT::create([[maybe_unused]] void* processor)
|
||||||
|
{
|
||||||
|
auto* gdt = new GDT();
|
||||||
|
ASSERT(gdt);
|
||||||
|
|
||||||
|
#if ARCH(x86_64)
|
||||||
|
constexpr uint8_t code_flags = 0xA;
|
||||||
|
constexpr uint8_t data_flags = 0xC;
|
||||||
|
#elif ARCH(i686)
|
||||||
|
constexpr uint8_t code_flags = 0xC;
|
||||||
|
constexpr uint8_t data_flags = 0xC;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
gdt->write_entry(0x00, 0x00000000, 0x00000, 0x00, 0x0); // null
|
||||||
|
gdt->write_entry(0x08, 0x00000000, 0xFFFFF, 0x9A, code_flags); // kernel code
|
||||||
|
gdt->write_entry(0x10, 0x00000000, 0xFFFFF, 0x92, data_flags); // kernel data
|
||||||
|
gdt->write_entry(0x18, 0x00000000, 0xFFFFF, 0xFA, code_flags); // user code
|
||||||
|
gdt->write_entry(0x20, 0x00000000, 0xFFFFF, 0xF2, data_flags); // user data
|
||||||
|
#if ARCH(i686)
|
||||||
|
gdt->write_entry(0x28, reinterpret_cast<uint32_t>(processor), sizeof(Processor), 0x92, 0x4); // processor data
|
||||||
|
#endif
|
||||||
|
gdt->write_tss();
|
||||||
|
|
||||||
|
return gdt;
|
||||||
|
}
|
||||||
|
|
||||||
|
void GDT::write_entry(uint8_t offset, uint32_t base, uint32_t limit, uint8_t access, uint8_t flags)
|
||||||
|
{
|
||||||
|
ASSERT(offset % sizeof(SegmentDescriptor) == 0);
|
||||||
|
uint8_t idx = offset / sizeof(SegmentDescriptor);
|
||||||
|
|
||||||
|
auto& desc = m_gdt[idx];
|
||||||
|
desc.base1 = (base >> 0) & 0xFFFF;
|
||||||
|
desc.base2 = (base >> 16) & 0xFF;
|
||||||
|
desc.base3 = (base >> 24) & 0xFF;
|
||||||
|
|
||||||
|
desc.limit1 = (limit >> 0) & 0xFFFF;
|
||||||
|
desc.limit2 = (limit >> 16) & 0x0F;
|
||||||
|
|
||||||
|
desc.access = access & 0xFF;
|
||||||
|
|
||||||
|
desc.flags = flags & 0x0F;
|
||||||
|
}
|
||||||
|
|
||||||
|
void GDT::write_tss()
|
||||||
|
{
|
||||||
|
memset(&m_tss, 0x00, sizeof(TaskStateSegment));
|
||||||
|
m_tss.iopb = sizeof(TaskStateSegment);
|
||||||
|
|
||||||
|
uintptr_t base = reinterpret_cast<uintptr_t>(&m_tss);
|
||||||
|
|
||||||
|
write_entry(m_tss_offset, (uint32_t)base, sizeof(TaskStateSegment), 0x89, 0x0);
|
||||||
|
|
||||||
|
#if ARCH(x86_64)
|
||||||
|
auto& desc = m_gdt[(m_tss_offset + 8) / sizeof(SegmentDescriptor)];
|
||||||
|
desc.low = base >> 32;
|
||||||
|
desc.high = 0;
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
|
@ -10,16 +10,14 @@
|
||||||
#include <kernel/Timer/PIT.h>
|
#include <kernel/Timer/PIT.h>
|
||||||
|
|
||||||
#define ISR_LIST_X X(0) X(1) X(2) X(3) X(4) X(5) X(6) X(7) X(8) X(9) X(10) X(11) X(12) X(13) X(14) X(15) X(16) X(17) X(18) X(19) X(20) X(21) X(22) X(23) X(24) X(25) X(26) X(27) X(28) X(29) X(30) X(31)
|
#define ISR_LIST_X X(0) X(1) X(2) X(3) X(4) X(5) X(6) X(7) X(8) X(9) X(10) X(11) X(12) X(13) X(14) X(15) X(16) X(17) X(18) X(19) X(20) X(21) X(22) X(23) X(24) X(25) X(26) X(27) X(28) X(29) X(30) X(31)
|
||||||
#define IRQ_LIST_X X(0) X(1) X(2) X(3) X(4) X(5) X(6) X(7) X(8) X(9) X(10) X(11) X(12) X(13) X(14) X(15) X(16) X(17) X(18) X(19) X(20) X(21) X(22) X(23) X(24) X(25) X(26) X(27) X(28) X(29) X(30) X(31) X(32)
|
#define IRQ_LIST_X X(0) X(1) X(2) X(3) X(4) X(5) X(6) X(7) X(8) X(9) X(10) X(11) X(12) X(13) X(14) X(15) X(16) X(17) X(18) X(19) X(20) X(21) X(22) X(23) X(24) X(25) X(26) X(27) X(28) X(29) X(30) X(31)
|
||||||
|
|
||||||
namespace Kernel
|
namespace Kernel
|
||||||
{
|
{
|
||||||
|
|
||||||
|
#if ARCH(x86_64)
|
||||||
struct Registers
|
struct Registers
|
||||||
{
|
{
|
||||||
uint64_t rsp;
|
|
||||||
uint64_t rip;
|
|
||||||
uint64_t rflags;
|
|
||||||
uint64_t cr4;
|
uint64_t cr4;
|
||||||
uint64_t cr3;
|
uint64_t cr3;
|
||||||
uint64_t cr2;
|
uint64_t cr2;
|
||||||
|
@ -33,14 +31,33 @@ namespace Kernel
|
||||||
uint64_t r10;
|
uint64_t r10;
|
||||||
uint64_t r9;
|
uint64_t r9;
|
||||||
uint64_t r8;
|
uint64_t r8;
|
||||||
uint64_t rsi;
|
|
||||||
uint64_t rdi;
|
uint64_t rdi;
|
||||||
|
uint64_t rsi;
|
||||||
uint64_t rbp;
|
uint64_t rbp;
|
||||||
|
uint64_t rbx;
|
||||||
uint64_t rdx;
|
uint64_t rdx;
|
||||||
uint64_t rcx;
|
uint64_t rcx;
|
||||||
uint64_t rbx;
|
|
||||||
uint64_t rax;
|
uint64_t rax;
|
||||||
};
|
};
|
||||||
|
#elif ARCH(i686)
|
||||||
|
struct Registers
|
||||||
|
{
|
||||||
|
uint32_t cr4;
|
||||||
|
uint32_t cr3;
|
||||||
|
uint32_t cr2;
|
||||||
|
uint32_t cr0;
|
||||||
|
|
||||||
|
uint32_t edi;
|
||||||
|
uint32_t esi;
|
||||||
|
uint32_t ebp;
|
||||||
|
uint32_t unused;
|
||||||
|
uint32_t ebx;
|
||||||
|
uint32_t edx;
|
||||||
|
uint32_t ecx;
|
||||||
|
uint32_t eax;
|
||||||
|
};
|
||||||
|
#endif
|
||||||
|
|
||||||
#define X(num) 1 +
|
#define X(num) 1 +
|
||||||
static BAN::Array<Interruptable*, IRQ_LIST_X 0> s_interruptables;
|
static BAN::Array<Interruptable*, IRQ_LIST_X 0> s_interruptables;
|
||||||
|
@ -141,46 +158,37 @@ namespace Kernel
|
||||||
"Unkown Exception 0x1F",
|
"Unkown Exception 0x1F",
|
||||||
};
|
};
|
||||||
|
|
||||||
extern "C" void cpp_isr_handler(uint64_t isr, uint64_t error, InterruptStack& interrupt_stack, const Registers* regs)
|
extern "C" void cpp_isr_handler(uint32_t isr, uint32_t error, InterruptStack* interrupt_stack, const Registers* regs)
|
||||||
{
|
{
|
||||||
if (g_paniced)
|
if (g_paniced)
|
||||||
{
|
{
|
||||||
dprintln("Processor {} halted", Processor::current_id());
|
dprintln("Processor {} halted", Processor::current_id());
|
||||||
InterruptController::get().broadcast_ipi();
|
if (InterruptController::is_initialized())
|
||||||
|
InterruptController::get().broadcast_ipi();
|
||||||
asm volatile("cli; 1: hlt; jmp 1b");
|
asm volatile("cli; 1: hlt; jmp 1b");
|
||||||
}
|
}
|
||||||
|
|
||||||
#if __enable_sse
|
|
||||||
bool from_userspace = (interrupt_stack.cs & 0b11) == 0b11;
|
|
||||||
if (from_userspace)
|
|
||||||
Thread::current().save_sse();
|
|
||||||
#endif
|
|
||||||
|
|
||||||
pid_t tid = Scheduler::current_tid();
|
pid_t tid = Scheduler::current_tid();
|
||||||
pid_t pid = tid ? Process::current().pid() : 0;
|
pid_t pid = tid ? Process::current().pid() : 0;
|
||||||
|
|
||||||
if (tid)
|
if (tid)
|
||||||
{
|
{
|
||||||
Thread::current().set_return_rsp(interrupt_stack.rsp);
|
|
||||||
Thread::current().set_return_rip(interrupt_stack.rip);
|
|
||||||
|
|
||||||
if (isr == ISR::PageFault)
|
if (isr == ISR::PageFault)
|
||||||
{
|
{
|
||||||
// Check if stack is OOB
|
// Check if stack is OOB
|
||||||
auto& stack = Thread::current().stack();
|
auto& thread = Thread::current();
|
||||||
auto& istack = Thread::current().interrupt_stack();
|
if (thread.userspace_stack_bottom() < interrupt_stack->sp && interrupt_stack->sp <= thread.userspace_stack_top())
|
||||||
if (stack.vaddr() < interrupt_stack.rsp && interrupt_stack.rsp <= stack.vaddr() + stack.size())
|
; // using userspace stack
|
||||||
; // using normal stack
|
else if (thread.kernel_stack_bottom() < interrupt_stack->sp && interrupt_stack->sp <= thread.kernel_stack_top())
|
||||||
else if (istack.vaddr() < interrupt_stack.rsp && interrupt_stack.rsp <= istack.vaddr() + istack.size())
|
; // using kernel stack
|
||||||
; // using interrupt stack
|
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
derrorln("Stack pointer out of bounds!");
|
derrorln("Stack pointer out of bounds!");
|
||||||
derrorln("rip {H}", interrupt_stack.rip);
|
derrorln("rip {H}", interrupt_stack->ip);
|
||||||
derrorln("rsp {H}, stack {H}->{H}, istack {H}->{H}",
|
derrorln("rsp {H}, userspace stack {H}->{H}, kernel stack {H}->{H}",
|
||||||
interrupt_stack.rsp,
|
interrupt_stack->sp,
|
||||||
stack.vaddr(), stack.vaddr() + stack.size(),
|
thread.userspace_stack_bottom(), thread.userspace_stack_top(),
|
||||||
istack.vaddr(), istack.vaddr() + istack.size()
|
thread.kernel_stack_bottom(), thread.kernel_stack_top()
|
||||||
);
|
);
|
||||||
Thread::current().handle_signal(SIGKILL);
|
Thread::current().handle_signal(SIGKILL);
|
||||||
goto done;
|
goto done;
|
||||||
|
@ -191,9 +199,9 @@ namespace Kernel
|
||||||
page_fault_error.raw = error;
|
page_fault_error.raw = error;
|
||||||
if (!page_fault_error.present)
|
if (!page_fault_error.present)
|
||||||
{
|
{
|
||||||
asm volatile("sti");
|
Processor::set_interrupt_state(InterruptState::Enabled);
|
||||||
auto result = Process::current().allocate_page_for_demand_paging(regs->cr2);
|
auto result = Process::current().allocate_page_for_demand_paging(regs->cr2);
|
||||||
asm volatile("cli");
|
Processor::set_interrupt_state(InterruptState::Disabled);
|
||||||
|
|
||||||
if (!result.is_error() && result.value())
|
if (!result.is_error() && result.value())
|
||||||
goto done;
|
goto done;
|
||||||
|
@ -209,11 +217,19 @@ namespace Kernel
|
||||||
#if __enable_sse
|
#if __enable_sse
|
||||||
else if (isr == ISR::DeviceNotAvailable)
|
else if (isr == ISR::DeviceNotAvailable)
|
||||||
{
|
{
|
||||||
|
#if ARCH(x86_64)
|
||||||
asm volatile(
|
asm volatile(
|
||||||
"movq %cr0, %rax;"
|
"movq %cr0, %rax;"
|
||||||
"andq $~(1 << 3), %rax;"
|
"andq $~(1 << 3), %rax;"
|
||||||
"movq %rax, %cr0;"
|
"movq %rax, %cr0;"
|
||||||
);
|
);
|
||||||
|
#elif ARCH(i686)
|
||||||
|
asm volatile(
|
||||||
|
"movl %cr0, %eax;"
|
||||||
|
"andl $~(1 << 3), %eax;"
|
||||||
|
"movl %eax, %cr0;"
|
||||||
|
);
|
||||||
|
#endif
|
||||||
if (auto* current = &Thread::current(); current != Thread::sse_thread())
|
if (auto* current = &Thread::current(); current != Thread::sse_thread())
|
||||||
{
|
{
|
||||||
if (auto* sse = Thread::sse_thread())
|
if (auto* sse = Thread::sse_thread())
|
||||||
|
@ -225,9 +241,9 @@ namespace Kernel
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
if (PageTable::current().get_page_flags(interrupt_stack.rip & PAGE_ADDR_MASK) & PageTable::Flags::Present)
|
if (PageTable::current().get_page_flags(interrupt_stack->ip & PAGE_ADDR_MASK) & PageTable::Flags::Present)
|
||||||
{
|
{
|
||||||
auto* machine_code = (const uint8_t*)interrupt_stack.rip;
|
auto* machine_code = (const uint8_t*)interrupt_stack->ip;
|
||||||
dwarnln("While executing: {2H}{2H}{2H}{2H}{2H}{2H}{2H}{2H}",
|
dwarnln("While executing: {2H}{2H}{2H}{2H}{2H}{2H}{2H}{2H}",
|
||||||
machine_code[0],
|
machine_code[0],
|
||||||
machine_code[1],
|
machine_code[1],
|
||||||
|
@ -240,8 +256,9 @@ namespace Kernel
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#if ARCH(x86_64)
|
||||||
dwarnln(
|
dwarnln(
|
||||||
"{} (error code: 0x{16H}), pid {}, tid {}\r\n"
|
"{} (error code: 0x{8H}), pid {}, tid {}\r\n"
|
||||||
"Register dump\r\n"
|
"Register dump\r\n"
|
||||||
"rax=0x{16H}, rbx=0x{16H}, rcx=0x{16H}, rdx=0x{16H}\r\n"
|
"rax=0x{16H}, rbx=0x{16H}, rcx=0x{16H}, rdx=0x{16H}\r\n"
|
||||||
"rsp=0x{16H}, rbp=0x{16H}, rdi=0x{16H}, rsi=0x{16H}\r\n"
|
"rsp=0x{16H}, rbp=0x{16H}, rdi=0x{16H}, rsi=0x{16H}\r\n"
|
||||||
|
@ -249,10 +266,25 @@ namespace Kernel
|
||||||
"cr0=0x{16H}, cr2=0x{16H}, cr3=0x{16H}, cr4=0x{16H}",
|
"cr0=0x{16H}, cr2=0x{16H}, cr3=0x{16H}, cr4=0x{16H}",
|
||||||
isr_exceptions[isr], error, pid, tid,
|
isr_exceptions[isr], error, pid, tid,
|
||||||
regs->rax, regs->rbx, regs->rcx, regs->rdx,
|
regs->rax, regs->rbx, regs->rcx, regs->rdx,
|
||||||
regs->rsp, regs->rbp, regs->rdi, regs->rsi,
|
interrupt_stack->sp, regs->rbp, regs->rdi, regs->rsi,
|
||||||
regs->rip, regs->rflags,
|
interrupt_stack->ip, interrupt_stack->flags,
|
||||||
regs->cr0, regs->cr2, regs->cr3, regs->cr4
|
regs->cr0, regs->cr2, regs->cr3, regs->cr4
|
||||||
);
|
);
|
||||||
|
#elif ARCH(i686)
|
||||||
|
dwarnln(
|
||||||
|
"{} (error code: 0x{8H}), pid {}, tid {}\r\n"
|
||||||
|
"Register dump\r\n"
|
||||||
|
"eax=0x{8H}, ebx=0x{8H}, ecx=0x{8H}, edx=0x{8H}\r\n"
|
||||||
|
"esp=0x{8H}, ebp=0x{8H}, edi=0x{8H}, esi=0x{8H}\r\n"
|
||||||
|
"eip=0x{8H}, eflags=0x{8H}\r\n"
|
||||||
|
"cr0=0x{8H}, cr2=0x{8H}, cr3=0x{8H}, cr4=0x{8H}",
|
||||||
|
isr_exceptions[isr], error, pid, tid,
|
||||||
|
regs->eax, regs->ebx, regs->ecx, regs->edx,
|
||||||
|
interrupt_stack->sp, regs->ebp, regs->edi, regs->esi,
|
||||||
|
interrupt_stack->ip, interrupt_stack->flags,
|
||||||
|
regs->cr0, regs->cr2, regs->cr3, regs->cr4
|
||||||
|
);
|
||||||
|
#endif
|
||||||
if (isr == ISR::PageFault)
|
if (isr == ISR::PageFault)
|
||||||
PageTable::current().debug_dump();
|
PageTable::current().debug_dump();
|
||||||
Debug::dump_stack_trace();
|
Debug::dump_stack_trace();
|
||||||
|
@ -297,29 +329,31 @@ done:
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
extern "C" void cpp_irq_handler(uint64_t irq, InterruptStack& interrupt_stack)
|
extern "C" void cpp_reschedule_handler(InterruptStack* interrupt_stack, InterruptRegisters* interrupt_registers)
|
||||||
|
{
|
||||||
|
Processor::enter_interrupt(interrupt_stack, interrupt_registers);
|
||||||
|
Scheduler::get().irq_reschedule();
|
||||||
|
Processor::leave_interrupt();
|
||||||
|
}
|
||||||
|
|
||||||
|
extern "C" void cpp_irq_handler(uint32_t irq)
|
||||||
{
|
{
|
||||||
if (g_paniced)
|
if (g_paniced)
|
||||||
{
|
{
|
||||||
dprintln("Processor {} halted", Processor::current_id());
|
dprintln("Processor {} halted", Processor::current_id());
|
||||||
InterruptController::get().broadcast_ipi();
|
if (InterruptController::is_initialized())
|
||||||
|
InterruptController::get().broadcast_ipi();
|
||||||
asm volatile("cli; 1: hlt; jmp 1b");
|
asm volatile("cli; 1: hlt; jmp 1b");
|
||||||
}
|
}
|
||||||
|
|
||||||
if (Scheduler::current_tid())
|
ASSERT(irq != IRQ_IPI);
|
||||||
{
|
|
||||||
Thread::current().set_return_rsp(interrupt_stack.rsp);
|
|
||||||
Thread::current().set_return_rip(interrupt_stack.rip);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!InterruptController::get().is_in_service(irq))
|
if (!InterruptController::get().is_in_service(irq))
|
||||||
dprintln("spurious irq 0x{2H}", irq);
|
dprintln("spurious irq 0x{2H}", irq);
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
InterruptController::get().eoi(irq);
|
InterruptController::get().eoi(irq);
|
||||||
if (irq == IRQ_IPI)
|
if (auto* handler = s_interruptables[irq])
|
||||||
Scheduler::get().reschedule();
|
|
||||||
else if (auto* handler = s_interruptables[irq])
|
|
||||||
handler->handle_irq();
|
handler->handle_irq();
|
||||||
else
|
else
|
||||||
dprintln("no handler for irq 0x{2H}", irq);
|
dprintln("no handler for irq 0x{2H}", irq);
|
||||||
|
@ -332,14 +366,17 @@ done:
|
||||||
|
|
||||||
void IDT::register_interrupt_handler(uint8_t index, void (*handler)())
|
void IDT::register_interrupt_handler(uint8_t index, void (*handler)())
|
||||||
{
|
{
|
||||||
auto& descriptor = m_idt[index];
|
auto& desc = m_idt[index];
|
||||||
descriptor.offset1 = (uint16_t)((uint64_t)handler >> 0);
|
memset(&desc, 0, sizeof(GateDescriptor));
|
||||||
descriptor.offset2 = (uint16_t)((uint64_t)handler >> 16);
|
|
||||||
descriptor.offset3 = (uint32_t)((uint64_t)handler >> 32);
|
|
||||||
|
|
||||||
descriptor.selector = 0x08;
|
desc.offset0 = (uint16_t)((uintptr_t)handler >> 0);
|
||||||
descriptor.IST = 0;
|
desc.offset1 = (uint16_t)((uintptr_t)handler >> 16);
|
||||||
descriptor.flags = 0x8E;
|
#if ARCH(x86_64)
|
||||||
|
desc.offset2 = (uint32_t)((uintptr_t)handler >> 32);
|
||||||
|
#endif
|
||||||
|
|
||||||
|
desc.selector = 0x08;
|
||||||
|
desc.flags = 0x8E;
|
||||||
}
|
}
|
||||||
|
|
||||||
void IDT::register_syscall_handler(uint8_t index, void (*handler)())
|
void IDT::register_syscall_handler(uint8_t index, void (*handler)())
|
||||||
|
@ -363,6 +400,7 @@ done:
|
||||||
IRQ_LIST_X
|
IRQ_LIST_X
|
||||||
#undef X
|
#undef X
|
||||||
|
|
||||||
|
extern "C" void asm_reschedule_handler();
|
||||||
extern "C" void syscall_asm();
|
extern "C" void syscall_asm();
|
||||||
|
|
||||||
IDT* IDT::create()
|
IDT* IDT::create()
|
||||||
|
@ -380,6 +418,8 @@ done:
|
||||||
IRQ_LIST_X
|
IRQ_LIST_X
|
||||||
#undef X
|
#undef X
|
||||||
|
|
||||||
|
idt->register_interrupt_handler(IRQ_VECTOR_BASE + IRQ_IPI, asm_reschedule_handler);
|
||||||
|
|
||||||
idt->register_syscall_handler(0x80, syscall_asm);
|
idt->register_syscall_handler(0x80, syscall_asm);
|
||||||
|
|
||||||
return idt;
|
return idt;
|
|
@ -40,9 +40,16 @@ namespace Kernel
|
||||||
s_instance->m_using_apic = false;
|
s_instance->m_using_apic = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool InterruptController::is_initialized()
|
||||||
|
{
|
||||||
|
return s_instance;
|
||||||
|
}
|
||||||
|
|
||||||
void InterruptController::enter_acpi_mode()
|
void InterruptController::enter_acpi_mode()
|
||||||
{
|
{
|
||||||
|
#if ARCH(x86_64)
|
||||||
if (lai_enable_acpi(m_using_apic ? 1 : 0) != 0)
|
if (lai_enable_acpi(m_using_apic ? 1 : 0) != 0)
|
||||||
|
#endif
|
||||||
dwarnln("could not enter acpi mode");
|
dwarnln("could not enter acpi mode");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -79,7 +79,7 @@ namespace Kernel
|
||||||
if (it != m_arp_table.end())
|
if (it != m_arp_table.end())
|
||||||
return it->value;
|
return it->value;
|
||||||
}
|
}
|
||||||
Scheduler::get().reschedule();
|
Scheduler::get().yield();
|
||||||
}
|
}
|
||||||
|
|
||||||
return BAN::Error::from_errno(ETIMEDOUT);
|
return BAN::Error::from_errno(ETIMEDOUT);
|
||||||
|
|
|
@ -173,8 +173,9 @@ namespace Kernel
|
||||||
rx_descriptors[i].status = 0;
|
rx_descriptors[i].status = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
write32(REG_RDBAL0, m_rx_descriptor_region->paddr() & 0xFFFFFFFF);
|
uint64_t paddr64 = m_rx_descriptor_region->paddr();
|
||||||
write32(REG_RDBAH0, m_rx_descriptor_region->paddr() >> 32);
|
write32(REG_RDBAL0, paddr64 & 0xFFFFFFFF);
|
||||||
|
write32(REG_RDBAH0, paddr64 >> 32);
|
||||||
write32(REG_RDLEN0, E1000_RX_DESCRIPTOR_COUNT * sizeof(e1000_rx_desc));
|
write32(REG_RDLEN0, E1000_RX_DESCRIPTOR_COUNT * sizeof(e1000_rx_desc));
|
||||||
write32(REG_RDH0, 0);
|
write32(REG_RDH0, 0);
|
||||||
write32(REG_RDT0, E1000_RX_DESCRIPTOR_COUNT - 1);
|
write32(REG_RDT0, E1000_RX_DESCRIPTOR_COUNT - 1);
|
||||||
|
@ -206,8 +207,9 @@ namespace Kernel
|
||||||
tx_descriptors[i].cmd = 0;
|
tx_descriptors[i].cmd = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
write32(REG_TDBAL, m_tx_descriptor_region->paddr() & 0xFFFFFFFF);
|
uint64_t paddr64 = m_tx_descriptor_region->paddr();
|
||||||
write32(REG_TDBAH, m_tx_descriptor_region->paddr() >> 32);
|
write32(REG_TDBAL, paddr64 & 0xFFFFFFFF);
|
||||||
|
write32(REG_TDBAH, paddr64 >> 32);
|
||||||
write32(REG_TDLEN, E1000_TX_DESCRIPTOR_COUNT * sizeof(e1000_tx_desc));
|
write32(REG_TDLEN, E1000_TX_DESCRIPTOR_COUNT * sizeof(e1000_tx_desc));
|
||||||
write32(REG_TDH, 0);
|
write32(REG_TDH, 0);
|
||||||
write32(REG_TDT, 0);
|
write32(REG_TDT, 0);
|
||||||
|
|
|
@ -164,7 +164,7 @@ namespace Kernel
|
||||||
}
|
}
|
||||||
|
|
||||||
while (!connection_info.connection_done)
|
while (!connection_info.connection_done)
|
||||||
Scheduler::get().reschedule();
|
Scheduler::get().yield();
|
||||||
|
|
||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
|
|
|
@ -127,9 +127,6 @@ namespace Kernel
|
||||||
}
|
}
|
||||||
process->m_loadable_elf->reserve_address_space();
|
process->m_loadable_elf->reserve_address_space();
|
||||||
|
|
||||||
process->m_is_userspace = true;
|
|
||||||
process->m_userspace_info.entry = process->m_loadable_elf->entry_point();
|
|
||||||
|
|
||||||
char** argv = nullptr;
|
char** argv = nullptr;
|
||||||
{
|
{
|
||||||
size_t needed_bytes = sizeof(char*) * 2 + path.size() + 1;
|
size_t needed_bytes = sizeof(char*) * 2 + path.size() + 1;
|
||||||
|
@ -155,6 +152,8 @@ namespace Kernel
|
||||||
MUST(process->m_mapped_regions.push_back(BAN::move(argv_region)));
|
MUST(process->m_mapped_regions.push_back(BAN::move(argv_region)));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
process->m_is_userspace = true;
|
||||||
|
process->m_userspace_info.entry = process->m_loadable_elf->entry_point();
|
||||||
process->m_userspace_info.argc = 1;
|
process->m_userspace_info.argc = 1;
|
||||||
process->m_userspace_info.argv = argv;
|
process->m_userspace_info.argv = argv;
|
||||||
process->m_userspace_info.envp = nullptr;
|
process->m_userspace_info.envp = nullptr;
|
||||||
|
@ -207,7 +206,7 @@ namespace Kernel
|
||||||
m_exit_status.semaphore.unblock();
|
m_exit_status.semaphore.unblock();
|
||||||
|
|
||||||
while (m_exit_status.waiting > 0)
|
while (m_exit_status.waiting > 0)
|
||||||
Scheduler::get().reschedule();
|
Scheduler::get().yield();
|
||||||
|
|
||||||
m_process_lock.lock();
|
m_process_lock.lock();
|
||||||
|
|
||||||
|
@ -220,7 +219,7 @@ namespace Kernel
|
||||||
|
|
||||||
bool Process::on_thread_exit(Thread& thread)
|
bool Process::on_thread_exit(Thread& thread)
|
||||||
{
|
{
|
||||||
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
|
LockGuard _(m_process_lock);
|
||||||
|
|
||||||
ASSERT(m_threads.size() > 0);
|
ASSERT(m_threads.size() > 0);
|
||||||
|
|
||||||
|
@ -228,8 +227,6 @@ namespace Kernel
|
||||||
{
|
{
|
||||||
ASSERT(m_threads.front() == &thread);
|
ASSERT(m_threads.front() == &thread);
|
||||||
m_threads.clear();
|
m_threads.clear();
|
||||||
|
|
||||||
thread.setup_process_cleanup();
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -248,11 +245,18 @@ namespace Kernel
|
||||||
void Process::exit(int status, int signal)
|
void Process::exit(int status, int signal)
|
||||||
{
|
{
|
||||||
m_exit_status.exit_code = __WGENEXITCODE(status, signal);
|
m_exit_status.exit_code = __WGENEXITCODE(status, signal);
|
||||||
for (auto* thread : m_threads)
|
while (!m_threads.empty())
|
||||||
if (thread != &Thread::current())
|
m_threads.front()->on_exit();
|
||||||
Scheduler::get().terminate_thread(thread);
|
//for (auto* thread : m_threads)
|
||||||
if (this == &Process::current())
|
// if (thread != &Thread::current())
|
||||||
Scheduler::get().terminate_thread(&Thread::current());
|
// Scheduler::get().terminate_thread(thread);
|
||||||
|
//if (this == &Process::current())
|
||||||
|
//{
|
||||||
|
// m_threads.clear();
|
||||||
|
// Processor::set_interrupt_state(InterruptState::Disabled);
|
||||||
|
// Thread::current().setup_process_cleanup();
|
||||||
|
// Scheduler::get().yield();
|
||||||
|
//}
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t Process::proc_meminfo(off_t offset, BAN::ByteSpan buffer) const
|
size_t Process::proc_meminfo(off_t offset, BAN::ByteSpan buffer) const
|
||||||
|
@ -390,7 +394,7 @@ namespace Kernel
|
||||||
return TRY(LibELF::LoadableELF::load_from_inode(page_table, file.inode));
|
return TRY(LibELF::LoadableELF::load_from_inode(page_table, file.inode));
|
||||||
}
|
}
|
||||||
|
|
||||||
BAN::ErrorOr<long> Process::sys_fork(uintptr_t rsp, uintptr_t rip)
|
BAN::ErrorOr<long> Process::sys_fork(uintptr_t sp, uintptr_t ip)
|
||||||
{
|
{
|
||||||
auto page_table = BAN::UniqPtr<PageTable>::adopt(TRY(PageTable::create_userspace()));
|
auto page_table = BAN::UniqPtr<PageTable>::adopt(TRY(PageTable::create_userspace()));
|
||||||
|
|
||||||
|
@ -423,7 +427,7 @@ namespace Kernel
|
||||||
|
|
||||||
ASSERT(this == &Process::current());
|
ASSERT(this == &Process::current());
|
||||||
// FIXME: this should be able to fail
|
// FIXME: this should be able to fail
|
||||||
Thread* thread = MUST(Thread::current().clone(forked, rsp, rip));
|
Thread* thread = MUST(Thread::current().clone(forked, sp, ip));
|
||||||
forked->add_thread(thread);
|
forked->add_thread(thread);
|
||||||
forked->register_to_scheduler();
|
forked->register_to_scheduler();
|
||||||
|
|
||||||
|
@ -533,7 +537,7 @@ namespace Kernel
|
||||||
m_has_called_exec = true;
|
m_has_called_exec = true;
|
||||||
|
|
||||||
m_threads.front()->setup_exec();
|
m_threads.front()->setup_exec();
|
||||||
Scheduler::get().execute_current_thread();
|
Scheduler::get().yield();
|
||||||
ASSERT_NOT_REACHED();
|
ASSERT_NOT_REACHED();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -676,9 +680,9 @@ namespace Kernel
|
||||||
|
|
||||||
LockGuard _(m_process_lock);
|
LockGuard _(m_process_lock);
|
||||||
|
|
||||||
if (Thread::current().stack().contains(address))
|
if (Thread::current().userspace_stack().contains(address))
|
||||||
{
|
{
|
||||||
TRY(Thread::current().stack().allocate_page_for_demand_paging(address));
|
TRY(Thread::current().userspace_stack().allocate_page_for_demand_paging(address));
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1187,7 +1191,9 @@ namespace Kernel
|
||||||
|
|
||||||
[[noreturn]] static void reset_system()
|
[[noreturn]] static void reset_system()
|
||||||
{
|
{
|
||||||
|
#if ARCH(x86_64)
|
||||||
lai_acpi_reset();
|
lai_acpi_reset();
|
||||||
|
#endif
|
||||||
|
|
||||||
// acpi reset did not work
|
// acpi reset did not work
|
||||||
|
|
||||||
|
@ -1206,21 +1212,17 @@ namespace Kernel
|
||||||
|
|
||||||
DevFileSystem::get().initiate_sync(true);
|
DevFileSystem::get().initiate_sync(true);
|
||||||
|
|
||||||
lai_api_error_t error;
|
if (command == POWEROFF_REBOOT)
|
||||||
switch (command)
|
reset_system();
|
||||||
{
|
|
||||||
case POWEROFF_REBOOT:
|
|
||||||
reset_system();
|
|
||||||
break;
|
|
||||||
case POWEROFF_SHUTDOWN:
|
|
||||||
error = lai_enter_sleep(5);
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
ASSERT_NOT_REACHED();
|
|
||||||
}
|
|
||||||
|
|
||||||
|
#if ARCH(x86_64)
|
||||||
|
auto error = lai_enter_sleep(5);
|
||||||
// If we reach here, there was an error
|
// If we reach here, there was an error
|
||||||
dprintln("{}", lai_api_error_to_string(error));
|
dprintln("{}", lai_api_error_to_string(error));
|
||||||
|
#else
|
||||||
|
dprintln("poweroff available only on x86_64");
|
||||||
|
#endif
|
||||||
|
|
||||||
return BAN::Error::from_errno(EUNKNOWN);
|
return BAN::Error::from_errno(EUNKNOWN);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1490,7 +1492,7 @@ namespace Kernel
|
||||||
|
|
||||||
if (pid == m_pid)
|
if (pid == m_pid)
|
||||||
{
|
{
|
||||||
m_signal_pending_mask |= 1 << signal;
|
add_pending_signal(signal);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1503,7 +1505,7 @@ namespace Kernel
|
||||||
found = true;
|
found = true;
|
||||||
if (signal)
|
if (signal)
|
||||||
{
|
{
|
||||||
process.m_signal_pending_mask |= 1 << signal;
|
process.add_pending_signal(signal);
|
||||||
// FIXME: This feels hacky
|
// FIXME: This feels hacky
|
||||||
Scheduler::get().unblock_thread(process.m_threads.front()->tid());
|
Scheduler::get().unblock_thread(process.m_threads.front()->tid());
|
||||||
}
|
}
|
||||||
|
@ -1881,7 +1883,7 @@ namespace Kernel
|
||||||
if (vaddr == 0)
|
if (vaddr == 0)
|
||||||
return {};
|
return {};
|
||||||
|
|
||||||
if (vaddr >= thread.stack_base() && vaddr + size <= thread.stack_base() + thread.stack_size())
|
if (vaddr >= thread.userspace_stack_bottom() && vaddr + size <= thread.userspace_stack_top())
|
||||||
return {};
|
return {};
|
||||||
|
|
||||||
// FIXME: should we allow cross mapping access?
|
// FIXME: should we allow cross mapping access?
|
||||||
|
|
|
@ -13,14 +13,13 @@ namespace Kernel
|
||||||
|
|
||||||
static ProcessorID read_processor_id()
|
static ProcessorID read_processor_id()
|
||||||
{
|
{
|
||||||
uint8_t id;
|
uint32_t id;
|
||||||
asm volatile(
|
asm volatile(
|
||||||
"movl $1, %%eax;"
|
"movl $1, %%eax;"
|
||||||
"cpuid;"
|
"cpuid;"
|
||||||
"shrl $24, %%ebx;"
|
"shrl $24, %%ebx;"
|
||||||
"movb %%bl, %0;"
|
: "=b"(id)
|
||||||
: "=rm"(id)
|
:: "eax", "ecx", "edx"
|
||||||
:: "eax", "ebx", "ecx", "edx"
|
|
||||||
);
|
);
|
||||||
return id;
|
return id;
|
||||||
}
|
}
|
||||||
|
@ -39,7 +38,7 @@ namespace Kernel
|
||||||
processor.m_stack = kmalloc(s_stack_size, 4096, true);
|
processor.m_stack = kmalloc(s_stack_size, 4096, true);
|
||||||
ASSERT(processor.m_stack);
|
ASSERT(processor.m_stack);
|
||||||
|
|
||||||
processor.m_gdt = GDT::create();
|
processor.m_gdt = GDT::create(&processor);
|
||||||
ASSERT(processor.m_gdt);
|
ASSERT(processor.m_gdt);
|
||||||
|
|
||||||
processor.m_idt = IDT::create();
|
processor.m_idt = IDT::create();
|
||||||
|
@ -53,12 +52,19 @@ namespace Kernel
|
||||||
auto id = read_processor_id();
|
auto id = read_processor_id();
|
||||||
auto& processor = s_processors[id];
|
auto& processor = s_processors[id];
|
||||||
|
|
||||||
|
ASSERT(processor.m_gdt);
|
||||||
|
processor.m_gdt->load();
|
||||||
|
|
||||||
|
// initialize GS
|
||||||
|
#if ARCH(x86_64)
|
||||||
// set gs base to pointer to this processor
|
// set gs base to pointer to this processor
|
||||||
uint64_t ptr = reinterpret_cast<uint64_t>(&processor);
|
uint64_t ptr = reinterpret_cast<uint64_t>(&processor);
|
||||||
asm volatile("wrmsr" :: "d"(ptr >> 32), "a"(ptr), "c"(MSR_IA32_GS_BASE));
|
uint32_t ptr_hi = ptr >> 32;
|
||||||
|
uint32_t ptr_lo = ptr & 0xFFFFFFFF;
|
||||||
ASSERT(processor.m_gdt);
|
asm volatile("wrmsr" :: "d"(ptr_hi), "a"(ptr_lo), "c"(MSR_IA32_GS_BASE));
|
||||||
processor.gdt().load();
|
#elif ARCH(i686)
|
||||||
|
asm volatile("movw $0x28, %%ax; movw %%ax, %%gs" ::: "ax");
|
||||||
|
#endif
|
||||||
|
|
||||||
ASSERT(processor.m_idt);
|
ASSERT(processor.m_idt);
|
||||||
processor.idt().load();
|
processor.idt().load();
|
||||||
|
@ -73,4 +79,34 @@ namespace Kernel
|
||||||
write_gs_ptr(offsetof(Processor, m_idle_thread), idle_thread);
|
write_gs_ptr(offsetof(Processor, m_idle_thread), idle_thread);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void Processor::enter_interrupt(InterruptStack* interrupt_stack, InterruptRegisters* interrupt_registers)
|
||||||
|
{
|
||||||
|
ASSERT(get_interrupt_state() == InterruptState::Disabled);
|
||||||
|
ASSERT(read_gs_ptr(offsetof(Processor, m_interrupt_stack)) == nullptr);
|
||||||
|
write_gs_ptr(offsetof(Processor, m_interrupt_stack), interrupt_stack);
|
||||||
|
write_gs_ptr(offsetof(Processor, m_interrupt_registers), interrupt_registers);
|
||||||
|
}
|
||||||
|
|
||||||
|
void Processor::leave_interrupt()
|
||||||
|
{
|
||||||
|
ASSERT(get_interrupt_state() == InterruptState::Disabled);
|
||||||
|
ASSERT(read_gs_ptr(offsetof(Processor, m_interrupt_stack)) != nullptr);
|
||||||
|
write_gs_ptr(offsetof(Processor, m_interrupt_stack), nullptr);
|
||||||
|
write_gs_ptr(offsetof(Processor, m_interrupt_registers), nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
InterruptStack& Processor::get_interrupt_stack()
|
||||||
|
{
|
||||||
|
ASSERT(get_interrupt_state() == InterruptState::Disabled);
|
||||||
|
ASSERT(read_gs_ptr(offsetof(Processor, m_interrupt_stack)));
|
||||||
|
return *read_gs_sized<InterruptStack*>(offsetof(Processor, m_interrupt_stack));
|
||||||
|
}
|
||||||
|
|
||||||
|
InterruptRegisters& Processor::get_interrupt_registers()
|
||||||
|
{
|
||||||
|
ASSERT(get_interrupt_state() == InterruptState::Disabled);
|
||||||
|
ASSERT(read_gs_ptr(offsetof(Processor, m_interrupt_registers)));
|
||||||
|
return *read_gs_sized<InterruptRegisters*>(offsetof(Processor, m_interrupt_registers));
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -20,7 +20,19 @@ namespace Kernel
|
||||||
|
|
||||||
if (ecx & CPUID::ECX_RDRND)
|
if (ecx & CPUID::ECX_RDRND)
|
||||||
{
|
{
|
||||||
asm volatile("rdrand %0" : "=a"(s_rand_seed));
|
#if ARCH(x86_64)
|
||||||
|
asm volatile("rdrand %0" : "=r"(s_rand_seed));
|
||||||
|
#elif ARCH(i686)
|
||||||
|
uint32_t lo, hi;
|
||||||
|
asm volatile(
|
||||||
|
"rdrand %[lo];"
|
||||||
|
"rdrand %[hi];"
|
||||||
|
: [lo]"=r"(lo), [hi]"=r"(hi)
|
||||||
|
);
|
||||||
|
s_rand_seed = ((uint64_t)hi << 32) | lo;
|
||||||
|
#else
|
||||||
|
#error
|
||||||
|
#endif
|
||||||
dprintln("RNG seeded by RDRAND");
|
dprintln("RNG seeded by RDRAND");
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
|
|
|
@ -11,17 +11,9 @@
|
||||||
namespace Kernel
|
namespace Kernel
|
||||||
{
|
{
|
||||||
|
|
||||||
extern "C" [[noreturn]] void start_thread(uintptr_t rsp, uintptr_t rip);
|
|
||||||
extern "C" [[noreturn]] void continue_thread(uintptr_t rsp, uintptr_t rip);
|
|
||||||
|
|
||||||
static Scheduler* s_instance = nullptr;
|
static Scheduler* s_instance = nullptr;
|
||||||
static BAN::Atomic<bool> s_started { false };
|
static BAN::Atomic<bool> s_started { false };
|
||||||
|
|
||||||
ALWAYS_INLINE static void load_temp_stack()
|
|
||||||
{
|
|
||||||
asm volatile("movq %0, %%rsp" :: "rm"(Processor::current_stack_top()));
|
|
||||||
}
|
|
||||||
|
|
||||||
BAN::ErrorOr<void> Scheduler::initialize()
|
BAN::ErrorOr<void> Scheduler::initialize()
|
||||||
{
|
{
|
||||||
ASSERT(s_instance == nullptr);
|
ASSERT(s_instance == nullptr);
|
||||||
|
@ -40,10 +32,8 @@ namespace Kernel
|
||||||
void Scheduler::start()
|
void Scheduler::start()
|
||||||
{
|
{
|
||||||
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
|
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
|
||||||
m_lock.lock();
|
ASSERT(!m_active_threads.empty());
|
||||||
s_started = true;
|
yield();
|
||||||
advance_current_thread();
|
|
||||||
execute_current_thread_locked();
|
|
||||||
ASSERT_NOT_REACHED();
|
ASSERT_NOT_REACHED();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -65,41 +55,138 @@ namespace Kernel
|
||||||
return Scheduler::get().current_thread().tid();
|
return Scheduler::get().current_thread().tid();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void Scheduler::setup_next_thread()
|
||||||
|
{
|
||||||
|
ASSERT(m_lock.current_processor_has_lock());
|
||||||
|
|
||||||
|
if (auto* current = Processor::get_current_thread())
|
||||||
|
{
|
||||||
|
auto* thread = current->thread;
|
||||||
|
|
||||||
|
if (thread->state() == Thread::State::Terminated)
|
||||||
|
{
|
||||||
|
PageTable::kernel().load();
|
||||||
|
delete thread;
|
||||||
|
delete current;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
// thread->state() can be NotStarted when calling exec or cleaning up process
|
||||||
|
if (thread->state() != Thread::State::NotStarted)
|
||||||
|
{
|
||||||
|
thread->interrupt_stack() = Processor::get_interrupt_stack();
|
||||||
|
thread->interrupt_registers() = Processor::get_interrupt_registers();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (current->should_block)
|
||||||
|
{
|
||||||
|
current->should_block = false;
|
||||||
|
m_blocking_threads.add_with_wake_time(current);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
m_active_threads.push_back(current);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
SchedulerQueue::Node* node = nullptr;
|
||||||
|
while (!m_active_threads.empty())
|
||||||
|
{
|
||||||
|
node = m_active_threads.pop_front();
|
||||||
|
if (node->thread->state() != Thread::State::Terminated)
|
||||||
|
break;
|
||||||
|
|
||||||
|
PageTable::kernel().load();
|
||||||
|
delete node->thread;
|
||||||
|
delete node;
|
||||||
|
node = nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
Processor::set_current_thread(node);
|
||||||
|
|
||||||
|
auto* thread = node ? node->thread : Processor::idle_thread();
|
||||||
|
|
||||||
|
if (thread->has_process())
|
||||||
|
thread->process().page_table().load();
|
||||||
|
else
|
||||||
|
PageTable::kernel().load();
|
||||||
|
|
||||||
|
if (thread->state() == Thread::State::NotStarted)
|
||||||
|
thread->m_state = Thread::State::Executing;
|
||||||
|
|
||||||
|
Processor::gdt().set_tss_stack(thread->kernel_stack_top());
|
||||||
|
Processor::get_interrupt_stack() = thread->interrupt_stack();
|
||||||
|
Processor::get_interrupt_registers() = thread->interrupt_registers();
|
||||||
|
}
|
||||||
|
|
||||||
void Scheduler::timer_reschedule()
|
void Scheduler::timer_reschedule()
|
||||||
{
|
{
|
||||||
// Broadcast IPI to all other processors for them
|
// Broadcast IPI to all other processors for them
|
||||||
// to perform reschedule
|
// to perform reschedule
|
||||||
InterruptController::get().broadcast_ipi();
|
InterruptController::get().broadcast_ipi();
|
||||||
|
|
||||||
auto state = m_lock.lock();
|
{
|
||||||
m_blocking_threads.remove_with_wake_time(m_active_threads, SystemTimer::get().ms_since_boot());
|
SpinLockGuard _(m_lock);
|
||||||
if (save_current_thread())
|
m_blocking_threads.remove_with_wake_time(m_active_threads, SystemTimer::get().ms_since_boot());
|
||||||
return Processor::set_interrupt_state(state);
|
}
|
||||||
advance_current_thread();
|
|
||||||
execute_current_thread_locked();
|
yield();
|
||||||
ASSERT_NOT_REACHED();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void Scheduler::reschedule()
|
void Scheduler::yield()
|
||||||
{
|
{
|
||||||
auto state = m_lock.lock();
|
auto state = Processor::get_interrupt_state();
|
||||||
if (save_current_thread())
|
Processor::set_interrupt_state(InterruptState::Disabled);
|
||||||
return Processor::set_interrupt_state(state);
|
|
||||||
advance_current_thread();
|
#if ARCH(x86_64)
|
||||||
execute_current_thread_locked();
|
asm volatile(
|
||||||
ASSERT_NOT_REACHED();
|
"movq %%rsp, %%rcx;"
|
||||||
|
"movq %[load_sp], %%rsp;"
|
||||||
|
"int %[ipi];"
|
||||||
|
"movq %%rcx, %%rsp;"
|
||||||
|
// NOTE: This is offset by 2 pointers since interrupt without PL change
|
||||||
|
// does not push SP and SS. This allows accessing "whole" interrupt stack.
|
||||||
|
:: [load_sp]"r"(Processor::current_stack_top() - 2 * sizeof(uintptr_t)),
|
||||||
|
[ipi]"i"(IRQ_VECTOR_BASE + IRQ_IPI)
|
||||||
|
: "memory", "rcx"
|
||||||
|
);
|
||||||
|
#elif ARCH(i686)
|
||||||
|
asm volatile(
|
||||||
|
"movl %%esp, %%ecx;"
|
||||||
|
"movl %[load_sp], %%esp;"
|
||||||
|
"int %[ipi];"
|
||||||
|
"movl %%ecx, %%esp;"
|
||||||
|
// NOTE: This is offset by 2 pointers since interrupt without PL change
|
||||||
|
// does not push SP and SS. This allows accessing "whole" interrupt stack.
|
||||||
|
:: [load_sp]"r"(Processor::current_stack_top() - 2 * sizeof(uintptr_t)),
|
||||||
|
[ipi]"i"(IRQ_VECTOR_BASE + IRQ_IPI)
|
||||||
|
: "memory", "ecx"
|
||||||
|
);
|
||||||
|
#else
|
||||||
|
#error
|
||||||
|
#endif
|
||||||
|
|
||||||
|
Processor::set_interrupt_state(state);
|
||||||
|
}
|
||||||
|
|
||||||
|
void Scheduler::irq_reschedule()
|
||||||
|
{
|
||||||
|
SpinLockGuard _(m_lock);
|
||||||
|
setup_next_thread();
|
||||||
}
|
}
|
||||||
|
|
||||||
void Scheduler::reschedule_if_idling()
|
void Scheduler::reschedule_if_idling()
|
||||||
{
|
{
|
||||||
auto state = m_lock.lock();
|
{
|
||||||
if (m_active_threads.empty() || Processor::get_current_thread())
|
SpinLockGuard _(m_lock);
|
||||||
return m_lock.unlock(state);
|
if (Processor::get_current_thread())
|
||||||
if (save_current_thread())
|
return;
|
||||||
return Processor::set_interrupt_state(state);
|
if (m_active_threads.empty())
|
||||||
advance_current_thread();
|
return;
|
||||||
execute_current_thread_locked();
|
}
|
||||||
ASSERT_NOT_REACHED();
|
|
||||||
|
yield();
|
||||||
}
|
}
|
||||||
|
|
||||||
BAN::ErrorOr<void> Scheduler::add_thread(Thread* thread)
|
BAN::ErrorOr<void> Scheduler::add_thread(Thread* thread)
|
||||||
|
@ -114,180 +201,49 @@ namespace Kernel
|
||||||
|
|
||||||
void Scheduler::terminate_thread(Thread* thread)
|
void Scheduler::terminate_thread(Thread* thread)
|
||||||
{
|
{
|
||||||
SpinLockGuard _(m_lock);
|
auto state = m_lock.lock();
|
||||||
|
|
||||||
|
ASSERT(thread->state() == Thread::State::Executing);
|
||||||
thread->m_state = Thread::State::Terminated;
|
thread->m_state = Thread::State::Terminated;
|
||||||
if (thread == ¤t_thread())
|
thread->interrupt_stack().sp = Processor::current_stack_top();
|
||||||
execute_current_thread_locked();
|
|
||||||
}
|
|
||||||
|
|
||||||
void Scheduler::advance_current_thread()
|
m_lock.unlock(InterruptState::Disabled);
|
||||||
{
|
|
||||||
ASSERT(m_lock.current_processor_has_lock());
|
|
||||||
|
|
||||||
if (auto* current = Processor::get_current_thread())
|
// actual deletion will be done while rescheduling
|
||||||
m_active_threads.push_back(current);
|
|
||||||
Processor::set_current_thread(nullptr);
|
|
||||||
|
|
||||||
if (!m_active_threads.empty())
|
if (¤t_thread() == thread)
|
||||||
Processor::set_current_thread(m_active_threads.pop_front());
|
|
||||||
}
|
|
||||||
|
|
||||||
// NOTE: this is declared always inline, so we don't corrupt the stack
|
|
||||||
// after getting the rsp
|
|
||||||
ALWAYS_INLINE bool Scheduler::save_current_thread()
|
|
||||||
{
|
|
||||||
ASSERT(m_lock.current_processor_has_lock());
|
|
||||||
|
|
||||||
uintptr_t rsp, rip;
|
|
||||||
push_callee_saved();
|
|
||||||
if (!(rip = read_rip()))
|
|
||||||
{
|
{
|
||||||
pop_callee_saved();
|
yield();
|
||||||
return true;
|
ASSERT_NOT_REACHED();
|
||||||
}
|
|
||||||
read_rsp(rsp);
|
|
||||||
|
|
||||||
Thread& current = current_thread();
|
|
||||||
current.set_rip(rip);
|
|
||||||
current.set_rsp(rsp);
|
|
||||||
|
|
||||||
load_temp_stack();
|
|
||||||
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
void Scheduler::delete_current_process_and_thread()
|
|
||||||
{
|
|
||||||
m_lock.lock();
|
|
||||||
|
|
||||||
load_temp_stack();
|
|
||||||
PageTable::kernel().load();
|
|
||||||
|
|
||||||
auto* current = Processor::get_current_thread();
|
|
||||||
ASSERT(current);
|
|
||||||
delete ¤t->thread->process();
|
|
||||||
delete current->thread;
|
|
||||||
delete current;
|
|
||||||
Processor::set_current_thread(nullptr);
|
|
||||||
|
|
||||||
advance_current_thread();
|
|
||||||
execute_current_thread_locked();
|
|
||||||
ASSERT_NOT_REACHED();
|
|
||||||
}
|
|
||||||
|
|
||||||
void Scheduler::execute_current_thread()
|
|
||||||
{
|
|
||||||
m_lock.lock();
|
|
||||||
load_temp_stack();
|
|
||||||
PageTable::kernel().load();
|
|
||||||
execute_current_thread_stack_loaded();
|
|
||||||
ASSERT_NOT_REACHED();
|
|
||||||
}
|
|
||||||
|
|
||||||
void Scheduler::execute_current_thread_locked()
|
|
||||||
{
|
|
||||||
ASSERT(m_lock.current_processor_has_lock());
|
|
||||||
load_temp_stack();
|
|
||||||
PageTable::kernel().load();
|
|
||||||
execute_current_thread_stack_loaded();
|
|
||||||
ASSERT_NOT_REACHED();
|
|
||||||
}
|
|
||||||
|
|
||||||
NEVER_INLINE void Scheduler::execute_current_thread_stack_loaded()
|
|
||||||
{
|
|
||||||
ASSERT(m_lock.current_processor_has_lock());
|
|
||||||
|
|
||||||
#if SCHEDULER_VERIFY_STACK
|
|
||||||
vaddr_t rsp;
|
|
||||||
read_rsp(rsp);
|
|
||||||
ASSERT(Processor::current_stack_bottom() <= rsp && rsp <= Processor::current_stack_top());
|
|
||||||
ASSERT(&PageTable::current() == &PageTable::kernel());
|
|
||||||
#endif
|
|
||||||
|
|
||||||
Thread* current = ¤t_thread();
|
|
||||||
|
|
||||||
#if __enable_sse
|
|
||||||
if (current != Thread::sse_thread())
|
|
||||||
{
|
|
||||||
asm volatile(
|
|
||||||
"movq %cr0, %rax;"
|
|
||||||
"orq $(1 << 3), %rax;"
|
|
||||||
"movq %rax, %cr0"
|
|
||||||
);
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
while (current->state() == Thread::State::Terminated)
|
|
||||||
{
|
|
||||||
auto* node = Processor::get_current_thread();
|
|
||||||
if (node->thread->has_process())
|
|
||||||
if (node->thread->process().on_thread_exit(*node->thread))
|
|
||||||
break;
|
|
||||||
|
|
||||||
delete node->thread;
|
|
||||||
delete node;
|
|
||||||
Processor::set_current_thread(nullptr);
|
|
||||||
|
|
||||||
advance_current_thread();
|
|
||||||
current = ¤t_thread();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (current->has_process())
|
Processor::set_interrupt_state(state);
|
||||||
{
|
|
||||||
current->process().page_table().load();
|
|
||||||
Processor::gdt().set_tss_stack(current->interrupt_stack_base() + current->interrupt_stack_size());
|
|
||||||
}
|
|
||||||
else
|
|
||||||
PageTable::kernel().load();
|
|
||||||
|
|
||||||
switch (current->state())
|
|
||||||
{
|
|
||||||
case Thread::State::NotStarted:
|
|
||||||
current->set_started();
|
|
||||||
m_lock.unlock(InterruptState::Disabled);
|
|
||||||
start_thread(current->rsp(), current->rip());
|
|
||||||
case Thread::State::Executing:
|
|
||||||
m_lock.unlock(InterruptState::Disabled);
|
|
||||||
while (current->can_add_signal_to_execute())
|
|
||||||
current->handle_signal();
|
|
||||||
continue_thread(current->rsp(), current->rip());
|
|
||||||
case Thread::State::Terminated:
|
|
||||||
ASSERT_NOT_REACHED();
|
|
||||||
}
|
|
||||||
|
|
||||||
ASSERT_NOT_REACHED();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void Scheduler::set_current_thread_sleeping_impl(Semaphore* semaphore, uint64_t wake_time)
|
void Scheduler::set_current_thread_sleeping_impl(Semaphore* semaphore, uint64_t wake_time)
|
||||||
{
|
{
|
||||||
ASSERT(m_lock.current_processor_has_lock());
|
auto state = m_lock.lock();
|
||||||
|
|
||||||
if (save_current_thread())
|
|
||||||
return;
|
|
||||||
|
|
||||||
auto* current = Processor::get_current_thread();
|
auto* current = Processor::get_current_thread();
|
||||||
current->semaphore = semaphore;
|
current->semaphore = semaphore;
|
||||||
current->wake_time = wake_time;
|
current->wake_time = wake_time;
|
||||||
m_blocking_threads.add_with_wake_time(current);
|
current->should_block = true;
|
||||||
Processor::set_current_thread(nullptr);
|
|
||||||
|
|
||||||
advance_current_thread();
|
m_lock.unlock(InterruptState::Disabled);
|
||||||
execute_current_thread_locked();
|
|
||||||
ASSERT_NOT_REACHED();
|
yield();
|
||||||
|
|
||||||
|
Processor::set_interrupt_state(state);
|
||||||
}
|
}
|
||||||
|
|
||||||
void Scheduler::set_current_thread_sleeping(uint64_t wake_time)
|
void Scheduler::set_current_thread_sleeping(uint64_t wake_time)
|
||||||
{
|
{
|
||||||
auto state = m_lock.lock();
|
|
||||||
set_current_thread_sleeping_impl(nullptr, wake_time);
|
set_current_thread_sleeping_impl(nullptr, wake_time);
|
||||||
Processor::set_interrupt_state(state);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void Scheduler::block_current_thread(Semaphore* semaphore, uint64_t wake_time)
|
void Scheduler::block_current_thread(Semaphore* semaphore, uint64_t wake_time)
|
||||||
{
|
{
|
||||||
auto state = m_lock.lock();
|
|
||||||
set_current_thread_sleeping_impl(semaphore, wake_time);
|
set_current_thread_sleeping_impl(semaphore, wake_time);
|
||||||
Processor::set_interrupt_state(state);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void Scheduler::unblock_threads(Semaphore* semaphore)
|
void Scheduler::unblock_threads(Semaphore* semaphore)
|
||||||
|
|
|
@ -70,11 +70,11 @@ namespace Kernel
|
||||||
|
|
||||||
stop_cmd(m_port);
|
stop_cmd(m_port);
|
||||||
|
|
||||||
paddr_t fis_paddr = m_dma_region->paddr();
|
uint64_t fis_paddr = m_dma_region->paddr();
|
||||||
m_port->fb = fis_paddr & 0xFFFFFFFF;
|
m_port->fb = fis_paddr & 0xFFFFFFFF;
|
||||||
m_port->fbu = fis_paddr >> 32;
|
m_port->fbu = fis_paddr >> 32;
|
||||||
|
|
||||||
paddr_t command_list_paddr = fis_paddr + sizeof(ReceivedFIS);
|
uint64_t command_list_paddr = fis_paddr + sizeof(ReceivedFIS);
|
||||||
m_port->clb = command_list_paddr & 0xFFFFFFFF;
|
m_port->clb = command_list_paddr & 0xFFFFFFFF;
|
||||||
m_port->clbu = command_list_paddr >> 32;
|
m_port->clbu = command_list_paddr >> 32;
|
||||||
|
|
||||||
|
@ -168,7 +168,7 @@ namespace Kernel
|
||||||
// This doesn't allow scheduler to go properly idle.
|
// This doesn't allow scheduler to go properly idle.
|
||||||
while (SystemTimer::get().ms_since_boot() < start_time + s_ata_timeout)
|
while (SystemTimer::get().ms_since_boot() < start_time + s_ata_timeout)
|
||||||
{
|
{
|
||||||
Scheduler::get().reschedule();
|
Scheduler::get().yield();
|
||||||
if (!(m_port->ci & (1 << command_slot)))
|
if (!(m_port->ci & (1 << command_slot)))
|
||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
|
@ -236,8 +236,9 @@ namespace Kernel
|
||||||
|
|
||||||
volatile auto& command_table = *reinterpret_cast<HBACommandTable*>(m_dma_region->paddr_to_vaddr(command_header.ctba));
|
volatile auto& command_table = *reinterpret_cast<HBACommandTable*>(m_dma_region->paddr_to_vaddr(command_header.ctba));
|
||||||
memset(const_cast<HBACommandTable*>(&command_table), 0x00, sizeof(HBACommandTable));
|
memset(const_cast<HBACommandTable*>(&command_table), 0x00, sizeof(HBACommandTable));
|
||||||
command_table.prdt_entry[0].dba = m_data_dma_region->paddr() & 0xFFFFFFFF;
|
uint64_t data_dma_paddr64 = m_data_dma_region->paddr();
|
||||||
command_table.prdt_entry[0].dbau = m_data_dma_region->paddr() >> 32;
|
command_table.prdt_entry[0].dba = data_dma_paddr64 & 0xFFFFFFFF;
|
||||||
|
command_table.prdt_entry[0].dbau = data_dma_paddr64 >> 32;
|
||||||
command_table.prdt_entry[0].dbc = sector_count * sector_size() - 1;
|
command_table.prdt_entry[0].dbc = sector_count * sector_size() - 1;
|
||||||
command_table.prdt_entry[0].i = 1;
|
command_table.prdt_entry[0].i = 1;
|
||||||
|
|
||||||
|
|
|
@ -63,7 +63,7 @@ namespace Kernel
|
||||||
|
|
||||||
for (uint64_t i = 0; i < sector_count;)
|
for (uint64_t i = 0; i < sector_count;)
|
||||||
{
|
{
|
||||||
uint16_t count = BAN::Math::min(sector_count - i, m_dma_region->size() / m_block_size);
|
uint16_t count = BAN::Math::min<uint64_t>(sector_count - i, m_dma_region->size() / m_block_size);
|
||||||
|
|
||||||
NVMe::SubmissionQueueEntry sqe {};
|
NVMe::SubmissionQueueEntry sqe {};
|
||||||
sqe.opc = NVMe::OPC_IO_READ;
|
sqe.opc = NVMe::OPC_IO_READ;
|
||||||
|
@ -90,7 +90,7 @@ namespace Kernel
|
||||||
|
|
||||||
for (uint64_t i = 0; i < sector_count;)
|
for (uint64_t i = 0; i < sector_count;)
|
||||||
{
|
{
|
||||||
uint16_t count = BAN::Math::min(sector_count - i, m_dma_region->size() / m_block_size);
|
uint16_t count = BAN::Math::min<uint16_t>(sector_count - i, m_dma_region->size() / m_block_size);
|
||||||
|
|
||||||
memcpy(reinterpret_cast<void*>(m_dma_region->vaddr()), buffer.data() + i * m_block_size, count * m_block_size);
|
memcpy(reinterpret_cast<void*>(m_dma_region->vaddr()), buffer.data() + i * m_block_size, count * m_block_size);
|
||||||
|
|
||||||
|
|
|
@ -15,8 +15,8 @@ namespace Kernel
|
||||||
, m_doorbell(db)
|
, m_doorbell(db)
|
||||||
, m_qdepth(qdepth)
|
, m_qdepth(qdepth)
|
||||||
{
|
{
|
||||||
for (uint32_t i = qdepth; i < 64; i++)
|
for (uint32_t i = qdepth; i < m_mask_bits; i++)
|
||||||
m_used_mask |= (uint64_t)1 << i;
|
m_used_mask |= (size_t)1 << i;
|
||||||
set_irq(irq);
|
set_irq(irq);
|
||||||
enable_interrupt();
|
enable_interrupt();
|
||||||
}
|
}
|
||||||
|
@ -29,8 +29,8 @@ namespace Kernel
|
||||||
{
|
{
|
||||||
uint16_t sts = cq_ptr[m_cq_head].sts >> 1;
|
uint16_t sts = cq_ptr[m_cq_head].sts >> 1;
|
||||||
uint16_t cid = cq_ptr[m_cq_head].cid;
|
uint16_t cid = cq_ptr[m_cq_head].cid;
|
||||||
uint64_t cid_mask = (uint64_t)1 << cid;
|
size_t cid_mask = (size_t)1 << cid;
|
||||||
ASSERT(cid < 64);
|
ASSERT(cid < m_mask_bits);
|
||||||
|
|
||||||
ASSERT((m_done_mask & cid_mask) == 0);
|
ASSERT((m_done_mask & cid_mask) == 0);
|
||||||
|
|
||||||
|
@ -50,7 +50,7 @@ namespace Kernel
|
||||||
uint16_t NVMeQueue::submit_command(NVMe::SubmissionQueueEntry& sqe)
|
uint16_t NVMeQueue::submit_command(NVMe::SubmissionQueueEntry& sqe)
|
||||||
{
|
{
|
||||||
uint16_t cid = reserve_cid();
|
uint16_t cid = reserve_cid();
|
||||||
uint64_t cid_mask = (uint64_t)1 << cid;
|
size_t cid_mask = (size_t)1 << cid;
|
||||||
|
|
||||||
{
|
{
|
||||||
SpinLockGuard _(m_lock);
|
SpinLockGuard _(m_lock);
|
||||||
|
@ -98,13 +98,13 @@ namespace Kernel
|
||||||
}
|
}
|
||||||
|
|
||||||
uint16_t cid = 0;
|
uint16_t cid = 0;
|
||||||
for (; cid < 64; cid++)
|
for (; cid < m_mask_bits; cid++)
|
||||||
if ((m_used_mask & ((uint64_t)1 << cid)) == 0)
|
if ((m_used_mask & ((size_t)1 << cid)) == 0)
|
||||||
break;
|
break;
|
||||||
ASSERT(cid < 64);
|
ASSERT(cid < m_mask_bits);
|
||||||
ASSERT(cid < m_qdepth);
|
ASSERT(cid < m_qdepth);
|
||||||
|
|
||||||
m_used_mask |= (uint64_t)1 << cid;
|
m_used_mask |= (size_t)1 << cid;
|
||||||
|
|
||||||
m_lock.unlock(state);
|
m_lock.unlock(state);
|
||||||
return cid;
|
return cid;
|
||||||
|
|
|
@ -10,9 +10,9 @@
|
||||||
namespace Kernel
|
namespace Kernel
|
||||||
{
|
{
|
||||||
|
|
||||||
extern "C" long sys_fork(uintptr_t rsp, uintptr_t rip)
|
extern "C" long sys_fork(uintptr_t sp, uintptr_t ip)
|
||||||
{
|
{
|
||||||
auto ret = Process::current().sys_fork(rsp, rip);
|
auto ret = Process::current().sys_fork(sp, ip);
|
||||||
if (ret.is_error())
|
if (ret.is_error())
|
||||||
return -ret.error().get_error_code();
|
return -ret.error().get_error_code();
|
||||||
return ret.value();
|
return ret.value();
|
||||||
|
@ -28,12 +28,9 @@ namespace Kernel
|
||||||
#undef O
|
#undef O
|
||||||
};
|
};
|
||||||
|
|
||||||
extern "C" long cpp_syscall_handler(int syscall, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t arg4, uintptr_t arg5, InterruptStack& interrupt_stack)
|
extern "C" long cpp_syscall_handler(int syscall, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t arg4, uintptr_t arg5, InterruptStack* interrupt_stack)
|
||||||
{
|
{
|
||||||
ASSERT((interrupt_stack.cs & 0b11) == 0b11);
|
ASSERT(GDT::is_user_segment(interrupt_stack->cs));
|
||||||
|
|
||||||
Thread::current().set_return_rsp(interrupt_stack.rsp);
|
|
||||||
Thread::current().set_return_rip(interrupt_stack.rip);
|
|
||||||
|
|
||||||
asm volatile("sti");
|
asm volatile("sti");
|
||||||
|
|
||||||
|
|
|
@ -12,19 +12,26 @@
|
||||||
namespace Kernel
|
namespace Kernel
|
||||||
{
|
{
|
||||||
|
|
||||||
extern "C" void thread_userspace_trampoline(uint64_t rsp, uint64_t rip, int argc, char** argv, char** envp);
|
extern "C" [[noreturn]] void start_kernel_thread();
|
||||||
extern "C" uintptr_t read_rip();
|
extern "C" [[noreturn]] void start_userspace_thread();
|
||||||
|
|
||||||
extern "C" void signal_trampoline();
|
extern "C" void signal_trampoline();
|
||||||
|
|
||||||
template<typename T>
|
template<typename T>
|
||||||
static void write_to_stack(uintptr_t& rsp, const T& value)
|
static void write_to_stack(uintptr_t& rsp, const T& value) requires(sizeof(T) <= sizeof(uintptr_t))
|
||||||
{
|
{
|
||||||
rsp -= sizeof(uintptr_t);
|
rsp -= sizeof(uintptr_t);
|
||||||
if constexpr(sizeof(T) < sizeof(uintptr_t))
|
*(uintptr_t*)rsp = (uintptr_t)value;
|
||||||
*(uintptr_t*)rsp = (uintptr_t)value;
|
}
|
||||||
else
|
|
||||||
memcpy((void*)rsp, (void*)&value, sizeof(uintptr_t));
|
extern "C" uintptr_t get_thread_start_sp()
|
||||||
|
{
|
||||||
|
return Thread::current().interrupt_stack().sp;
|
||||||
|
}
|
||||||
|
|
||||||
|
extern "C" uintptr_t get_userspace_thread_stack_top()
|
||||||
|
{
|
||||||
|
return Thread::current().userspace_stack_top() - 4 * sizeof(uintptr_t);
|
||||||
}
|
}
|
||||||
|
|
||||||
static pid_t s_next_tid = 1;
|
static pid_t s_next_tid = 1;
|
||||||
|
@ -38,7 +45,7 @@ namespace Kernel
|
||||||
BAN::ScopeGuard thread_deleter([thread] { delete thread; });
|
BAN::ScopeGuard thread_deleter([thread] { delete thread; });
|
||||||
|
|
||||||
// Initialize stack and registers
|
// Initialize stack and registers
|
||||||
thread->m_stack = TRY(VirtualRange::create_to_vaddr_range(
|
thread->m_kernel_stack = TRY(VirtualRange::create_to_vaddr_range(
|
||||||
PageTable::kernel(),
|
PageTable::kernel(),
|
||||||
KERNEL_OFFSET,
|
KERNEL_OFFSET,
|
||||||
~(uintptr_t)0,
|
~(uintptr_t)0,
|
||||||
|
@ -46,14 +53,21 @@ namespace Kernel
|
||||||
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
|
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
|
||||||
true
|
true
|
||||||
));
|
));
|
||||||
thread->m_rsp = thread->stack_base() + thread->stack_size();
|
|
||||||
thread->m_rip = (uintptr_t)entry;
|
|
||||||
|
|
||||||
// Initialize stack for returning
|
// Initialize stack for returning
|
||||||
write_to_stack(thread->m_rsp, nullptr); // alignment
|
uintptr_t sp = thread->kernel_stack_top();
|
||||||
write_to_stack(thread->m_rsp, thread);
|
write_to_stack(sp, thread);
|
||||||
write_to_stack(thread->m_rsp, &Thread::on_exit);
|
write_to_stack(sp, &Thread::on_exit_trampoline);
|
||||||
write_to_stack(thread->m_rsp, data);
|
write_to_stack(sp, data);
|
||||||
|
write_to_stack(sp, entry);
|
||||||
|
|
||||||
|
thread->m_interrupt_stack.ip = reinterpret_cast<vaddr_t>(start_kernel_thread);
|
||||||
|
thread->m_interrupt_stack.cs = 0x08;
|
||||||
|
thread->m_interrupt_stack.flags = 0x002;
|
||||||
|
thread->m_interrupt_stack.sp = sp;
|
||||||
|
thread->m_interrupt_stack.ss = 0x10;
|
||||||
|
|
||||||
|
memset(&thread->m_interrupt_registers, 0, sizeof(InterruptRegisters));
|
||||||
|
|
||||||
thread_deleter.disable();
|
thread_deleter.disable();
|
||||||
|
|
||||||
|
@ -72,7 +86,15 @@ namespace Kernel
|
||||||
|
|
||||||
thread->m_is_userspace = true;
|
thread->m_is_userspace = true;
|
||||||
|
|
||||||
thread->m_stack = TRY(VirtualRange::create_to_vaddr_range(
|
thread->m_kernel_stack = TRY(VirtualRange::create_to_vaddr_range(
|
||||||
|
process->page_table(),
|
||||||
|
0x300000, KERNEL_OFFSET,
|
||||||
|
m_kernel_stack_size,
|
||||||
|
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
|
||||||
|
true
|
||||||
|
));
|
||||||
|
|
||||||
|
thread->m_userspace_stack = TRY(VirtualRange::create_to_vaddr_range(
|
||||||
process->page_table(),
|
process->page_table(),
|
||||||
0x300000, KERNEL_OFFSET,
|
0x300000, KERNEL_OFFSET,
|
||||||
m_userspace_stack_size,
|
m_userspace_stack_size,
|
||||||
|
@ -80,14 +102,6 @@ namespace Kernel
|
||||||
true
|
true
|
||||||
));
|
));
|
||||||
|
|
||||||
thread->m_interrupt_stack = TRY(VirtualRange::create_to_vaddr_range(
|
|
||||||
process->page_table(),
|
|
||||||
0x300000, KERNEL_OFFSET,
|
|
||||||
m_interrupt_stack_size,
|
|
||||||
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
|
|
||||||
true
|
|
||||||
));
|
|
||||||
|
|
||||||
thread->setup_exec();
|
thread->setup_exec();
|
||||||
|
|
||||||
thread_deleter.disable();
|
thread_deleter.disable();
|
||||||
|
@ -99,16 +113,33 @@ namespace Kernel
|
||||||
: m_tid(tid), m_process(process)
|
: m_tid(tid), m_process(process)
|
||||||
{
|
{
|
||||||
#if __enable_sse
|
#if __enable_sse
|
||||||
|
#if ARCH(x86_64)
|
||||||
uintptr_t cr0;
|
uintptr_t cr0;
|
||||||
asm volatile(
|
asm volatile(
|
||||||
"movq %%cr0, %%rax;"
|
"movq %%cr0, %%rax;"
|
||||||
"movq %%rax, %%rbx;"
|
"movq %%rax, %[cr0];"
|
||||||
"andq $~(1 << 3), %%rax;"
|
"andq $~(1 << 3), %%rax;"
|
||||||
"movq %%rax, %%cr0;"
|
"movq %%rax, %%cr0;"
|
||||||
: "=b"(cr0)
|
: [cr0]"=r"(cr0)
|
||||||
|
:: "rax"
|
||||||
);
|
);
|
||||||
save_sse();
|
save_sse();
|
||||||
asm volatile("movq %0, %%cr0" :: "r"(cr0));
|
asm volatile("movq %0, %%cr0" :: "r"(cr0));
|
||||||
|
#elif ARCH(i686)
|
||||||
|
uintptr_t cr0;
|
||||||
|
asm volatile(
|
||||||
|
"movl %%cr0, %%eax;"
|
||||||
|
"movl %%eax, %[cr0];"
|
||||||
|
"andl $~(1 << 3), %%eax;"
|
||||||
|
"movl %%eax, %%cr0;"
|
||||||
|
: [cr0]"=r"(cr0)
|
||||||
|
:: "eax"
|
||||||
|
);
|
||||||
|
save_sse();
|
||||||
|
asm volatile("movl %0, %%cr0" :: "r"(cr0));
|
||||||
|
#else
|
||||||
|
#error
|
||||||
|
#endif
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -123,11 +154,22 @@ namespace Kernel
|
||||||
return *m_process;
|
return *m_process;
|
||||||
}
|
}
|
||||||
|
|
||||||
Thread::~Thread()
|
const Process& Thread::process() const
|
||||||
{
|
{
|
||||||
|
ASSERT(m_process);
|
||||||
|
return *m_process;
|
||||||
}
|
}
|
||||||
|
|
||||||
BAN::ErrorOr<Thread*> Thread::clone(Process* new_process, uintptr_t rsp, uintptr_t rip)
|
Thread::~Thread()
|
||||||
|
{
|
||||||
|
if (m_delete_process)
|
||||||
|
{
|
||||||
|
ASSERT(m_process);
|
||||||
|
delete m_process;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
BAN::ErrorOr<Thread*> Thread::clone(Process* new_process, uintptr_t sp, uintptr_t ip)
|
||||||
{
|
{
|
||||||
ASSERT(m_is_userspace);
|
ASSERT(m_is_userspace);
|
||||||
ASSERT(m_state == State::Executing);
|
ASSERT(m_state == State::Executing);
|
||||||
|
@ -139,13 +181,22 @@ namespace Kernel
|
||||||
|
|
||||||
thread->m_is_userspace = true;
|
thread->m_is_userspace = true;
|
||||||
|
|
||||||
thread->m_interrupt_stack = TRY(m_interrupt_stack->clone(new_process->page_table()));
|
thread->m_kernel_stack = TRY(m_kernel_stack->clone(new_process->page_table()));
|
||||||
thread->m_stack = TRY(m_stack->clone(new_process->page_table()));
|
thread->m_userspace_stack = TRY(m_userspace_stack->clone(new_process->page_table()));
|
||||||
|
|
||||||
thread->m_state = State::Executing;
|
thread->m_state = State::NotStarted;
|
||||||
|
|
||||||
thread->m_rip = rip;
|
thread->m_interrupt_stack.ip = ip;
|
||||||
thread->m_rsp = rsp;
|
thread->m_interrupt_stack.cs = 0x08;
|
||||||
|
thread->m_interrupt_stack.flags = 0x002;
|
||||||
|
thread->m_interrupt_stack.sp = sp;
|
||||||
|
thread->m_interrupt_stack.ss = 0x10;
|
||||||
|
|
||||||
|
#if ARCH(x86_64)
|
||||||
|
thread->m_interrupt_registers.rax = 0;
|
||||||
|
#elif ARCH(i686)
|
||||||
|
thread->m_interrupt_registers.eax = 0;
|
||||||
|
#endif
|
||||||
|
|
||||||
thread_deleter.disable();
|
thread_deleter.disable();
|
||||||
|
|
||||||
|
@ -156,58 +207,69 @@ namespace Kernel
|
||||||
{
|
{
|
||||||
ASSERT(is_userspace());
|
ASSERT(is_userspace());
|
||||||
m_state = State::NotStarted;
|
m_state = State::NotStarted;
|
||||||
static entry_t entry_trampoline(
|
|
||||||
[](void*)
|
|
||||||
{
|
|
||||||
const auto& info = Process::current().userspace_info();
|
|
||||||
thread_userspace_trampoline(Thread::current().rsp(), info.entry, info.argc, info.argv, info.envp);
|
|
||||||
ASSERT_NOT_REACHED();
|
|
||||||
}
|
|
||||||
);
|
|
||||||
m_rsp = stack_base() + stack_size();
|
|
||||||
m_rip = (uintptr_t)entry_trampoline;
|
|
||||||
|
|
||||||
// Signal mask is inherited
|
// Signal mask is inherited
|
||||||
|
|
||||||
// Setup stack for returning
|
auto& userspace_info = process().userspace_info();
|
||||||
ASSERT(m_rsp % PAGE_SIZE == 0);
|
ASSERT(userspace_info.entry);
|
||||||
PageTable::with_fast_page(process().page_table().physical_address_of(m_rsp - PAGE_SIZE), [&] {
|
|
||||||
uintptr_t rsp = PageTable::fast_page() + PAGE_SIZE;
|
// Initialize stack for returning
|
||||||
write_to_stack(rsp, nullptr); // alignment
|
PageTable::with_fast_page(process().page_table().physical_address_of(kernel_stack_top() - PAGE_SIZE), [&] {
|
||||||
write_to_stack(rsp, this);
|
uintptr_t sp = PageTable::fast_page() + PAGE_SIZE;
|
||||||
write_to_stack(rsp, &Thread::on_exit);
|
write_to_stack(sp, userspace_info.entry);
|
||||||
write_to_stack(rsp, nullptr);
|
write_to_stack(sp, userspace_info.argc);
|
||||||
m_rsp -= 4 * sizeof(uintptr_t);
|
write_to_stack(sp, userspace_info.argv);
|
||||||
|
write_to_stack(sp, userspace_info.envp);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
m_interrupt_stack.ip = reinterpret_cast<vaddr_t>(start_userspace_thread);;
|
||||||
|
m_interrupt_stack.cs = 0x08;
|
||||||
|
m_interrupt_stack.flags = 0x002;
|
||||||
|
m_interrupt_stack.sp = kernel_stack_top() - 4 * sizeof(uintptr_t);
|
||||||
|
m_interrupt_stack.ss = 0x10;
|
||||||
|
|
||||||
|
memset(&m_interrupt_registers, 0, sizeof(InterruptRegisters));
|
||||||
}
|
}
|
||||||
|
|
||||||
void Thread::setup_process_cleanup()
|
void Thread::setup_process_cleanup()
|
||||||
{
|
{
|
||||||
|
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
|
||||||
|
|
||||||
m_state = State::NotStarted;
|
m_state = State::NotStarted;
|
||||||
static entry_t entry(
|
static entry_t entry(
|
||||||
[](void* process_ptr)
|
[](void* process_ptr)
|
||||||
{
|
{
|
||||||
auto& process = *reinterpret_cast<Process*>(process_ptr);
|
auto* thread = &Thread::current();
|
||||||
process.cleanup_function();
|
auto* process = static_cast<Process*>(process_ptr);
|
||||||
Scheduler::get().delete_current_process_and_thread();
|
|
||||||
ASSERT_NOT_REACHED();
|
ASSERT(thread->m_process == process);
|
||||||
|
|
||||||
|
process->cleanup_function();
|
||||||
|
|
||||||
|
thread->m_delete_process = true;
|
||||||
|
|
||||||
|
// will call on thread exit after return
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
m_rsp = stack_base() + stack_size();
|
|
||||||
m_rip = (uintptr_t)entry;
|
|
||||||
|
|
||||||
m_signal_pending_mask = 0;
|
m_signal_pending_mask = 0;
|
||||||
m_signal_block_mask = ~0ull;
|
m_signal_block_mask = ~0ull;
|
||||||
|
|
||||||
ASSERT(m_rsp % PAGE_SIZE == 0);
|
PageTable::with_fast_page(process().page_table().physical_address_of(kernel_stack_top() - PAGE_SIZE), [&] {
|
||||||
PageTable::with_fast_page(process().page_table().physical_address_of(m_rsp - PAGE_SIZE), [&] {
|
uintptr_t sp = PageTable::fast_page() + PAGE_SIZE;
|
||||||
uintptr_t rsp = PageTable::fast_page() + PAGE_SIZE;
|
write_to_stack(sp, this);
|
||||||
write_to_stack(rsp, nullptr); // alignment
|
write_to_stack(sp, &Thread::on_exit_trampoline);
|
||||||
write_to_stack(rsp, this);
|
write_to_stack(sp, m_process);
|
||||||
write_to_stack(rsp, &Thread::on_exit);
|
write_to_stack(sp, entry);
|
||||||
write_to_stack(rsp, m_process);
|
|
||||||
m_rsp -= 4 * sizeof(uintptr_t);
|
|
||||||
});
|
});
|
||||||
|
|
||||||
|
m_interrupt_stack.ip = reinterpret_cast<vaddr_t>(start_kernel_thread);
|
||||||
|
m_interrupt_stack.cs = 0x08;
|
||||||
|
m_interrupt_stack.flags = 0x202;
|
||||||
|
m_interrupt_stack.sp = kernel_stack_top() - 4 * sizeof(uintptr_t);
|
||||||
|
m_interrupt_stack.ss = 0x10;
|
||||||
|
|
||||||
|
memset(&m_interrupt_registers, 0, sizeof(InterruptRegisters));
|
||||||
}
|
}
|
||||||
|
|
||||||
bool Thread::is_interrupted_by_signal()
|
bool Thread::is_interrupted_by_signal()
|
||||||
|
@ -221,10 +283,10 @@ namespace Kernel
|
||||||
{
|
{
|
||||||
if (!is_userspace() || m_state != State::Executing)
|
if (!is_userspace() || m_state != State::Executing)
|
||||||
return false;
|
return false;
|
||||||
auto& interrupt_stack = *reinterpret_cast<InterruptStack*>(interrupt_stack_base() + interrupt_stack_size() - sizeof(InterruptStack));
|
auto& interrupt_stack = *reinterpret_cast<InterruptStack*>(kernel_stack_top() - sizeof(InterruptStack));
|
||||||
if (!GDT::is_user_segment(interrupt_stack.cs))
|
if (!GDT::is_user_segment(interrupt_stack.cs))
|
||||||
return false;
|
return false;
|
||||||
uint64_t full_pending_mask = m_signal_pending_mask | m_process->m_signal_pending_mask;
|
uint64_t full_pending_mask = m_signal_pending_mask | process().signal_pending_mask();;
|
||||||
return full_pending_mask & ~m_signal_block_mask;
|
return full_pending_mask & ~m_signal_block_mask;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -232,8 +294,8 @@ namespace Kernel
|
||||||
{
|
{
|
||||||
if (!is_userspace() || m_state != State::Executing)
|
if (!is_userspace() || m_state != State::Executing)
|
||||||
return false;
|
return false;
|
||||||
auto& interrupt_stack = *reinterpret_cast<InterruptStack*>(interrupt_stack_base() + interrupt_stack_size() - sizeof(InterruptStack));
|
auto& interrupt_stack = *reinterpret_cast<InterruptStack*>(kernel_stack_top() - sizeof(InterruptStack));
|
||||||
return interrupt_stack.rip == (uintptr_t)signal_trampoline;
|
return interrupt_stack.ip == (uintptr_t)signal_trampoline;
|
||||||
}
|
}
|
||||||
|
|
||||||
void Thread::handle_signal(int signal)
|
void Thread::handle_signal(int signal)
|
||||||
|
@ -243,12 +305,12 @@ namespace Kernel
|
||||||
|
|
||||||
SpinLockGuard _(m_signal_lock);
|
SpinLockGuard _(m_signal_lock);
|
||||||
|
|
||||||
auto& interrupt_stack = *reinterpret_cast<InterruptStack*>(interrupt_stack_base() + interrupt_stack_size() - sizeof(InterruptStack));
|
auto& interrupt_stack = *reinterpret_cast<InterruptStack*>(kernel_stack_top() - sizeof(InterruptStack));
|
||||||
ASSERT(GDT::is_user_segment(interrupt_stack.cs));
|
ASSERT(GDT::is_user_segment(interrupt_stack.cs));
|
||||||
|
|
||||||
if (signal == 0)
|
if (signal == 0)
|
||||||
{
|
{
|
||||||
uint64_t full_pending_mask = m_signal_pending_mask | process().m_signal_pending_mask;
|
uint64_t full_pending_mask = m_signal_pending_mask | process().signal_pending_mask();
|
||||||
for (signal = _SIGMIN; signal <= _SIGMAX; signal++)
|
for (signal = _SIGMIN; signal <= _SIGMAX; signal++)
|
||||||
{
|
{
|
||||||
uint64_t mask = 1ull << signal;
|
uint64_t mask = 1ull << signal;
|
||||||
|
@ -266,18 +328,18 @@ namespace Kernel
|
||||||
vaddr_t signal_handler = process().m_signal_handlers[signal];
|
vaddr_t signal_handler = process().m_signal_handlers[signal];
|
||||||
|
|
||||||
m_signal_pending_mask &= ~(1ull << signal);
|
m_signal_pending_mask &= ~(1ull << signal);
|
||||||
process().m_signal_pending_mask &= ~(1ull << signal);
|
process().remove_pending_signal(signal);
|
||||||
|
|
||||||
if (signal_handler == (vaddr_t)SIG_IGN)
|
if (signal_handler == (vaddr_t)SIG_IGN)
|
||||||
;
|
;
|
||||||
else if (signal_handler != (vaddr_t)SIG_DFL)
|
else if (signal_handler != (vaddr_t)SIG_DFL)
|
||||||
{
|
{
|
||||||
// call userspace signal handlers
|
// call userspace signal handlers
|
||||||
interrupt_stack.rsp -= 128; // skip possible red-zone
|
interrupt_stack.sp -= 128; // skip possible red-zone
|
||||||
write_to_stack(interrupt_stack.rsp, interrupt_stack.rip);
|
write_to_stack(interrupt_stack.sp, interrupt_stack.ip);
|
||||||
write_to_stack(interrupt_stack.rsp, signal);
|
write_to_stack(interrupt_stack.sp, signal);
|
||||||
write_to_stack(interrupt_stack.rsp, signal_handler);
|
write_to_stack(interrupt_stack.sp, signal_handler);
|
||||||
interrupt_stack.rip = (uintptr_t)signal_trampoline;
|
interrupt_stack.ip = (uintptr_t)signal_trampoline;
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
|
@ -373,22 +435,29 @@ namespace Kernel
|
||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
|
|
||||||
void Thread::validate_stack() const
|
void Thread::on_exit_trampoline(Thread* thread)
|
||||||
{
|
{
|
||||||
if (stack_base() <= m_rsp && m_rsp <= stack_base() + stack_size())
|
thread->on_exit();
|
||||||
return;
|
|
||||||
if (interrupt_stack_base() <= m_rsp && m_rsp <= interrupt_stack_base() + interrupt_stack_size())
|
|
||||||
return;
|
|
||||||
Kernel::panic("rsp {8H}, stack {8H}->{8H}, interrupt_stack {8H}->{8H}", m_rsp,
|
|
||||||
stack_base(), stack_base() + stack_size(),
|
|
||||||
interrupt_stack_base(), interrupt_stack_base() + interrupt_stack_size()
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void Thread::on_exit()
|
void Thread::on_exit()
|
||||||
{
|
{
|
||||||
ASSERT(this == &Thread::current());
|
ASSERT(this == &Thread::current());
|
||||||
Scheduler::get().terminate_thread(this);
|
if (!m_delete_process && has_process())
|
||||||
|
{
|
||||||
|
if (process().on_thread_exit(*this))
|
||||||
|
{
|
||||||
|
Processor::set_interrupt_state(InterruptState::Disabled);
|
||||||
|
setup_process_cleanup();
|
||||||
|
Scheduler::get().yield();
|
||||||
|
}
|
||||||
|
else
|
||||||
|
Scheduler::get().terminate_thread(this);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
Scheduler::get().terminate_thread(this);
|
||||||
|
}
|
||||||
ASSERT_NOT_REACHED();
|
ASSERT_NOT_REACHED();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1,33 +0,0 @@
|
||||||
.section .text
|
|
||||||
|
|
||||||
.global _start
|
|
||||||
_start:
|
|
||||||
# zero out stack frame
|
|
||||||
pushl $0
|
|
||||||
pushl $0
|
|
||||||
movl %esp, %ebp
|
|
||||||
|
|
||||||
# FIXME: handle stack alignment
|
|
||||||
ud2
|
|
||||||
|
|
||||||
# push argc, argv, environ for call to main
|
|
||||||
pushl %edx
|
|
||||||
pushl %esi
|
|
||||||
pushl %edi
|
|
||||||
|
|
||||||
# initialize libc
|
|
||||||
pushl %edx
|
|
||||||
call _init_libc
|
|
||||||
addl $4, %esp
|
|
||||||
|
|
||||||
# call global constructos
|
|
||||||
call _init
|
|
||||||
|
|
||||||
# call main, arguments are already on stack
|
|
||||||
call main
|
|
||||||
|
|
||||||
# cleanly exit the process
|
|
||||||
pushl %eax
|
|
||||||
call exit
|
|
||||||
|
|
||||||
.size _start, . - _start
|
|
|
@ -0,0 +1,34 @@
|
||||||
|
.section .text
|
||||||
|
|
||||||
|
.global _start
|
||||||
|
_start:
|
||||||
|
pushl $0
|
||||||
|
pushl %edi
|
||||||
|
pushl %esi
|
||||||
|
pushl %edx
|
||||||
|
|
||||||
|
# STACK LAYOUT
|
||||||
|
# null
|
||||||
|
# argc
|
||||||
|
# argv
|
||||||
|
# envp
|
||||||
|
|
||||||
|
xorl %ebp, %ebp
|
||||||
|
|
||||||
|
# init libc (envp already as argument)
|
||||||
|
call _init_libc
|
||||||
|
|
||||||
|
# call global constructors
|
||||||
|
call _init
|
||||||
|
|
||||||
|
# call main
|
||||||
|
movl 0(%esp), %eax
|
||||||
|
xchgl %eax, 8(%esp)
|
||||||
|
movl %eax, (%esp)
|
||||||
|
call main
|
||||||
|
|
||||||
|
subl $12, %esp
|
||||||
|
pushl %eax
|
||||||
|
call exit
|
||||||
|
|
||||||
|
.size _start, . - _start
|
|
@ -1,4 +1,4 @@
|
||||||
/* i386 crti.s */
|
/* i686 crti.s */
|
||||||
.section .init
|
.section .init
|
||||||
.global _init
|
.global _init
|
||||||
.type _init, @function
|
.type _init, @function
|
|
@ -1,4 +1,4 @@
|
||||||
/* i386 crtn.s */
|
/* i686 crtn.s */
|
||||||
.section .init
|
.section .init
|
||||||
/* gcc will nicely put the contents of crtend.o's .init section here. */
|
/* gcc will nicely put the contents of crtend.o's .init section here. */
|
||||||
popl %ebp
|
popl %ebp
|
|
@ -2,34 +2,34 @@
|
||||||
|
|
||||||
.global _start
|
.global _start
|
||||||
_start:
|
_start:
|
||||||
# Set up end of the stack frame linked list.
|
pushq $0
|
||||||
movq $0, %rbp
|
|
||||||
pushq %rbp # rip=0
|
|
||||||
pushq %rbp # rbp=0
|
|
||||||
movq %rsp, %rbp
|
|
||||||
|
|
||||||
# Save argc, argv, environ
|
|
||||||
pushq %rdx
|
|
||||||
pushq %rsi
|
|
||||||
pushq %rdi
|
pushq %rdi
|
||||||
|
pushq %rsi
|
||||||
|
pushq %rdx
|
||||||
|
|
||||||
# Prepare malloc, environment
|
# STACK LAYOUT
|
||||||
movq %rdx, %rdi
|
# null
|
||||||
|
# argc
|
||||||
|
# argv
|
||||||
|
# envp
|
||||||
|
|
||||||
|
xorq %rbp, %rbp
|
||||||
|
|
||||||
|
# init libc
|
||||||
|
movq 0(%rsp), %rdi
|
||||||
call _init_libc
|
call _init_libc
|
||||||
|
|
||||||
# Call global constructos
|
# call global constructors
|
||||||
call _init
|
call _init
|
||||||
|
|
||||||
# Restore argc, argv, environ
|
# call main
|
||||||
popq %rdi
|
movq 16(%rsp), %rdi
|
||||||
popq %rsi
|
movq 8(%rsp), %rsi
|
||||||
popq %rdx
|
movq 0(%rsp), %rdx
|
||||||
|
|
||||||
# Run main
|
|
||||||
call main
|
call main
|
||||||
|
|
||||||
# Cleanly exit the process
|
# call exit
|
||||||
movl %eax, %edi
|
movq %rax, %rdi
|
||||||
call exit
|
call exit
|
||||||
|
|
||||||
.size _start, . - _start
|
.size _start, . - _start
|
||||||
|
|
|
@ -1,37 +1,41 @@
|
||||||
#include <icxxabi.h>
|
#include <BAN/Assert.h>
|
||||||
|
#include <stdint.h>
|
||||||
|
#include <stddef.h>
|
||||||
|
|
||||||
#define ATEXIT_MAX_FUNCS 128
|
#define ATEXIT_MAX_FUNCS 128
|
||||||
|
|
||||||
struct atexit_func_entry_t
|
struct atexit_func_entry_t
|
||||||
{
|
{
|
||||||
void (*destructor)(void*);
|
void(*func)(void*);
|
||||||
void* data;
|
void* arg;
|
||||||
void* dso_handle;
|
void* dso_handle;
|
||||||
};
|
};
|
||||||
|
|
||||||
static atexit_func_entry_t __atexit_funcs[ATEXIT_MAX_FUNCS];
|
static atexit_func_entry_t __atexit_funcs[ATEXIT_MAX_FUNCS];
|
||||||
static int __atexit_func_count = 0;
|
static size_t __atexit_func_count = 0;
|
||||||
|
|
||||||
int __cxa_atexit(void (*func)(void*), void* data, void* dso_handle)
|
extern "C" int __cxa_atexit(void(*func)(void*), void* arg, void* dso_handle)
|
||||||
{
|
{
|
||||||
if (__atexit_func_count >= ATEXIT_MAX_FUNCS)
|
if (__atexit_func_count >= ATEXIT_MAX_FUNCS)
|
||||||
return -1;;
|
return -1;
|
||||||
__atexit_funcs[__atexit_func_count].destructor = func;
|
auto& atexit_func = __atexit_funcs[__atexit_func_count++];
|
||||||
__atexit_funcs[__atexit_func_count].data = data;
|
atexit_func.func = func;
|
||||||
__atexit_funcs[__atexit_func_count].dso_handle = dso_handle;
|
atexit_func.arg = arg;
|
||||||
__atexit_func_count++;
|
atexit_func.dso_handle = dso_handle;
|
||||||
return 0;
|
return 0;
|
||||||
};
|
};
|
||||||
|
|
||||||
void __cxa_finalize(void* func)
|
extern "C" void __cxa_finalize(void* f)
|
||||||
{
|
{
|
||||||
for (int i = __atexit_func_count - 1; i >= 0; i--)
|
for (size_t i = __atexit_func_count; i > 0; i--)
|
||||||
{
|
{
|
||||||
if (func && func != __atexit_funcs[i].destructor)
|
auto& atexit_func = __atexit_funcs[i - 1];
|
||||||
|
if (atexit_func.func == nullptr)
|
||||||
continue;
|
continue;
|
||||||
if (__atexit_funcs[i].destructor == nullptr)
|
if (f == nullptr || f == atexit_func.func)
|
||||||
continue;
|
{
|
||||||
__atexit_funcs[i].destructor(__atexit_funcs[i].data);
|
atexit_func.func(atexit_func.arg);
|
||||||
__atexit_funcs[i].destructor = nullptr;
|
atexit_func.func = nullptr;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
};
|
||||||
|
|
|
@ -2,6 +2,11 @@
|
||||||
|
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
|
if [[ -z $BANAN_ARCH ]]; then
|
||||||
|
echo "You must set the BANAN_ARCH environment variable" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
if [[ -z $BANAN_DISK_IMAGE_PATH ]]; then
|
if [[ -z $BANAN_DISK_IMAGE_PATH ]]; then
|
||||||
echo "You must set the BANAN_DISK_IMAGE_PATH environment variable" >&2
|
echo "You must set the BANAN_DISK_IMAGE_PATH environment variable" >&2
|
||||||
exit 1
|
exit 1
|
||||||
|
@ -26,7 +31,7 @@ ROOT_PARTITION_INDEX=2
|
||||||
ROOT_PARTITION_INFO=$(fdisk -x $BANAN_DISK_IMAGE_PATH | grep "^$BANAN_DISK_IMAGE_PATH" | head -$ROOT_PARTITION_INDEX | tail -1)
|
ROOT_PARTITION_INFO=$(fdisk -x $BANAN_DISK_IMAGE_PATH | grep "^$BANAN_DISK_IMAGE_PATH" | head -$ROOT_PARTITION_INDEX | tail -1)
|
||||||
ROOT_PARTITION_GUID=$(echo $ROOT_PARTITION_INFO | cut -d' ' -f6)
|
ROOT_PARTITION_GUID=$(echo $ROOT_PARTITION_INFO | cut -d' ' -f6)
|
||||||
|
|
||||||
INSTALLER_BUILD_DIR=$BANAN_ROOT_DIR/bootloader/installer/build
|
INSTALLER_BUILD_DIR=$BANAN_ROOT_DIR/bootloader/installer/build/$BANAN_ARCH
|
||||||
BOOTLOADER_ELF=$BANAN_BUILD_DIR/bootloader/bios/bootloader
|
BOOTLOADER_ELF=$BANAN_BUILD_DIR/bootloader/bios/bootloader
|
||||||
|
|
||||||
if ! [ -f $BOOTLOADER_ELF ]; then
|
if ! [ -f $BOOTLOADER_ELF ]; then
|
||||||
|
@ -37,11 +42,11 @@ fi
|
||||||
if ! [ -d $INSTALLER_BUILD_DIR ]; then
|
if ! [ -d $INSTALLER_BUILD_DIR ]; then
|
||||||
mkdir -p $INSTALLER_BUILD_DIR
|
mkdir -p $INSTALLER_BUILD_DIR
|
||||||
cd $INSTALLER_BUILD_DIR
|
cd $INSTALLER_BUILD_DIR
|
||||||
$CMAKE_COMMAND ..
|
$CMAKE_COMMAND ../..
|
||||||
fi
|
fi
|
||||||
|
|
||||||
cd $INSTALLER_BUILD_DIR
|
cd $INSTALLER_BUILD_DIR
|
||||||
make
|
make
|
||||||
|
|
||||||
echo installing bootloader
|
echo installing bootloader
|
||||||
$INSTALLER_BUILD_DIR/x86_64-banan_os-bootloader-installer $BOOTLOADER_ELF $BANAN_DISK_IMAGE_PATH $ROOT_PARTITION_GUID
|
$INSTALLER_BUILD_DIR/banan_os-bootloader-installer $BOOTLOADER_ELF $BANAN_DISK_IMAGE_PATH $ROOT_PARTITION_GUID
|
||||||
|
|
|
@ -19,7 +19,12 @@ else
|
||||||
DISK_ARGS="-device ahci,id=ahci -device ide-hd,drive=disk,bus=ahci.0"
|
DISK_ARGS="-device ahci,id=ahci -device ide-hd,drive=disk,bus=ahci.0"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
qemu-system-$BANAN_ARCH \
|
QEMU_ARCH=$BANAN_ARCH
|
||||||
|
if [ $BANAN_ARCH = "i686" ]; then
|
||||||
|
QEMU_ARCH=i386
|
||||||
|
fi
|
||||||
|
|
||||||
|
qemu-system-$QEMU_ARCH \
|
||||||
-m 1G \
|
-m 1G \
|
||||||
-smp 4 \
|
-smp 4 \
|
||||||
$BIOS_ARGS \
|
$BIOS_ARGS \
|
||||||
|
|
|
@ -260,7 +260,7 @@ i64 puzzle2(FILE* fp)
|
||||||
clock_gettime(CLOCK_MONOTONIC, &time_stop);
|
clock_gettime(CLOCK_MONOTONIC, &time_stop);
|
||||||
|
|
||||||
u64 duration_us = (time_stop.tv_sec * 1'000'000 + time_stop.tv_nsec / 1'000) - (time_start.tv_sec * 1'000'000 + time_start.tv_nsec / 1'000);
|
u64 duration_us = (time_stop.tv_sec * 1'000'000 + time_stop.tv_nsec / 1'000) - (time_start.tv_sec * 1'000'000 + time_start.tv_nsec / 1'000);
|
||||||
printf("took %lu.%03lu ms, estimate %lu s\n", duration_us / 1000, duration_us % 1000, (values_sorted[0].size() - xi - 2) * duration_us / 1'000'000);
|
printf("step took %" PRIu64 ".%03" PRIu64 " ms, estimate %" PRIu64 " s\n", duration_us / 1000, duration_us % 1000, (values_sorted[0].size() - xi - 2) * duration_us / 1'000'000);
|
||||||
}
|
}
|
||||||
|
|
||||||
return result;
|
return result;
|
||||||
|
|
|
@ -87,7 +87,7 @@ BAN::ErrorOr<BAN::UniqPtr<Image>> load_netbpm(const void* mmap_addr, size_t size
|
||||||
return BAN::Error::from_errno(EINVAL);
|
return BAN::Error::from_errno(EINVAL);
|
||||||
}
|
}
|
||||||
|
|
||||||
printf("Netbpm image %" PRIuPTR "x%" PRIuPTR "\n", *width, *height);
|
printf("Netbpm image %" PRIu64 "x%" PRIu64 "\n", *width, *height);
|
||||||
|
|
||||||
BAN::Vector<Image::Color> bitmap;
|
BAN::Vector<Image::Color> bitmap;
|
||||||
TRY(bitmap.resize(*width * *height));
|
TRY(bitmap.resize(*width * *height));
|
||||||
|
|
|
@ -152,7 +152,7 @@ int test2_job2()
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
*(size_t*)addr = 0x123456789;
|
*(size_t*)addr = 0x12345678;
|
||||||
|
|
||||||
if (msync(addr, sizeof(size_t), MS_SYNC) == -1)
|
if (msync(addr, sizeof(size_t), MS_SYNC) == -1)
|
||||||
{
|
{
|
||||||
|
|
Loading…
Reference in New Issue