Compare commits
41 Commits: d73a270fb1 ... 543bb3cc4b

543bb3cc4b
1c44d24b76
5305f962c0
b774f147da
2a5921b9c9
60cb392e97
d012c538c3
1c88d0d7f7
773b8de8ba
5d8dd090a9
2cbfe70a28
16dbfbb267
99fc7817c6
265b4c2b22
db9db2cc40
3642eabac0
98e05fd179
1ccff9478f
85f9b585f5
01626b4c9f
a85841ca76
fe6c4cd0b5
f1585d09e1
252784ff5b
6f9dc2a9b8
7edfae8583
28275d86ea
2d19b5074e
e62cc17797
407eed212a
12d47858c1
3d34e6e6d9
dc188e601a
8a2df23260
46079a8612
201d752850
63b3d9875d
8cd2d772b0
2ff3f88b4d
ab4dd6a268
0094efc7f4
|
@@ -1,5 +1,7 @@
#pragma once

#include <BAN/Traits.h>

namespace BAN
{
@@ -13,8 +15,36 @@ namespace BAN
memory_order_seq_cst = __ATOMIC_SEQ_CST,
};

template<typename T, MemoryOrder MEM_ORDER = MemoryOrder::memory_order_seq_cst>
requires requires { __atomic_always_lock_free(sizeof(T), 0); }
template<typename T> concept atomic_c = is_integral_v<T> || is_pointer_v<T>;
template<typename T> concept atomic_lockfree_c = (is_integral_v<T> || is_pointer_v<T>) && __atomic_always_lock_free(sizeof(T), 0);

template<atomic_lockfree_c T, atomic_c U>
inline void atomic_store(T& obj, U value, MemoryOrder mem_order = MemoryOrder::memory_order_seq_cst) { __atomic_store_n(&obj, value, mem_order); }
template<atomic_lockfree_c T>
inline T atomic_load(T& obj, MemoryOrder mem_order = MemoryOrder::memory_order_seq_cst) { return __atomic_load_n(&obj, mem_order); }

template<atomic_lockfree_c T, atomic_c U>
inline T atomic_exchange(T& obj, U value, MemoryOrder mem_order = MemoryOrder::memory_order_seq_cst) { return __atomic_exchange_n(&obj, value, mem_order); }
template<atomic_lockfree_c T, atomic_lockfree_c U, atomic_c V>
inline bool atomic_compare_exchange(T& obj, U& expected, V value, MemoryOrder mem_order = MemoryOrder::memory_order_seq_cst) { return __atomic_compare_exchange_n(&obj, &expected, value, false, mem_order, mem_order); }

#define DECL_ATOMIC_INLINE template<atomic_lockfree_c T, atomic_c U> inline
DECL_ATOMIC_INLINE T atomic_add_fetch (T& obj, U value, MemoryOrder mem_order = MemoryOrder::memory_order_seq_cst) { return __atomic_add_fetch (&obj, value, mem_order); }
DECL_ATOMIC_INLINE T atomic_sub_fetch (T& obj, U value, MemoryOrder mem_order = MemoryOrder::memory_order_seq_cst) { return __atomic_sub_fetch (&obj, value, mem_order); }
DECL_ATOMIC_INLINE T atomic_and_fetch (T& obj, U value, MemoryOrder mem_order = MemoryOrder::memory_order_seq_cst) { return __atomic_and_fetch (&obj, value, mem_order); }
DECL_ATOMIC_INLINE T atomic_xor_fetch (T& obj, U value, MemoryOrder mem_order = MemoryOrder::memory_order_seq_cst) { return __atomic_xor_fetch (&obj, value, mem_order); }
DECL_ATOMIC_INLINE T atomic_or_fetch (T& obj, U value, MemoryOrder mem_order = MemoryOrder::memory_order_seq_cst) { return __atomic_or_fetch (&obj, value, mem_order); }
DECL_ATOMIC_INLINE T atomic_nand_fetch(T& obj, U value, MemoryOrder mem_order = MemoryOrder::memory_order_seq_cst) { return __atomic_nand_fetch(&obj, value, mem_order); }

DECL_ATOMIC_INLINE T atomic_fetch_add (T& obj, U value, MemoryOrder mem_order = MemoryOrder::memory_order_seq_cst) { return __atomic_fetch_add (&obj, value, mem_order); }
DECL_ATOMIC_INLINE T atomic_fetch_sub (T& obj, U value, MemoryOrder mem_order = MemoryOrder::memory_order_seq_cst) { return __atomic_fetch_sub (&obj, value, mem_order); }
DECL_ATOMIC_INLINE T atomic_fetch_and (T& obj, U value, MemoryOrder mem_order = MemoryOrder::memory_order_seq_cst) { return __atomic_fetch_and (&obj, value, mem_order); }
DECL_ATOMIC_INLINE T atomic_fetch_xor (T& obj, U value, MemoryOrder mem_order = MemoryOrder::memory_order_seq_cst) { return __atomic_fetch_xor (&obj, value, mem_order); }
DECL_ATOMIC_INLINE T atomic_fetch_or (T& obj, U value, MemoryOrder mem_order = MemoryOrder::memory_order_seq_cst) { return __atomic_fetch_or (&obj, value, mem_order); }
DECL_ATOMIC_INLINE T atomic_fetch_nand(T& obj, U value, MemoryOrder mem_order = MemoryOrder::memory_order_seq_cst) { return __atomic_fetch_nand(&obj, value, mem_order); }
#undef DECL_ATOMIC_INLINE

template<atomic_lockfree_c T, MemoryOrder MEM_ORDER = MemoryOrder::memory_order_seq_cst>
class Atomic
{
Atomic(const Atomic&) = delete;
@@ -26,41 +56,41 @@ namespace BAN
constexpr Atomic() : m_value(0) {}
constexpr Atomic(T val) : m_value(val) {}

inline T load(MemoryOrder mem_order = MEM_ORDER) const volatile { return __atomic_load_n(&m_value, mem_order); }
inline void store(T val, MemoryOrder mem_order = MEM_ORDER) volatile { __atomic_store_n(&m_value, val, mem_order); }
inline T load(MemoryOrder mem_order = MEM_ORDER) const volatile { return atomic_load(m_value, mem_order); }
inline void store(T val, MemoryOrder mem_order = MEM_ORDER) volatile { atomic_store(m_value, val, mem_order); }

inline T operator=(T val) volatile { store(val); return val; }

inline operator T() const volatile { return load(); }

inline T operator+=(T val) volatile { return __atomic_add_fetch(&m_value, val, MEM_ORDER); }
inline T operator-=(T val) volatile { return __atomic_sub_fetch(&m_value, val, MEM_ORDER); }
inline T operator&=(T val) volatile { return __atomic_and_fetch(&m_value, val, MEM_ORDER); }
inline T operator^=(T val) volatile { return __atomic_xor_fetch(&m_value, val, MEM_ORDER); }
inline T operator|=(T val) volatile { return __atomic_or_fetch(&m_value, val, MEM_ORDER); }
inline T operator+=(T val) volatile { return atomic_add_fetch(m_value, val, MEM_ORDER); }
inline T operator-=(T val) volatile { return atomic_sub_fetch(m_value, val, MEM_ORDER); }
inline T operator&=(T val) volatile { return atomic_and_fetch(m_value, val, MEM_ORDER); }
inline T operator^=(T val) volatile { return atomic_xor_fetch(m_value, val, MEM_ORDER); }
inline T operator|=(T val) volatile { return atomic_or_fetch(m_value, val, MEM_ORDER); }

inline T operator--() volatile { return __atomic_sub_fetch(&m_value, 1, MEM_ORDER); }
inline T operator++() volatile { return __atomic_add_fetch(&m_value, 1, MEM_ORDER); }
inline T operator--() volatile { return atomic_sub_fetch(m_value, 1, MEM_ORDER); }
inline T operator++() volatile { return atomic_add_fetch(m_value, 1, MEM_ORDER); }

inline T operator--(int) volatile { return __atomic_fetch_sub(&m_value, 1, MEM_ORDER); }
inline T operator++(int) volatile { return __atomic_fetch_add(&m_value, 1, MEM_ORDER); }
inline T operator--(int) volatile { return atomic_fetch_sub(m_value, 1, MEM_ORDER); }
inline T operator++(int) volatile { return atomic_fetch_add(m_value, 1, MEM_ORDER); }

inline bool compare_exchange(T& expected, T desired, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_compare_exchange_n(&m_value, &expected, desired, false, mem_order, mem_order); }
inline T exchange(T desired, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_exchange_n(&m_value, desired, mem_order); };
inline bool compare_exchange(T& expected, T desired, MemoryOrder mem_order = MEM_ORDER) volatile { return atomic_compare_exchange(m_value, expected, desired, mem_order); }
inline T exchange(T desired, MemoryOrder mem_order = MEM_ORDER) volatile { return atomic_exchange(m_value, desired, mem_order); };

inline T add_fetch (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_add_fetch (&m_value, val, mem_order); }
inline T sub_fetch (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_sub_fetch (&m_value, val, mem_order); }
inline T and_fetch (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_and_fetch (&m_value, val, mem_order); }
inline T xor_fetch (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_xor_fetch (&m_value, val, mem_order); }
inline T or_fetch (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_or_fetch (&m_value, val, mem_order); }
inline T nand_fetch(T val, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_nand_fetch(&m_value, val, mem_order); }
inline T add_fetch (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return atomic_add_fetch (m_value, val, mem_order); }
inline T sub_fetch (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return atomic_sub_fetch (m_value, val, mem_order); }
inline T and_fetch (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return atomic_and_fetch (m_value, val, mem_order); }
inline T xor_fetch (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return atomic_xor_fetch (m_value, val, mem_order); }
inline T or_fetch (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return atomic_or_fetch (m_value, val, mem_order); }
inline T nand_fetch(T val, MemoryOrder mem_order = MEM_ORDER) volatile { return atomic_nand_fetch(m_value, val, mem_order); }

inline T fetch_add (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_fetch_add (&m_value, val, mem_order); }
inline T fetch_sub (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_fetch_sub (&m_value, val, mem_order); }
inline T fetch_and (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_fetch_and (&m_value, val, mem_order); }
inline T fetch_xor (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_fetch_xor (&m_value, val, mem_order); }
inline T fetch_or (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_fetch__or (&m_value, val, mem_order); }
inline T fetch_nand(T val, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_nfetch_and(&m_value, val, mem_order); }
inline T fetch_add (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return atomic_fetch_add (m_value, val, mem_order); }
inline T fetch_sub (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return atomic_fetch_sub (m_value, val, mem_order); }
inline T fetch_and (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return atomic_fetch_and (m_value, val, mem_order); }
inline T fetch_xor (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return atomic_fetch_xor (m_value, val, mem_order); }
inline T fetch_or (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return atomic_fetch_or (m_value, val, mem_order); }
inline T fetch_nand(T val, MemoryOrder mem_order = MEM_ORDER) volatile { return atomic_fetch_nand(m_value, val, mem_order); }

private:
T m_value;
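The free functions above make atomic operations available on any plain integral or pointer object, not just on a `BAN::Atomic<T>` wrapper; the class itself is now a thin layer over them. A minimal sketch of how they compose into a spin flag, assuming it is compiled inside the banan-os tree where `<BAN/Atomic.h>` and `sched_yield()` are available:

```cpp
#include <BAN/Atomic.h>
#include <sched.h>

// Hypothetical spin flag built only from the new free functions; a plain
// `int` satisfies atomic_lockfree_c, so no BAN::Atomic wrapper is needed.
static int s_flag = 0;

void flag_lock()
{
    int expected = 0;
    // retry the 0 -> 1 swap with acquire semantics until it succeeds
    while (!BAN::atomic_compare_exchange(s_flag, expected, 1, BAN::MemoryOrder::memory_order_acquire))
    {
        sched_yield();
        expected = 0; // compare_exchange wrote back the value it observed
    }
}

void flag_unlock()
{
    BAN::atomic_store(s_flag, 0, BAN::MemoryOrder::memory_order_release);
}
```

The pthread changes later in this comparison use exactly this pattern for `pthread_spinlock_t` and `pthread_mutex_t`, which is why the locks there can stay plain structs instead of holding `BAN::Atomic` members.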
@@ -16,9 +16,8 @@ signal_trampoline:
movl 36(%esp), %eax

// align stack to 16 bytes
movl %esp, %ebx
andl $0x0F, %ebx
subl %ebx, %esp
movl %esp, %ebp
andl $-16, %esp

subl $12, %esp
pushl %edi
@@ -26,7 +25,7 @@ signal_trampoline:
addl $16, %esp

// restore stack
addl %ebx, %esp
movl %ebp, %esp
popa

leave
@@ -27,14 +27,13 @@ signal_trampoline:
movq 120(%rsp), %rax

// align stack to 16 bytes
movq %rsp, %rbx
andq $0x0F, %rbx
subq %rbx, %rsp
movq %rsp, %rbp
andq $-16, %rsp

call *%rax

// restore stack
addq %rbx, %rsp
movq %rbp, %rsp
popq %r15
popq %r14
popq %r13
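Both trampolines switch strategy here: instead of remembering the misalignment in a scratch register (the `andl/andq $0x0F` plus a subtract, undone later with an add), the new code saves the original stack pointer in %ebp/%rbp, masks the stack down with `$-16`, and simply restores the saved pointer afterwards. The mask itself is just rounding an address down to a 16-byte boundary; a stand-alone sketch of the arithmetic:

```cpp
#include <cstdint>

// Equivalent of "andl $-16, %esp" / "andq $-16, %rsp": clear the low four
// bits so the stack pointer sits on a 16-byte boundary before the call,
// as the SysV calling conventions expect.
constexpr uintptr_t align_down_16(uintptr_t sp)
{
    return sp & ~uintptr_t(15);
}

static_assert(align_down_16(0x1003) == 0x1000);
static_assert(align_down_16(0x1010) == 0x1010);
```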
@@ -72,8 +72,9 @@ namespace Kernel
BAN::ErrorOr<long> sys_nanosleep(const timespec* rqtp, timespec* rmtp);
BAN::ErrorOr<long> sys_setitimer(int which, const itimerval* value, itimerval* ovalue);

BAN::ErrorOr<long> sys_setpwd(const char* path);
BAN::ErrorOr<long> sys_getpwd(char* buffer, size_t size);
BAN::ErrorOr<long> sys_getcwd(char* buffer, size_t size);
BAN::ErrorOr<long> sys_chdir(const char* path);
BAN::ErrorOr<long> sys_fchdir(int fildes);

BAN::ErrorOr<long> sys_setuid(uid_t);
BAN::ErrorOr<long> sys_setgid(gid_t);
@@ -78,16 +78,13 @@ namespace Kernel
{
LockGuard _(m_mutex);

if (buffer.size() > m_buffer.size())
buffer = buffer.slice(0, m_buffer.size());

while (m_buffer.size() - m_buffer_size < buffer.size())
while (m_buffer_size >= m_buffer.size())
{
LockFreeGuard lock_free(m_mutex);
TRY(Thread::current().block_or_eintr_or_timeout_ms(m_thread_blocker, 100, false));
}

const size_t to_copy = buffer.size();
const size_t to_copy = BAN::Math::min(buffer.size(), m_buffer.size() - m_buffer_size);
const size_t buffer_head = (m_buffer_tail + m_buffer_size) % m_buffer.size();

if (buffer_head + to_copy <= m_buffer.size())
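The rewritten loop only sleeps while the ring buffer is completely full, and then copies at most the free space instead of insisting on the whole request, so a partial write can complete without waiting for the reader to drain everything. A stand-alone model of the head arithmetic, using plain std:: types and hypothetical names rather than the kernel classes:

```cpp
#include <algorithm>
#include <cstddef>
#include <cstring>

// Append up to `len` bytes into a ring of `capacity` bytes whose valid data
// starts at `tail` and is `size` bytes long; returns how much was copied.
size_t ring_write(unsigned char* ring, size_t capacity, size_t tail, size_t& size,
                  const unsigned char* src, size_t len)
{
    const size_t to_copy = std::min(len, capacity - size);   // free space only
    const size_t head    = (tail + size) % capacity;          // first free byte
    const size_t first   = std::min(to_copy, capacity - head); // bytes before wrap
    std::memcpy(ring + head, src, first);
    std::memcpy(ring, src + first, to_copy - first);           // wrapped remainder
    size += to_copy;
    return to_copy; // may be less than len, exactly like the partial write above
}
```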
@@ -531,9 +531,9 @@ namespace Kernel
break;
if ((header.flags & (FIN | ACK)) == (FIN | ACK))
m_next_state = State::TimeWait;
if (header.flags & FIN)
else if (header.flags & FIN)
m_next_state = State::Closing;
if (header.flags & ACK)
else if (header.flags & ACK)
m_state = State::FinWait2;
else
m_next_flags = ACK;
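The change turns three independent `if` statements into an if/else-if chain, so a segment carrying both FIN and ACK no longer falls through into the later branches after the TimeWait transition has already been scheduled. The intended state machine for a socket sitting in FinWait1, reduced to a sketch with hypothetical flag constants:

```cpp
enum class State { FinWait1, FinWait2, Closing, TimeWait };

constexpr unsigned FIN = 0x01; // illustrative bit values, not the kernel's
constexpr unsigned ACK = 0x10;

State fin_wait1_next_state(unsigned flags)
{
    if ((flags & (FIN | ACK)) == (FIN | ACK))
        return State::TimeWait;  // peer acknowledged our FIN and sent its own
    if (flags & FIN)
        return State::Closing;   // simultaneous close
    if (flags & ACK)
        return State::FinWait2;  // our FIN acknowledged, wait for the peer's FIN
    return State::FinWait1;      // nothing relevant in this segment
}
```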
@@ -567,6 +567,11 @@ namespace Kernel

auto working_directory = TRY(m_working_directory.clone());

BAN::Vector<BAN::String> cmdline;
TRY(cmdline.resize(m_cmdline.size()));
for (size_t i = 0; i < m_cmdline.size(); i++)
TRY(cmdline[i].append(m_cmdline[i]));

auto open_file_descriptors = TRY(BAN::UniqPtr<OpenFileDescriptorSet>::create(m_credentials));
TRY(open_file_descriptors->clone_from(m_open_file_descriptors));

@@ -578,6 +583,7 @@ namespace Kernel
Process* forked = create_process(m_credentials, m_pid, m_sid, m_pgrp);
forked->m_controlling_terminal = m_controlling_terminal;
forked->m_working_directory = BAN::move(working_directory);
forked->m_cmdline = BAN::move(m_cmdline);
forked->m_page_table = BAN::move(page_table);
forked->m_open_file_descriptors = BAN::move(*open_file_descriptors);
forked->m_mapped_regions = BAN::move(mapped_regions);
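fork() now gives the child its own copy of the command line: every argument string is cloned element by element, so parent and child can modify or free their argv independently after the fork. The same idea expressed with standard library types, purely for illustration (std::, not BAN::):

```cpp
#include <string>
#include <vector>

// A deep copy gives the child process its own heap allocations for every
// argument, instead of two processes pointing at the same buffers.
std::vector<std::string> clone_cmdline(const std::vector<std::string>& cmdline)
{
    return std::vector<std::string>(cmdline.begin(), cmdline.end()); // element-wise copy
}
```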
@@ -1648,17 +1654,7 @@ namespace Kernel
return TRY(m_open_file_descriptors.read_dir_entries(fd, list, list_len));
}

BAN::ErrorOr<long> Process::sys_setpwd(const char* path)
{
LockGuard _(m_process_lock);

auto file = TRY(find_file(AT_FDCWD, path, O_SEARCH));
m_working_directory = BAN::move(file);

return 0;
}

BAN::ErrorOr<long> Process::sys_getpwd(char* buffer, size_t size)
BAN::ErrorOr<long> Process::sys_getcwd(char* buffer, size_t size)
{
LockGuard _(m_process_lock);

@@ -1673,6 +1669,28 @@ namespace Kernel
return (long)buffer;
}

BAN::ErrorOr<long> Process::sys_chdir(const char* path)
{
LockGuard _(m_process_lock);

TRY(validate_string_access(path));

auto file = TRY(find_file(AT_FDCWD, path, O_SEARCH));
m_working_directory = BAN::move(file);

return 0;
}

BAN::ErrorOr<long> Process::sys_fchdir(int fildes)
{
LockGuard _(m_process_lock);

auto file = TRY(m_open_file_descriptors.file_of(fildes));
m_working_directory = BAN::move(file);

return 0;
}

BAN::ErrorOr<long> Process::sys_mmap(const sys_mmap_t* args)
{
{
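With the non-standard sys_setpwd/sys_getpwd pair replaced by getcwd/chdir and the new fchdir, the usual POSIX idioms work unchanged from user space. A small sketch, assuming O_DIRECTORY and O_RDONLY behave as in POSIX:

```cpp
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

// Change into a directory twice: once by path with chdir(2), once by
// descriptor with the new fchdir(2), then print where we ended up.
int main(void)
{
    if (chdir("/usr") != 0)
        return 1;

    const int fd = open("/tmp", O_RDONLY | O_DIRECTORY);
    if (fd < 0)
        return 1;
    if (fchdir(fd) != 0) { close(fd); return 1; }
    close(fd);

    char buffer[256];
    if (getcwd(buffer, sizeof(buffer)) != NULL)
        printf("cwd: %s\n", buffer);
    return 0;
}
```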
@@ -593,7 +593,7 @@ namespace Kernel
{
if (is_interrupted_by_signal())
return BAN::Error::from_errno(EINTR);
thread_blocker.block_with_timeout_ns(wake_time_ns);
thread_blocker.block_with_wake_time_ns(wake_time_ns);
if (is_interrupted_by_signal())
return BAN::Error::from_errno(EINTR);
if (etimedout && SystemTimer::get().ms_since_boot() >= wake_time_ns)
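The renamed call takes an absolute wake-up time rather than a relative timeout, which matters in a loop like this one: the deadline is computed once and reused across spurious wakeups, instead of the countdown restarting on every iteration. The conversion is a single addition (a sketch of the arithmetic only, not the kernel's timer API):

```cpp
#include <cstdint>

// Convert a relative timeout into the absolute deadline the blocker expects.
constexpr uint64_t wake_time_ns(uint64_t now_ns, uint64_t timeout_ns)
{
    return now_ns + timeout_ns; // computed once, compared against on every wakeup
}

static_assert(wake_time_ns(1'000'000, 500) == 1'000'500);
```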
@@ -9,6 +9,7 @@ CONFIGURE_OPTIONS=(
'--enable-pc-files'
'--enable-sigwinch'
'--disable-widec'
'--with-shared'
'--without-ada'
'--without-manpages'
'--without-dlsym'
@ -1,116 +1,6 @@
|
|||
diff -ruN ncurses-6.5/config.log ncurses-6.5-banan_os/config.log
|
||||
--- ncurses-6.5/config.log 1970-01-01 02:00:00.000000000 +0200
|
||||
+++ ncurses-6.5-banan_os/config.log 2024-08-05 12:15:48.028466613 +0300
|
||||
@@ -0,0 +1,106 @@
|
||||
+This file contains any messages produced by compilers while
|
||||
+running configure, to aid debugging if configure makes a mistake.
|
||||
+
|
||||
+It was created by configure, which was
|
||||
+generated by GNU Autoconf 2.52.20231210. Invocation command line was
|
||||
+
|
||||
+ $ ./configure --host=x86_64-pc-banan_os --prefix=/usr/local --disable-db-intall --disable-widec --without-ada --without-manpages --without-dlsym --without-cxx-binding
|
||||
+
|
||||
+## ---------- ##
|
||||
+## Platform. ##
|
||||
+## ---------- ##
|
||||
+
|
||||
+hostname = arch
|
||||
+uname -m = x86_64
|
||||
+uname -r = 6.10.2-arch1-2
|
||||
+uname -s = Linux
|
||||
+uname -v = #1 SMP PREEMPT_DYNAMIC Sat, 03 Aug 2024 17:56:17 +0000
|
||||
+
|
||||
+/usr/bin/uname -p = unknown
|
||||
+/bin/uname -X = unknown
|
||||
+
|
||||
+/bin/arch = unknown
|
||||
+/usr/bin/arch -k = unknown
|
||||
+/usr/convex/getsysinfo = unknown
|
||||
+hostinfo = unknown
|
||||
+/bin/machine = unknown
|
||||
+/usr/bin/oslevel = unknown
|
||||
+/bin/universe = unknown
|
||||
+
|
||||
+PATH = /home/oskari/dev/banan-os/ports/../toolchain/local/bin:/home/oskari/.wasmer/bin:/home/oskari/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/bin:/usr/lib/jvm/default/bin:/usr/bin/site_perl:/usr/bin/vendor_perl:/usr/bin/core_perl:/usr/lib/rustup/bin
|
||||
+
|
||||
+## ------------ ##
|
||||
+## Core tests. ##
|
||||
+## ------------ ##
|
||||
+
|
||||
+configure:1175: PATH=".;."; conftest.sh
|
||||
+./configure: line 1176: conftest.sh: command not found
|
||||
+configure:1178: $? = 127
|
||||
+configure:1195: checking for ggrep
|
||||
+configure:1221: result: no
|
||||
+configure:1195: checking for grep
|
||||
+configure:1210: found /usr/bin/grep
|
||||
+configure:1218: result: grep
|
||||
+configure:1229: checking for egrep
|
||||
+configure:1285: result: grep -E
|
||||
+configure:1296: result: Configuring NCURSES 6.5 ABI 6 (Mon Aug 5 12:15:47 EEST 2024)
|
||||
+configure:1300: checking for package version
|
||||
+configure:1321: result: 6.5
|
||||
+configure:1324: checking for package patch date
|
||||
+configure:1341: result: 20240427
|
||||
+configure:1352: testing ABI VERSION 5:0:10 ...
|
||||
+configure:1356: testing VERSION_MAJOR 6 ...
|
||||
+configure:1360: testing VERSION_MINOR 5 ...
|
||||
+configure:1364: testing VERSION_PATCH 20240427 ...
|
||||
+configure:1509: checking build system type
|
||||
+configure:1527: result: x86_64-pc-linux-gnu
|
||||
+configure:1534: checking host system type
|
||||
+configure:1543: error: /bin/sh ./config.sub x86_64-pc-banan_os failed
|
||||
+
|
||||
+## ----------------- ##
|
||||
+## Cache variables. ##
|
||||
+## ----------------- ##
|
||||
+
|
||||
+ac_cv_build=x86_64-pc-linux-gnu
|
||||
+ac_cv_build_alias=x86_64-pc-linux-gnu
|
||||
+ac_cv_env_CC_set=
|
||||
+ac_cv_env_CC_value=
|
||||
+ac_cv_env_CFLAGS_set=
|
||||
+ac_cv_env_CFLAGS_value=
|
||||
+ac_cv_env_CPPFLAGS_set=
|
||||
+ac_cv_env_CPPFLAGS_value=
|
||||
+ac_cv_env_CPP_set=
|
||||
+ac_cv_env_CPP_value=
|
||||
+ac_cv_env_CXXCPP_set=
|
||||
+ac_cv_env_CXXCPP_value=
|
||||
+ac_cv_env_CXXFLAGS_set=
|
||||
+ac_cv_env_CXXFLAGS_value=
|
||||
+ac_cv_env_CXX_set=
|
||||
+ac_cv_env_CXX_value=
|
||||
+ac_cv_env_LDFLAGS_set=
|
||||
+ac_cv_env_LDFLAGS_value=
|
||||
+ac_cv_env_build_alias_set=
|
||||
+ac_cv_env_build_alias_value=
|
||||
+ac_cv_env_host_alias_set=set
|
||||
+ac_cv_env_host_alias_value=x86_64-pc-banan_os
|
||||
+ac_cv_env_target_alias_set=
|
||||
+ac_cv_env_target_alias_value=
|
||||
+ac_cv_host=
|
||||
+ac_cv_host_alias=x86_64-pc-banan_os
|
||||
+ac_cv_path_EGREP='grep -E'
|
||||
+ac_cv_prog_GREP=grep
|
||||
+cf_cv_abi_default=6
|
||||
+cf_cv_abi_version=6
|
||||
+cf_cv_rel_version=6.5
|
||||
+cf_cv_timestamp='Mon Aug 5 12:15:47 EEST 2024'
|
||||
+
|
||||
+## ------------ ##
|
||||
+## confdefs.h. ##
|
||||
+## ------------ ##
|
||||
+
|
||||
+#define PACKAGE "ncurses"
|
||||
+#define NCURSES_VERSION "6.5"
|
||||
+#define NCURSES_PATCHDATE 20240427
|
||||
+
|
||||
+
|
||||
+configure: exit 1
|
||||
diff -ruN ncurses-6.5/config.sub ncurses-6.5-banan_os/config.sub
|
||||
--- ncurses-6.5/config.sub 2023-12-27 16:41:27.000000000 +0200
|
||||
+++ ncurses-6.5-banan_os/config.sub 2024-08-05 12:17:03.025286556 +0300
|
||||
+++ ncurses-6.5-banan_os/config.sub 2025-04-19 05:53:50.863635047 +0300
|
||||
@@ -1768,7 +1768,7 @@
|
||||
| onefs* | tirtos* | phoenix* | fuchsia* | redox* | bme* \
|
||||
| midnightbsd* | amdhsa* | unleashed* | emscripten* | wasi* \
|
||||
|
|
|
@@ -0,0 +1,13 @@
diff -ruN ncurses-6.5/configure ncurses-6.5-banan_os/configure
--- ncurses-6.5/configure 2024-04-10 11:09:48.000000000 +0300
+++ ncurses-6.5-banan_os/configure 2025-04-19 05:54:18.828084370 +0300
@@ -6646,6 +6646,9 @@
MK_SHARED_LIB='${CC} ${LDFLAGS} ${CFLAGS} -G -Wl,-brtl -Wl,-blibpath:${RPATH_LIST}:/usr/lib -o $@'
fi
;;
+ (banan_os*)
+ MK_SHARED_LIB='${CC} ${LDFLAGS} ${CFLAGS} -shared -Wl,-soname,`basename $@` -o $@'
+ ;;
(beos*)
MK_SHARED_LIB='${CC} ${LDFLAGS} ${CFLAGS} -o $@ -Xlinker -soname=`basename $@` -nostart -e 0'
;;
@@ -6,7 +6,7 @@ DOWNLOAD_URL="https://github.com/openssl/openssl/releases/download/openssl-$VERS
DEPENDENCIES=('zlib')

configure() {
./Configure --prefix=/usr --openssldir=/etc/ssl -DOPENSSL_USE_IPV6=0 no-asm no-tests no-threads banan_os-generic zlib
./Configure --prefix=/usr --openssldir=/etc/ssl -DOPENSSL_USE_IPV6=0 no-asm no-tests banan_os-generic threads zlib
}

install() {
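Switching the OpenSSL port from no-threads to threads only works because the pthread pieces added later in this comparison (pthread_once, mutexes, thread-specific data) now exist in libc. A minimal stand-alone program exercising the same primitives a threaded OpenSSL build relies on:

```cpp
#include <pthread.h>
#include <stdio.h>

// One-time initialisation plus mutual exclusion, using only the constants
// and functions defined in the pthread changes further down.
static pthread_once_t s_once = PTHREAD_ONCE_INIT;
static pthread_mutex_t s_lock = PTHREAD_MUTEX_INITIALIZER;
static int s_initialized = 0;

static void init_once(void) { s_initialized = 1; }

int main(void)
{
    pthread_once(&s_once, init_once);
    pthread_mutex_lock(&s_lock);
    printf("initialized: %d\n", s_initialized);
    pthread_mutex_unlock(&s_lock);
    return 0;
}
```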
@@ -11,6 +11,7 @@ set(LIBC_SOURCES
ftw.cpp
grp.cpp
inttypes.cpp
langinfo.cpp
libgen.cpp
locale.cpp
malloc.cpp
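langinfo.cpp is a new translation unit whose implementation appears further down in this comparison; a quick way to exercise it from user space is through the standard nl_langinfo(3) interface:

```cpp
#include <langinfo.h>
#include <locale.h>
#include <stdio.h>

// Query the codeset and a couple of LC_TIME strings from the new langinfo.cpp.
int main(void)
{
    setlocale(LC_ALL, "");
    printf("codeset:   %s\n", nl_langinfo(CODESET));
    printf("first day: %s\n", nl_langinfo(DAY_1));
    printf("date fmt:  %s\n", nl_langinfo(D_FMT));
    return 0;
}
```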
@ -4,82 +4,41 @@
|
|||
|
||||
__BEGIN_DECLS
|
||||
|
||||
#if !defined(__pthread_attr_t_defined) && (defined(__need_all_types) || defined(__need_pthread_attr_t))
|
||||
#if !defined(__pthread_attr_t_defined) && (defined(__need_all_types) || defined(__need_pthread_attr_t) || defined(__need_pthread_types))
|
||||
#define __pthread_attr_t_defined 1
|
||||
typedef int pthread_attr_t;
|
||||
#endif
|
||||
#undef __need_pthread_attr_t
|
||||
|
||||
#if !defined(__pthread_barrier_t_defined) && (defined(__need_all_types) || defined(__need_pthread_barrier_t))
|
||||
#define __pthread_barrier_t_defined 1
|
||||
typedef int pthread_barrier_t;
|
||||
#endif
|
||||
#undef __need_pthread_barrier_t
|
||||
|
||||
#if !defined(__pthread_barrierattr_t_defined) && (defined(__need_all_types) || defined(__need_pthread_barrierattr_t))
|
||||
#define __pthread_barrierattr_t_defined 1
|
||||
typedef int pthread_barrierattr_t;
|
||||
#endif
|
||||
#undef __need_pthread_barrierattr_t
|
||||
|
||||
#if !defined(__pthread_cond_t_defined) && (defined(__need_all_types) || defined(__need_pthread_cond_t))
|
||||
#define __pthread_cond_t_defined 1
|
||||
typedef int pthread_cond_t;
|
||||
#endif
|
||||
#undef __need_pthread_cond_t
|
||||
|
||||
#if !defined(__pthread_condattr_t_defined) && (defined(__need_all_types) || defined(__need_pthread_condattr_t))
|
||||
#define __pthread_condattr_t_defined 1
|
||||
typedef int pthread_condattr_t;
|
||||
#endif
|
||||
#undef __need_pthread_condattr_t
|
||||
|
||||
#if !defined(__pthread_key_t_defined) && (defined(__need_all_types) || defined(__need_pthread_key_t))
|
||||
#define __pthread_key_t_defined 1
|
||||
typedef int pthread_key_t;
|
||||
#endif
|
||||
#undef __need_pthread_key_t
|
||||
|
||||
#if !defined(__pthread_mutex_t_defined) && (defined(__need_all_types) || defined(__need_pthread_mutex_t))
|
||||
#define __pthread_mutex_t_defined 1
|
||||
typedef int pthread_mutex_t;
|
||||
#endif
|
||||
#undef __need_pthread_mutex_t
|
||||
|
||||
#if !defined(__pthread_mutexattr_t_defined) && (defined(__need_all_types) || defined(__need_pthread_mutexattr_t))
|
||||
#define __pthread_mutexattr_t_defined 1
|
||||
typedef int pthread_mutexattr_t;
|
||||
#endif
|
||||
#undef __need_pthread_mutexattr_t
|
||||
|
||||
#if !defined(__pthread_once_t_defined) && (defined(__need_all_types) || defined(__need_pthread_once_t))
|
||||
#define __pthread_once_t_defined 1
|
||||
typedef int pthread_once_t;
|
||||
#endif
|
||||
#undef __need_pthread_once_t
|
||||
|
||||
#if !defined(__pthread_rwlock_t_defined) && (defined(__need_all_types) || defined(__need_pthread_rwlock_t))
|
||||
#define __pthread_rwlock_t_defined 1
|
||||
typedef int pthread_rwlock_t;
|
||||
#endif
|
||||
#undef __need_pthread_rwlock_t
|
||||
|
||||
#if !defined(__pthread_rwlockattr_t_defined) && (defined(__need_all_types) || defined(__need_pthread_rwlockattr_t))
|
||||
#define __pthread_rwlockattr_t_defined 1
|
||||
typedef int pthread_rwlockattr_t;
|
||||
#endif
|
||||
#undef __need_pthread_rwlockattr_t
|
||||
|
||||
#if !defined(__pthread_t_defined) && (defined(__need_all_types) || defined(__need_pthread_t))
|
||||
#if !defined(__pthread_t_defined) && (defined(__need_all_types) || defined(__need_pthread_t) || defined(__need_pthread_types))
|
||||
#define __pthread_t_defined 1
|
||||
typedef pid_t pthread_t;
|
||||
#endif
|
||||
#undef __need_pthread_t
|
||||
|
||||
#if !defined(__pthread_spinlock_t_defined) && (defined(__need_all_types) || defined(__need_pthread_spinlock_t))
|
||||
#define __pthread_spinlock_t_defined 1
|
||||
typedef pthread_t pthread_spinlock_t;
|
||||
#if !defined(__pthread_types_defined) && (defined(__need_all_types) || defined(__need_pthread_types))
|
||||
#define __pthread_types_defined 1
|
||||
|
||||
typedef int pthread_once_t;
|
||||
|
||||
typedef unsigned pthread_key_t;
|
||||
|
||||
typedef pthread_t pthread_spinlock_t;
|
||||
|
||||
typedef struct { int type; int shared; } pthread_mutexattr_t;
|
||||
typedef struct { pthread_mutexattr_t attr; pthread_t locker; unsigned lock_depth; } pthread_mutex_t;
|
||||
|
||||
typedef struct { int shared; } pthread_barrierattr_t;
|
||||
typedef struct { pthread_barrierattr_t attr; unsigned target; unsigned waiting; } pthread_barrier_t;
|
||||
|
||||
typedef struct { int clock; int shared; } pthread_condattr_t;
|
||||
struct _pthread_cond_block { struct _pthread_cond_block* next; int signaled; };
|
||||
typedef struct { pthread_condattr_t attr; pthread_spinlock_t lock; struct _pthread_cond_block* block_list; } pthread_cond_t;
|
||||
|
||||
typedef struct { int shared; } pthread_rwlockattr_t;
|
||||
typedef struct { pthread_rwlockattr_t attr; unsigned lockers; unsigned writers; } pthread_rwlock_t;
|
||||
|
||||
#endif
|
||||
#undef __need_pthread_spinlock_t
|
||||
#undef __need_pthread_types
|
||||
|
||||
__END_DECLS
|
||||
|
|
|
@ -57,6 +57,7 @@ __BEGIN_DECLS
|
|||
#define _XOPEN_NAME_MAX 255
|
||||
#define _XOPEN_PATH_MAX 1024
|
||||
|
||||
#define ARG_MAX _POSIX_ARG_MAX
|
||||
#define OPEN_MAX 64
|
||||
#define NAME_MAX 255
|
||||
#define PATH_MAX 256
|
||||
|
@ -65,17 +66,31 @@ __BEGIN_DECLS
|
|||
#define HOST_NAME_MAX 255
|
||||
#define TTY_NAME_MAX PATH_MAX
|
||||
|
||||
#define SHRT_MAX __INT16_MAX__
|
||||
#define INT_MAX __INT32_MAX__
|
||||
#define LONG_MAX __INT64_MAX__
|
||||
#define PTHREAD_KEYS_MAX _POSIX_THREAD_KEYS_MAX
|
||||
#define PTHREAD_DESTRUCTOR_ITERATIONS _POSIX_THREAD_DESTRUCTOR_ITERATIONS
|
||||
|
||||
#define CHAR_MAX SCHAR_MAX
|
||||
#define CHAR_MIN SCHAR_MIN
|
||||
|
||||
#define SCHAR_MAX __SCHAR_MAX__
|
||||
#define SHRT_MAX __SHRT_MAX__
|
||||
#define INT_MAX __INT_MAX__
|
||||
#define LONG_MAX __LONG_MAX__
|
||||
#define LLONG_MAX __LONG_LONG_MAX__
|
||||
#define SSIZE_MAX __PTRDIFF_MAX__
|
||||
|
||||
#define SCHAR_MIN (-SCHAR_MAX - 1)
|
||||
#define SHRT_MIN (-SHRT_MAX - 1)
|
||||
#define INT_MIN (-INT_MAX - 1)
|
||||
#define LONG_MIN (-LONG_MAX - 1)
|
||||
#define LLONG_MIN (-LLONG_MAX - 1)
|
||||
#define SSIZE_MIN (-SSIZE_MAX - 1)
|
||||
|
||||
#define USHRT_MAX __UINT16_MAX__
|
||||
#define UINT_MAX __UINT32_MAX__
|
||||
#define ULONG_MAX __UINT64_MAX__
|
||||
#define USCHAR_MAX (SCHAR_MAX * 2 + 1)
|
||||
#define USHRT_MAX (SHRT_MAX * 2 + 1)
|
||||
#define UINT_MAX (INT_MAX * 2U + 1)
|
||||
#define ULONG_MAX (LONG_MAX * 2UL + 1)
|
||||
#define ULLONG_MAX (LLONG_MAX * 2ULL + 1)
|
||||
|
||||
__END_DECLS
|
||||
|
||||
|
|
|
@ -13,19 +13,7 @@ __BEGIN_DECLS
|
|||
|
||||
#define __need_size_t
|
||||
#define __need_clockid_t
|
||||
#define __need_pthread_attr_t
|
||||
#define __need_pthread_barrier_t
|
||||
#define __need_pthread_barrierattr_t
|
||||
#define __need_pthread_cond_t
|
||||
#define __need_pthread_condattr_t
|
||||
#define __need_pthread_key_t
|
||||
#define __need_pthread_mutex_t
|
||||
#define __need_pthread_mutexattr_t
|
||||
#define __need_pthread_once_t
|
||||
#define __need_pthread_rwlock_t
|
||||
#define __need_pthread_rwlockattr_t
|
||||
#define __need_pthread_spinlock_t
|
||||
#define __need_pthread_t
|
||||
#define __need_pthread_types
|
||||
#include <sys/types.h>
|
||||
|
||||
struct uthread
|
||||
|
@ -36,34 +24,41 @@ struct uthread
|
|||
uintptr_t dtv[];
|
||||
};
|
||||
|
||||
#define PTHREAD_BARRIER_SERIAL_THREAD 1
|
||||
#define PTHREAD_CANCEL_ASYNCHRONOUS 2
|
||||
#define PTHREAD_CANCEL_ENABLE 3
|
||||
#define PTHREAD_CANCEL_DEFERRED 4
|
||||
#define PTHREAD_CANCEL_DISABLE 5
|
||||
#define PTHREAD_CANCELED 6
|
||||
#define PTHREAD_CREATE_DETACHED 7
|
||||
#define PTHREAD_CREATE_JOINABLE 8
|
||||
#define PTHREAD_EXPLICIT_SCHED 9
|
||||
#define PTHREAD_INHERIT_SCHED 10
|
||||
#define PTHREAD_MUTEX_DEFAULT 11
|
||||
#define PTHREAD_MUTEX_ERRORCHECK 12
|
||||
#define PTHREAD_MUTEX_NORMAL 13
|
||||
#define PTHREAD_MUTEX_RECURSIVE 14
|
||||
#define PTHREAD_MUTEX_ROBUST 15
|
||||
#define PTHREAD_MUTEX_STALLED 16
|
||||
#define PTHREAD_ONCE_INIT 17
|
||||
#define PTHREAD_PRIO_INHERIT 18
|
||||
#define PTHREAD_PRIO_NONE 19
|
||||
#define PTHREAD_PRIO_PROTECT 20
|
||||
#define PTHREAD_PROCESS_SHARED 21
|
||||
#define PTHREAD_PROCESS_PRIVATE 22
|
||||
#define PTHREAD_SCOPE_PROCESS 23
|
||||
#define PTHREAD_SCOPE_SYSTEM 24
|
||||
|
||||
#define PTHREAD_COND_INITIALIZER (pthread_cond_t)0
|
||||
#define PTHREAD_MUTEX_INITIALIZER (pthread_mutex_t)0
|
||||
#define PTHREAD_RWLOCK_INITIALIZER (pthread_rwlock_t)0
|
||||
#define PTHREAD_CREATE_DETACHED 1
|
||||
#define PTHREAD_CREATE_JOINABLE 0
|
||||
|
||||
#define PTHREAD_BARRIER_SERIAL_THREAD 1
|
||||
|
||||
#define PTHREAD_ONCE_INIT 0
|
||||
|
||||
#define PTHREAD_PROCESS_SHARED 0
|
||||
#define PTHREAD_PROCESS_PRIVATE 1
|
||||
|
||||
#define PTHREAD_MUTEX_ROBUST 0
|
||||
#define PTHREAD_MUTEX_STALLED 1
|
||||
|
||||
#define PTHREAD_MUTEX_DEFAULT 0
|
||||
#define PTHREAD_MUTEX_ERRORCHECK 1
|
||||
#define PTHREAD_MUTEX_NORMAL 2
|
||||
#define PTHREAD_MUTEX_RECURSIVE 3
|
||||
|
||||
#define PTHREAD_SPIN_INITIALIZER (pthread_spinlock_t)0
|
||||
#define PTHREAD_COND_INITIALIZER (pthread_cond_t){ { CLOCK_REALTIME, 0 }, PTHREAD_SPIN_INITIALIZER, NULL }
|
||||
#define PTHREAD_MUTEX_INITIALIZER (pthread_mutex_t){ { PTHREAD_MUTEX_DEFAULT, false }, 0, 0 }
|
||||
#define PTHREAD_RWLOCK_INITIALIZER (pthread_rwlock_t){ { false }, 0, 0 }
|
||||
|
||||
int pthread_atfork(void (*prepare)(void), void (*parent)(void), void(*child)(void));
|
||||
int pthread_attr_destroy(pthread_attr_t* attr);
|
||||
|
|
|
@ -32,6 +32,12 @@ __BEGIN_DECLS
|
|||
#define POSIX_MADV_SEQUENTIAL 4
|
||||
#define POSIX_MADV_WILLNEED 5
|
||||
|
||||
#define MADV_DONTNEED POSIX_MADV_DONTNEED
|
||||
#define MADV_NORMAL POSIX_MADV_NORMAL
|
||||
#define MADV_RANDOM POSIX_MADV_RANDOM
|
||||
#define MADV_SEQUENTIAL POSIX_MADV_SEQUENTIAL
|
||||
#define MADV_WILLNEED POSIX_MADV_WILLNEED
|
||||
|
||||
#define POSIX_TYPED_MEM_ALLOCATE 0x01
|
||||
#define POSIX_TYPED_MEM_ALLOCATE_CONTIG 0x02
|
||||
#define POSIX_TYPED_MEM_MAP_ALLOCATABLE 0x04
|
||||
|
@ -71,6 +77,8 @@ int posix_typed_mem_open(const char* name, int oflag, int tflag);
|
|||
int shm_open(const char* name, int oflag, mode_t mode);
|
||||
int shm_unlink(const char* name);
|
||||
|
||||
#define madvise posix_madvise
|
||||
|
||||
__END_DECLS
|
||||
|
||||
#endif
|
||||
|
|
|
@ -21,6 +21,7 @@ typedef unsigned int rlim_t;
|
|||
#define RLIM_INFINITY ((rlim_t)-1)
|
||||
#define RLIM_SAVED_MAX RLIM_INFINITY
|
||||
#define RLIM_SAVED_CUR RLIM_INFINITY
|
||||
#define RLIM_NLIMITS 7
|
||||
|
||||
#define RUSAGE_SELF 0
|
||||
#define RUSAGE_CHILDREN 1
|
||||
|
|
|
@ -32,8 +32,9 @@ __BEGIN_DECLS
|
|||
O(SYS_GET_GID, getgid) \
|
||||
O(SYS_GET_EUID, geteuid) \
|
||||
O(SYS_GET_EGID, getegid) \
|
||||
O(SYS_GET_PWD, getpwd) \
|
||||
O(SYS_SET_PWD, setpwd) \
|
||||
O(SYS_GETCWD, getcwd) \
|
||||
O(SYS_CHDIR, chdir) \
|
||||
O(SYS_FCHDIR, fchdir) \
|
||||
O(SYS_CLOCK_GETTIME, clock_gettime) \
|
||||
O(SYS_PIPE, pipe) \
|
||||
O(SYS_DUP2, dup2) \
|
||||
|
|
|
@ -22,18 +22,8 @@ __BEGIN_DECLS
|
|||
&& !defined(__need_off_t) \
|
||||
&& !defined(__need_pid_t) \
|
||||
&& !defined(__need_pthread_attr_t) \
|
||||
&& !defined(__need_pthread_barrier_t) \
|
||||
&& !defined(__need_pthread_barrierattr_t) \
|
||||
&& !defined(__need_pthread_cond_t) \
|
||||
&& !defined(__need_pthread_condattr_t) \
|
||||
&& !defined(__need_pthread_key_t) \
|
||||
&& !defined(__need_pthread_mutex_t) \
|
||||
&& !defined(__need_pthread_mutexattr_t) \
|
||||
&& !defined(__need_pthread_once_t) \
|
||||
&& !defined(__need_pthread_rwlock_t) \
|
||||
&& !defined(__need_pthread_rwlockattr_t) \
|
||||
&& !defined(__need_pthread_spinlock_t) \
|
||||
&& !defined(__need_pthread_t) \
|
||||
&& !defined(__need_pthread_types) \
|
||||
&& !defined(__need_size_t) \
|
||||
&& !defined(__need_ssize_t) \
|
||||
&& !defined(__need_suseconds_t) \
|
||||
|
|
|
@ -41,6 +41,7 @@ __BEGIN_DECLS
|
|||
#define LOG_LOCAL7 (16 << 3)
|
||||
|
||||
#define LOG_MASK(pri) (1 << (pri))
|
||||
#define LOG_UPTO(pri) (LOG_MASK((pri) + 1) - 1)
|
||||
|
||||
void closelog(void);
|
||||
void openlog(const char* ident, int logopt, int facility);
|
||||
|
|
|
@ -0,0 +1,90 @@
|
|||
#include <langinfo.h>
|
||||
#include <locale.h>
|
||||
|
||||
#include <BAN/Assert.h>
|
||||
|
||||
static const char* nl_langinfo_impl(nl_item item)
|
||||
{
|
||||
// only codeset is affected by current locales
|
||||
if (item == CODESET)
|
||||
{
|
||||
switch (__getlocale(LC_CTYPE))
|
||||
{
|
||||
case LOCALE_INVALID: ASSERT_NOT_REACHED();
|
||||
case LOCALE_UTF8: return "UTF-8";
|
||||
case LOCALE_POSIX: return "ANSI_X3.4-1968";
|
||||
}
|
||||
ASSERT_NOT_REACHED();
|
||||
}
|
||||
|
||||
// other values from POSIX locale
|
||||
switch (item)
|
||||
{
|
||||
// LC_TIME
|
||||
case D_T_FMT: return "%a %b %e %H:%M:%S %Y";
|
||||
case D_FMT: return "%m/%d/%y";
|
||||
case T_FMT: return "%H:%M:%S";
|
||||
case AM_STR: return "AM";
|
||||
case PM_STR: return "PM";
|
||||
case T_FMT_AMPM: return "%I:%M:%S %p";
|
||||
case ERA: return "";
|
||||
case ERA_D_T_FMT: return "";
|
||||
case ERA_D_FMT: return "";
|
||||
case ERA_T_FMT: return "";
|
||||
case DAY_1: return "Sunday";
|
||||
case DAY_2: return "Monday";
|
||||
case DAY_3: return "Tuesday";
|
||||
case DAY_4: return "Wednesday";
|
||||
case DAY_5: return "Thursday";
|
||||
case DAY_6: return "Friday";
|
||||
case DAY_7: return "Saturday";
|
||||
case ABDAY_1: return "Sun";
|
||||
case ABDAY_2: return "Mon";
|
||||
case ABDAY_3: return "Tue";
|
||||
case ABDAY_4: return "Wed";
|
||||
case ABDAY_5: return "Thu";
|
||||
case ABDAY_6: return "Fri";
|
||||
case ABDAY_7: return "Sat";
|
||||
case MON_1: return "January";
|
||||
case MON_2: return "February";
|
||||
case MON_3: return "March";
|
||||
case MON_4: return "April";
|
||||
case MON_5: return "May";
|
||||
case MON_6: return "June";
|
||||
case MON_7: return "July";
|
||||
case MON_8: return "August";
|
||||
case MON_9: return "September";
|
||||
case MON_10: return "October";
|
||||
case MON_11: return "November";
|
||||
case MON_12: return "December";
|
||||
case ABMON_1: return "Jan";
|
||||
case ABMON_2: return "Feb";
|
||||
case ABMON_3: return "Mar";
|
||||
case ABMON_4: return "Apr";
|
||||
case ABMON_5: return "May";
|
||||
case ABMON_6: return "Jun";
|
||||
case ABMON_7: return "Jul";
|
||||
case ABMON_8: return "Aug";
|
||||
case ABMON_9: return "Sep";
|
||||
case ABMON_10: return "Oct";
|
||||
case ABMON_11: return "Nov";
|
||||
case ABMON_12: return "Dec";
|
||||
// LC_NUMERIC
|
||||
case RADIXCHAR: return ".";
|
||||
case THOUSEP: return "";
|
||||
// LC_MESSAGES
|
||||
case YESEXPR: return "^[yY]";
|
||||
case NOEXPR: return "^[nN]";
|
||||
// LC_MONETARY
|
||||
case CRNCYSTR: return "";
|
||||
}
|
||||
|
||||
return "";
|
||||
}
|
||||
|
||||
char* nl_langinfo(nl_item item)
|
||||
{
|
||||
// NOTE: POSIX says "The application shall not modify the string returned"
|
||||
// so const_cast from string literal *should* be fine
|
||||
return const_cast<char*>(nl_langinfo_impl(item));
|
||||
}
|
|
@ -47,8 +47,8 @@ struct malloc_pool_t
|
|||
|
||||
malloc_node_t* free_list;
|
||||
|
||||
uint8_t* end() { return start + size; }
|
||||
bool contains(malloc_node_t* node) { return start <= (uint8_t*)node && (uint8_t*)node < end(); }
|
||||
uint8_t* end() const { return start + size; }
|
||||
bool contains(malloc_node_t* node) const { return start <= (uint8_t*)node && (uint8_t*)node->next() <= end(); }
|
||||
};
|
||||
|
||||
struct malloc_info_t
|
||||
|
@ -73,7 +73,7 @@ struct malloc_info_t
|
|||
static malloc_info_t s_malloc_info;
|
||||
static auto& s_malloc_pools = s_malloc_info.pools;
|
||||
|
||||
static pthread_spinlock_t s_malloc_lock;
|
||||
static pthread_mutex_t s_malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
|
||||
|
||||
static bool allocate_pool(size_t pool_index)
|
||||
{
|
||||
|
@ -117,21 +117,8 @@ static void remove_node_from_pool_free_list(malloc_pool_t& pool, malloc_node_t*
|
|||
}
|
||||
}
|
||||
|
||||
static void* allocate_from_pool(size_t pool_index, size_t size)
|
||||
static void merge_following_free_nodes(malloc_pool_t& pool, malloc_node_t* node)
|
||||
{
|
||||
assert(size % s_malloc_default_align == 0);
|
||||
|
||||
auto& pool = s_malloc_pools[pool_index];
|
||||
assert(pool.start != nullptr);
|
||||
|
||||
if (!pool.free_list)
|
||||
return nullptr;
|
||||
|
||||
for (auto* node = pool.free_list; node; node = node->next_free)
|
||||
{
|
||||
assert(!node->allocated);
|
||||
|
||||
// merge nodes right after current one
|
||||
while (!node->last && !node->next()->allocated)
|
||||
{
|
||||
auto* next = node->next();
|
||||
|
@ -139,16 +126,14 @@ static void* allocate_from_pool(size_t pool_index, size_t size)
|
|||
node->last = next->last;
|
||||
node->size += next->size;
|
||||
}
|
||||
}
|
||||
|
||||
if (node->data_size() < size)
|
||||
continue;
|
||||
static void shrink_node_if_needed(malloc_pool_t& pool, malloc_node_t* node, size_t size)
|
||||
{
|
||||
assert(size <= node->data_size());
|
||||
if (node->data_size() - size < sizeof(malloc_node_t) + s_malloc_shrink_threshold)
|
||||
return;
|
||||
|
||||
node->allocated = true;
|
||||
remove_node_from_pool_free_list(pool, node);
|
||||
|
||||
// shrink node if needed
|
||||
if (node->data_size() - size >= sizeof(malloc_node_t) + s_malloc_shrink_threshold)
|
||||
{
|
||||
uint8_t* node_end = (uint8_t*)node->next();
|
||||
|
||||
node->size = sizeof(malloc_node_t) + size;
|
||||
|
@ -166,8 +151,30 @@ static void* allocate_from_pool(size_t pool_index, size_t size)
|
|||
next->next_free = pool.free_list;
|
||||
next->prev_free = nullptr;
|
||||
pool.free_list = next;
|
||||
}
|
||||
}
|
||||
|
||||
static void* allocate_from_pool(size_t pool_index, size_t size)
|
||||
{
|
||||
assert(size % s_malloc_default_align == 0);
|
||||
|
||||
auto& pool = s_malloc_pools[pool_index];
|
||||
assert(pool.start != nullptr);
|
||||
|
||||
if (!pool.free_list)
|
||||
return nullptr;
|
||||
|
||||
for (auto* node = pool.free_list; node; node = node->next_free)
|
||||
{
|
||||
assert(!node->allocated);
|
||||
|
||||
merge_following_free_nodes(pool, node);
|
||||
if (node->data_size() < size)
|
||||
continue;
|
||||
|
||||
node->allocated = true;
|
||||
remove_node_from_pool_free_list(pool, node);
|
||||
|
||||
shrink_node_if_needed(pool, node, size);
|
||||
return node->data;
|
||||
}
|
||||
|
||||
|
@ -199,17 +206,18 @@ void* malloc(size_t size)
|
|||
size_t first_usable_pool = 0;
|
||||
while (s_malloc_pools[first_usable_pool].size - sizeof(malloc_node_t) < size)
|
||||
first_usable_pool++;
|
||||
// first_usable_pool = ceil(log(size/s_malloc_smallest_pool, s_malloc_pool_size_mult))
|
||||
|
||||
pthread_mutex_lock(&s_malloc_mutex);
|
||||
|
||||
// try to find any already existing pools that we can allocate in
|
||||
for (size_t i = first_usable_pool; i < s_malloc_pool_count; i++)
|
||||
{
|
||||
if (s_malloc_pools[i].start == nullptr)
|
||||
continue;
|
||||
pthread_spin_lock(&s_malloc_lock);
|
||||
void* ret = allocate_from_pool(i, size);
|
||||
pthread_spin_unlock(&s_malloc_lock);
|
||||
if (ret != nullptr)
|
||||
if (ret == nullptr)
|
||||
continue;
|
||||
pthread_mutex_unlock(&s_malloc_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -218,18 +226,17 @@ void* malloc(size_t size)
|
|||
{
|
||||
if (s_malloc_pools[i].start != nullptr)
|
||||
continue;
|
||||
|
||||
pthread_spin_lock(&s_malloc_lock);
|
||||
void* ret = nullptr;
|
||||
if (allocate_pool(i))
|
||||
ret = allocate_from_pool(i, size);
|
||||
pthread_spin_unlock(&s_malloc_lock);
|
||||
|
||||
void* ret = allocate_pool(i)
|
||||
? allocate_from_pool(i, size)
|
||||
: nullptr;
|
||||
if (ret == nullptr)
|
||||
break;
|
||||
pthread_mutex_unlock(&s_malloc_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
pthread_mutex_unlock(&s_malloc_mutex);
|
||||
|
||||
errno = ENOMEM;
|
||||
return nullptr;
|
||||
}
|
||||
|
@ -245,21 +252,35 @@ void* realloc(void* ptr, size_t size)
|
|||
if (size_t ret = size % s_malloc_default_align)
|
||||
size += s_malloc_default_align - ret;
|
||||
|
||||
pthread_mutex_lock(&s_malloc_mutex);
|
||||
|
||||
auto* node = node_from_data_pointer(ptr);
|
||||
size_t oldsize = node->data_size();
|
||||
auto& pool = pool_from_node(node);
|
||||
|
||||
if (oldsize == size)
|
||||
assert(node->allocated);
|
||||
|
||||
const size_t oldsize = node->data_size();
|
||||
|
||||
// try to grow the node if needed
|
||||
if (size > oldsize)
|
||||
merge_following_free_nodes(pool, node);
|
||||
|
||||
const bool needs_allocation = node->data_size() < size;
|
||||
|
||||
shrink_node_if_needed(pool, node, needs_allocation ? oldsize : size);
|
||||
|
||||
pthread_mutex_unlock(&s_malloc_mutex);
|
||||
|
||||
if (!needs_allocation)
|
||||
return ptr;
|
||||
|
||||
// TODO: try to shrink or expand allocation
|
||||
|
||||
// allocate new pointer
|
||||
void* new_ptr = malloc(size);
|
||||
if (new_ptr == nullptr)
|
||||
return nullptr;
|
||||
|
||||
// move data to the new pointer
|
||||
size_t bytes_to_copy = oldsize < size ? oldsize : size;
|
||||
const size_t bytes_to_copy = (oldsize < size) ? oldsize : size;
|
||||
memcpy(new_ptr, ptr, bytes_to_copy);
|
||||
free(ptr);
|
||||
|
||||
|
@ -273,22 +294,15 @@ void free(void* ptr)
|
|||
if (ptr == nullptr)
|
||||
return;
|
||||
|
||||
pthread_spin_lock(&s_malloc_lock);
|
||||
pthread_mutex_lock(&s_malloc_mutex);
|
||||
|
||||
auto* node = node_from_data_pointer(ptr);
|
||||
|
||||
node->allocated = false;
|
||||
|
||||
auto& pool = pool_from_node(node);
|
||||
|
||||
// merge nodes right after freed one
|
||||
while (!node->last && !node->next()->allocated)
|
||||
{
|
||||
auto* next = node->next();
|
||||
remove_node_from_pool_free_list(pool, next);
|
||||
node->last = next->last;
|
||||
node->size += next->size;
|
||||
}
|
||||
assert(node->allocated);
|
||||
node->allocated = false;
|
||||
|
||||
merge_following_free_nodes(pool, node);
|
||||
|
||||
// add node to free list
|
||||
if (pool.free_list)
|
||||
|
@ -297,22 +311,24 @@ void free(void* ptr)
|
|||
node->next_free = pool.free_list;
|
||||
pool.free_list = node;
|
||||
|
||||
pthread_spin_unlock(&s_malloc_lock);
|
||||
pthread_mutex_unlock(&s_malloc_mutex);
|
||||
}
|
||||
|
||||
void* calloc(size_t nmemb, size_t size)
|
||||
{
|
||||
dprintln_if(DEBUG_MALLOC, "calloc({}, {})", nmemb, size);
|
||||
|
||||
size_t total = nmemb * size;
|
||||
const size_t total = nmemb * size;
|
||||
if (size != 0 && total / size != nmemb)
|
||||
{
|
||||
errno = ENOMEM;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
void* ptr = malloc(total);
|
||||
if (ptr == nullptr)
|
||||
return nullptr;
|
||||
|
||||
memset(ptr, 0, total);
|
||||
return ptr;
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
#include <BAN/Assert.h>
|
||||
#include <BAN/Atomic.h>
|
||||
#include <BAN/Debug.h>
|
||||
#include <BAN/PlacementNew.h>
|
||||
|
||||
#include <kernel/Arch.h>
|
||||
|
@ -19,6 +20,8 @@ struct pthread_trampoline_info_t
|
|||
void* arg;
|
||||
};
|
||||
|
||||
static constexpr unsigned rwlock_writer_locked = -1;
|
||||
|
||||
// stack is 16 byte aligned on entry, this `call` is used to align it
|
||||
extern "C" void _pthread_trampoline(void*);
|
||||
asm(
|
||||
|
@ -108,12 +111,104 @@ void pthread_cleanup_push(void (*routine)(void*), void* arg)
|
|||
}
|
||||
#endif
|
||||
|
||||
#if not __disable_thread_local_storage
|
||||
static thread_local struct {
|
||||
void* value;
|
||||
void (*destructor)(void*);
|
||||
} s_pthread_keys[PTHREAD_KEYS_MAX] {};
|
||||
static thread_local uint8_t s_pthread_keys_allocated[(PTHREAD_KEYS_MAX + 7) / 8];
|
||||
|
||||
static inline bool is_pthread_key_allocated(pthread_key_t key)
|
||||
{
|
||||
if (key >= PTHREAD_KEYS_MAX)
|
||||
return false;
|
||||
return s_pthread_keys_allocated[key / 8] & (1 << (key % 8));
|
||||
}
|
||||
|
||||
int pthread_key_create(pthread_key_t* key, void (*destructor)(void*))
|
||||
{
|
||||
for (pthread_key_t i = 0; i < PTHREAD_KEYS_MAX; i++)
|
||||
{
|
||||
if (is_pthread_key_allocated(i))
|
||||
continue;
|
||||
s_pthread_keys[i].value = nullptr;
|
||||
s_pthread_keys[i].destructor = destructor;
|
||||
s_pthread_keys_allocated[i / 8] |= 1 << (i % 8);
|
||||
*key = i;
|
||||
return 0;
|
||||
}
|
||||
|
||||
return EAGAIN;
|
||||
}
|
||||
|
||||
int pthread_key_delete(pthread_key_t key)
|
||||
{
|
||||
if (!is_pthread_key_allocated(key))
|
||||
return EINVAL;
|
||||
s_pthread_keys[key].value = nullptr;
|
||||
s_pthread_keys[key].destructor = nullptr;
|
||||
s_pthread_keys_allocated[key / 8] &= ~(1 << (key % 8));
|
||||
return 0;
|
||||
}
|
||||
|
||||
void* pthread_getspecific(pthread_key_t key)
|
||||
{
|
||||
if (!is_pthread_key_allocated(key))
|
||||
return nullptr;
|
||||
return s_pthread_keys[key].value;
|
||||
}
|
||||
|
||||
int pthread_setspecific(pthread_key_t key, const void* value)
|
||||
{
|
||||
if (!is_pthread_key_allocated(key))
|
||||
return EINVAL;
|
||||
s_pthread_keys[key].value = const_cast<void*>(value);
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
int pthread_attr_destroy(pthread_attr_t* attr)
|
||||
{
|
||||
(void)attr;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_attr_init(pthread_attr_t* attr)
|
||||
{
|
||||
*attr = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_attr_setstacksize(pthread_attr_t* attr, size_t stacksize)
|
||||
{
|
||||
(void)attr;
|
||||
(void)stacksize;
|
||||
dwarnln("TODO: ignoring pthread_attr_setstacksize");
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_attr_getdetachstate(const pthread_attr_t* attr, int* detachstate)
|
||||
{
|
||||
(void)attr;
|
||||
*detachstate = PTHREAD_CREATE_JOINABLE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_attr_setdetachstate(pthread_attr_t* attr, int detachstate)
|
||||
{
|
||||
(void)attr;
|
||||
switch (detachstate)
|
||||
{
|
||||
case PTHREAD_CREATE_DETACHED:
|
||||
dwarnln("TODO: pthread_attr_setdetachstate");
|
||||
return ENOTSUP;
|
||||
case PTHREAD_CREATE_JOINABLE:
|
||||
return 0;
|
||||
default:
|
||||
return EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
int pthread_create(pthread_t* __restrict thread_id, const pthread_attr_t* __restrict attr, void* (*start_routine)(void*), void* __restrict arg)
|
||||
{
|
||||
auto* info = static_cast<pthread_trampoline_info_t*>(malloc(sizeof(pthread_trampoline_info_t)));
|
||||
|
@ -184,17 +279,46 @@ pthread_create_error:
|
|||
return return_code;
|
||||
}
|
||||
|
||||
int pthread_detach(pthread_t thread)
|
||||
{
|
||||
(void)thread;
|
||||
dwarnln("TODO: pthread_detach");
|
||||
return ENOTSUP;
|
||||
}
|
||||
|
||||
void pthread_exit(void* value_ptr)
|
||||
{
|
||||
#if not __disable_thread_local_storage
|
||||
while (s_cleanup_stack)
|
||||
pthread_cleanup_pop(1);
|
||||
for (size_t iteration = 0; iteration < PTHREAD_DESTRUCTOR_ITERATIONS; iteration++)
|
||||
{
|
||||
bool called = false;
|
||||
for (pthread_key_t i = 0; i < PTHREAD_KEYS_MAX; i++)
|
||||
{
|
||||
if (!is_pthread_key_allocated(i))
|
||||
continue;
|
||||
if (!s_pthread_keys[i].value || !s_pthread_keys[i].destructor)
|
||||
continue;
|
||||
void* old_value = s_pthread_keys[i].value;
|
||||
s_pthread_keys[i].value = nullptr;
|
||||
s_pthread_keys[i].destructor(old_value);
|
||||
called = true;
|
||||
}
|
||||
if (!called)
|
||||
break;
|
||||
}
|
||||
#endif
|
||||
free_uthread(get_uthread());
|
||||
syscall(SYS_PTHREAD_EXIT, value_ptr);
|
||||
ASSERT_NOT_REACHED();
|
||||
}
|
||||
|
||||
int pthread_equal(pthread_t t1, pthread_t t2)
|
||||
{
|
||||
return t1 == t2;
|
||||
}
|
||||
|
||||
int pthread_join(pthread_t thread, void** value_ptr)
|
||||
{
|
||||
return syscall(SYS_PTHREAD_JOIN, thread, value_ptr);
|
||||
|
@ -212,65 +336,587 @@ pthread_t pthread_self(void)
|
|||
#endif
|
||||
}
|
||||
|
||||
static inline BAN::Atomic<pthread_t>& pthread_spin_get_atomic(pthread_spinlock_t* lock)
|
||||
int pthread_once(pthread_once_t* once_control, void (*init_routine)(void))
|
||||
{
|
||||
static_assert(sizeof(pthread_spinlock_t) <= sizeof(BAN::Atomic<pthread_t>));
|
||||
static_assert(alignof(pthread_spinlock_t) <= alignof(BAN::Atomic<pthread_t>));
|
||||
return *reinterpret_cast<BAN::Atomic<pthread_t>*>(lock);
|
||||
static_assert(PTHREAD_ONCE_INIT == 0);
|
||||
|
||||
pthread_once_t expected = 0;
|
||||
if (BAN::atomic_compare_exchange(*once_control, expected, 1))
|
||||
{
|
||||
init_routine();
|
||||
BAN::atomic_store(*once_control, 2);
|
||||
}
|
||||
|
||||
while (BAN::atomic_load(*once_control) != 2)
|
||||
sched_yield();
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_spin_destroy(pthread_spinlock_t* lock)
|
||||
{
|
||||
pthread_spin_get_atomic(lock).~Atomic<pthread_t>();
|
||||
(void)lock;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_spin_init(pthread_spinlock_t* lock, int pshared)
|
||||
{
|
||||
(void)pshared;
|
||||
new (lock) BAN::Atomic<pthread_t>();
|
||||
pthread_spin_get_atomic(lock) = false;
|
||||
*lock = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_spin_lock(pthread_spinlock_t* lock)
|
||||
{
|
||||
auto& atomic = pthread_spin_get_atomic(lock);
|
||||
|
||||
const pthread_t tid = pthread_self();
|
||||
ASSERT(atomic.load(BAN::MemoryOrder::memory_order_relaxed) != tid);
|
||||
const auto tid = pthread_self();
|
||||
ASSERT(BAN::atomic_load(*lock, BAN::MemoryOrder::memory_order_relaxed) != tid);
|
||||
|
||||
pthread_t expected = 0;
|
||||
while (!atomic.compare_exchange(expected, tid, BAN::MemoryOrder::memory_order_acquire))
|
||||
{
|
||||
sched_yield();
|
||||
while (!BAN::atomic_compare_exchange(*lock, expected, tid, BAN::MemoryOrder::memory_order_acquire))
|
||||
expected = 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_spin_trylock(pthread_spinlock_t* lock)
|
||||
{
|
||||
auto& atomic = pthread_spin_get_atomic(lock);
|
||||
|
||||
const pthread_t tid = pthread_self();
|
||||
ASSERT(atomic.load(BAN::MemoryOrder::memory_order_relaxed) != tid);
|
||||
const auto tid = pthread_self();
|
||||
ASSERT(BAN::atomic_load(*lock, BAN::MemoryOrder::memory_order_relaxed) != tid);
|
||||
|
||||
pthread_t expected = 0;
|
||||
if (atomic.compare_exchange(expected, tid, BAN::MemoryOrder::memory_order_acquire))
|
||||
return 0;
|
||||
if (!BAN::atomic_compare_exchange(*lock, expected, tid, BAN::MemoryOrder::memory_order_acquire))
|
||||
return EBUSY;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_spin_unlock(pthread_spinlock_t* lock)
|
||||
{
|
||||
auto& atomic = pthread_spin_get_atomic(lock);
|
||||
ASSERT(atomic.load(BAN::MemoryOrder::memory_order_relaxed) == pthread_self());
|
||||
atomic.store(0, BAN::MemoryOrder::memory_order_release);
|
||||
ASSERT(BAN::atomic_load(*lock, BAN::MemoryOrder::memory_order_relaxed) == pthread_self());
|
||||
BAN::atomic_store(*lock, 0, BAN::MemoryOrder::memory_order_release);
|
||||
return 0;
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
static int _pthread_timedlock(T* __restrict lock, const struct timespec* __restrict abstime, int (*trylock)(T*))
|
||||
{
|
||||
if (trylock(lock) == 0)
|
||||
return 0;
|
||||
|
||||
constexpr auto has_timed_out =
|
||||
[](const struct timespec* abstime) -> bool
|
||||
{
|
||||
struct timespec curtime;
|
||||
clock_gettime(CLOCK_REALTIME, &curtime);
|
||||
if (curtime.tv_sec < abstime->tv_sec)
|
||||
return false;
|
||||
if (curtime.tv_sec > abstime->tv_sec)
|
||||
return true;
|
||||
return curtime.tv_nsec >= abstime->tv_nsec;
|
||||
};
|
||||
|
||||
while (!has_timed_out(abstime))
|
||||
{
|
||||
if (trylock(lock) == 0)
|
||||
return 0;
|
||||
sched_yield();
|
||||
}
|
||||
|
||||
return ETIMEDOUT;
|
||||
}
|
||||
|
||||
int pthread_mutexattr_destroy(pthread_mutexattr_t* attr)
|
||||
{
|
||||
(void)attr;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_mutexattr_init(pthread_mutexattr_t* attr)
|
||||
{
|
||||
*attr = {
|
||||
.type = PTHREAD_MUTEX_DEFAULT,
|
||||
.shared = false,
|
||||
};
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_mutexattr_getpshared(const pthread_mutexattr_t* __restrict attr, int* __restrict pshared)
|
||||
{
|
||||
*pshared = attr->shared ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_mutexattr_setpshared(pthread_mutexattr_t* attr, int pshared)
|
||||
{
|
||||
switch (pshared)
|
||||
{
|
||||
case PTHREAD_PROCESS_PRIVATE:
|
||||
attr->shared = false;
|
||||
return 0;
|
||||
case PTHREAD_PROCESS_SHARED:
|
||||
attr->shared = true;
|
||||
return 0;
|
||||
}
|
||||
return EINVAL;
|
||||
}
|
||||
|
||||
int pthread_mutexattr_gettype(const pthread_mutexattr_t* __restrict attr, int* __restrict type)
|
||||
{
|
||||
*type = attr->type;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_mutexattr_settype(pthread_mutexattr_t* attr, int type)
|
||||
{
|
||||
switch (type)
|
||||
{
|
||||
case PTHREAD_MUTEX_DEFAULT:
|
||||
case PTHREAD_MUTEX_ERRORCHECK:
|
||||
case PTHREAD_MUTEX_NORMAL:
|
||||
case PTHREAD_MUTEX_RECURSIVE:
|
||||
attr->type = type;
|
||||
return 0;
|
||||
}
|
||||
return EINVAL;
|
||||
}
|
||||
|
||||
int pthread_mutex_destroy(pthread_mutex_t* mutex)
|
||||
{
|
||||
(void)mutex;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_mutex_init(pthread_mutex_t* __restrict mutex, const pthread_mutexattr_t* __restrict attr)
|
||||
{
|
||||
const pthread_mutexattr_t default_attr = {
|
||||
.type = PTHREAD_MUTEX_DEFAULT,
|
||||
.shared = false,
|
||||
};
|
||||
if (attr == nullptr)
|
||||
attr = &default_attr;
|
||||
*mutex = {
|
||||
.attr = *attr,
|
||||
.locker = 0,
|
||||
.lock_depth = 0,
|
||||
};
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_mutex_lock(pthread_mutex_t* mutex)
|
||||
{
|
||||
// NOTE: current yielding implementation supports shared
|
||||
|
||||
const auto tid = pthread_self();
|
||||
|
||||
switch (mutex->attr.type)
|
||||
{
|
||||
case PTHREAD_MUTEX_RECURSIVE:
|
||||
if (mutex->locker != tid)
|
||||
break;
|
||||
mutex->lock_depth++;
|
||||
return 0;
|
||||
case PTHREAD_MUTEX_ERRORCHECK:
|
||||
if (mutex->locker != tid)
|
||||
break;
|
||||
return EDEADLK;
|
||||
}
|
||||
|
||||
pthread_t expected = 0;
|
||||
while (!BAN::atomic_compare_exchange(mutex->locker, expected, tid, BAN::MemoryOrder::memory_order_acquire))
|
||||
{
|
||||
sched_yield();
|
||||
expected = 0;
|
||||
}
|
||||
|
||||
mutex->lock_depth = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_mutex_trylock(pthread_mutex_t* mutex)
|
||||
{
|
||||
// NOTE: the current yielding implementation already works for process-shared mutexes
|
||||
|
||||
const auto tid = pthread_self();
|
||||
|
||||
switch (mutex->attr.type)
|
||||
{
|
||||
case PTHREAD_MUTEX_RECURSIVE:
|
||||
if (mutex->locker != tid)
|
||||
break;
|
||||
mutex->lock_depth++;
|
||||
return 0;
|
||||
case PTHREAD_MUTEX_ERRORCHECK:
|
||||
if (mutex->locker != tid)
|
||||
break;
|
||||
return EDEADLK;
|
||||
}
|
||||
|
||||
pthread_t expected = 0;
|
||||
if (!BAN::atomic_compare_exchange(mutex->locker, expected, tid, BAN::MemoryOrder::memory_order_acquire))
|
||||
return EBUSY;
|
||||
|
||||
mutex->lock_depth = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_mutex_timedlock(pthread_mutex_t* __restrict mutex, const struct timespec* __restrict abstime)
|
||||
{
|
||||
return _pthread_timedlock(mutex, abstime, &pthread_mutex_trylock);
|
||||
}
|
||||
|
||||
int pthread_mutex_unlock(pthread_mutex_t* mutex)
|
||||
{
|
||||
// NOTE: the current yielding implementation already works for process-shared mutexes
|
||||
|
||||
ASSERT(mutex->locker == pthread_self());
|
||||
|
||||
mutex->lock_depth--;
|
||||
if (mutex->lock_depth == 0)
|
||||
BAN::atomic_store(mutex->locker, 0, BAN::MemoryOrder::memory_order_release);
|
||||
|
||||
return 0;
|
||||
}
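A small sketch of recursive-mutex behavior under the implementation above (illustrative): the owning thread may re-lock, and the mutex is released only once lock_depth returns to zero.
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);

pthread_mutex_t mutex;
pthread_mutex_init(&mutex, &attr);

pthread_mutex_lock(&mutex);   // locker = pthread_self(), lock_depth = 1
pthread_mutex_lock(&mutex);   // same thread re-enters: lock_depth = 2, no deadlock
pthread_mutex_unlock(&mutex); // lock_depth = 1, still owned
pthread_mutex_unlock(&mutex); // lock_depth = 0, locker cleared with release ordering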
|
||||
|
||||
int pthread_rwlockattr_destroy(pthread_rwlockattr_t* attr)
|
||||
{
|
||||
(void)attr;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_rwlockattr_init(pthread_rwlockattr_t* attr)
|
||||
{
|
||||
*attr = {
|
||||
.shared = false,
|
||||
};
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t* __restrict attr, int* __restrict pshared)
|
||||
{
|
||||
*pshared = attr->shared ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_rwlockattr_setpshared(pthread_rwlockattr_t* attr, int pshared)
|
||||
{
|
||||
switch (pshared)
|
||||
{
|
||||
case PTHREAD_PROCESS_PRIVATE:
|
||||
attr->shared = false;
|
||||
return 0;
|
||||
case PTHREAD_PROCESS_SHARED:
|
||||
attr->shared = true;
|
||||
return 0;
|
||||
}
|
||||
return EINVAL;
|
||||
}
|
||||
|
||||
int pthread_rwlock_destroy(pthread_rwlock_t* rwlock)
|
||||
{
|
||||
(void)rwlock;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_rwlock_init(pthread_rwlock_t* __restrict rwlock, const pthread_rwlockattr_t* __restrict attr)
|
||||
{
|
||||
const pthread_rwlockattr_t default_attr = {
|
||||
.shared = false,
|
||||
};
|
||||
if (attr == nullptr)
|
||||
attr = &default_attr;
|
||||
*rwlock = {
|
||||
.attr = *attr,
|
||||
.lockers = 0,
|
||||
.writers = 0,
|
||||
};
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_rwlock_rdlock(pthread_rwlock_t* rwlock)
|
||||
{
|
||||
unsigned expected = BAN::atomic_load(rwlock->lockers);
|
||||
for (;;)
|
||||
{
|
||||
if (expected == rwlock_writer_locked || BAN::atomic_load(rwlock->writers))
|
||||
sched_yield();
|
||||
else if (BAN::atomic_compare_exchange(rwlock->lockers, expected, expected + 1))
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_rwlock_tryrdlock(pthread_rwlock_t* rwlock)
|
||||
{
|
||||
unsigned expected = BAN::atomic_load(rwlock->lockers);
|
||||
while (expected != rwlock_writer_locked && BAN::atomic_load(rwlock->writers) == 0)
|
||||
if (BAN::atomic_compare_exchange(rwlock->lockers, expected, expected + 1))
|
||||
return 0;
|
||||
return EBUSY;
|
||||
}
|
||||
|
||||
int pthread_rwlock_timedrdlock(pthread_rwlock_t* __restrict rwlock, const struct timespec* __restrict abstime)
|
||||
{
|
||||
return _pthread_timedlock(rwlock, abstime, &pthread_rwlock_tryrdlock);
|
||||
}
|
||||
|
||||
int pthread_rwlock_wrlock(pthread_rwlock_t* rwlock)
|
||||
{
|
||||
BAN::atomic_add_fetch(rwlock->writers, 1);
|
||||
unsigned expected = 0;
|
||||
while (!BAN::atomic_compare_exchange(rwlock->lockers, expected, rwlock_writer_locked))
|
||||
{
|
||||
sched_yield();
|
||||
expected = 0;
|
||||
}
|
||||
BAN::atomic_sub_fetch(rwlock->writers, 1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_rwlock_trywrlock(pthread_rwlock_t* rwlock)
|
||||
{
|
||||
unsigned expected = 0;
|
||||
if (!BAN::atomic_compare_exchange(rwlock->lockers, expected, rwlock_writer_locked))
|
||||
return EBUSY;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_rwlock_timedwrlock(pthread_rwlock_t* __restrict rwlock, const struct timespec* __restrict abstime)
|
||||
{
|
||||
return _pthread_timedlock(rwlock, abstime, &pthread_rwlock_trywrlock);
|
||||
}
|
||||
|
||||
int pthread_rwlock_unlock(pthread_rwlock_t* rwlock)
|
||||
{
|
||||
if (BAN::atomic_load(rwlock->lockers) == rwlock_writer_locked)
|
||||
BAN::atomic_store(rwlock->lockers, 0);
|
||||
else
|
||||
BAN::atomic_sub_fetch(rwlock->lockers, 1);
|
||||
return 0;
|
||||
}
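A sketch of the rwlock protocol above (illustrative): lockers counts active readers, rwlock_writer_locked is the exclusive-writer sentinel, and writers makes pending writers visible so new readers back off.
pthread_rwlock_t lock;
pthread_rwlock_init(&lock, nullptr);

pthread_rwlock_rdlock(&lock);  // CAS lockers -> lockers + 1 while no writer holds or waits
/* ... read shared state ... */
pthread_rwlock_unlock(&lock);  // decrements lockers

pthread_rwlock_wrlock(&lock);  // bump writers, then CAS lockers 0 -> rwlock_writer_locked
/* ... modify shared state ... */
pthread_rwlock_unlock(&lock);  // stores 0 back into lockers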
|
||||
|
||||
int pthread_condattr_destroy(pthread_condattr_t* attr)
|
||||
{
|
||||
(void)attr;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_condattr_init(pthread_condattr_t* attr)
|
||||
{
|
||||
*attr = {
|
||||
.clock = CLOCK_REALTIME,
|
||||
.shared = false,
|
||||
};
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_condattr_getclock(const pthread_condattr_t* __restrict attr, clockid_t* __restrict clock_id)
|
||||
{
|
||||
*clock_id = attr->clock;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_condattr_setclock(pthread_condattr_t* attr, clockid_t clock_id)
|
||||
{
|
||||
switch (clock_id)
|
||||
{
|
||||
case CLOCK_MONOTONIC:
|
||||
case CLOCK_REALTIME:
|
||||
break;
|
||||
default:
|
||||
return EINVAL;
|
||||
}
|
||||
attr->clock = clock_id;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_condattr_getpshared(const pthread_condattr_t* __restrict attr, int* __restrict pshared)
|
||||
{
|
||||
*pshared = attr->shared ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_condattr_setpshared(pthread_condattr_t* attr, int pshared)
|
||||
{
|
||||
switch (pshared)
|
||||
{
|
||||
case PTHREAD_PROCESS_PRIVATE:
|
||||
attr->shared = false;
|
||||
return 0;
|
||||
case PTHREAD_PROCESS_SHARED:
|
||||
attr->shared = true;
|
||||
return 0;
|
||||
}
|
||||
return EINVAL;
|
||||
}
|
||||
|
||||
int pthread_cond_destroy(pthread_cond_t* cond)
|
||||
{
|
||||
(void)cond;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_cond_init(pthread_cond_t* __restrict cond, const pthread_condattr_t* __restrict attr)
|
||||
{
|
||||
const pthread_condattr_t default_attr = {
|
||||
.clock = CLOCK_MONOTONIC,
|
||||
.shared = false,
|
||||
};
|
||||
if (attr == nullptr)
|
||||
attr = &default_attr;
|
||||
*cond = {
|
||||
.attr = *attr,
|
||||
.lock = PTHREAD_SPIN_INITIALIZER,
|
||||
.block_list = nullptr,
|
||||
};
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_cond_broadcast(pthread_cond_t* cond)
|
||||
{
|
||||
pthread_spin_lock(&cond->lock);
|
||||
for (auto* block = cond->block_list; block; block = block->next)
|
||||
BAN::atomic_store(block->signaled, 1);
|
||||
pthread_spin_unlock(&cond->lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_cond_signal(pthread_cond_t* cond)
|
||||
{
|
||||
pthread_spin_lock(&cond->lock);
|
||||
if (cond->block_list)
|
||||
BAN::atomic_store(cond->block_list->signaled, 1);
|
||||
pthread_spin_unlock(&cond->lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_cond_wait(pthread_cond_t* __restrict cond, pthread_mutex_t* __restrict mutex)
|
||||
{
|
||||
return pthread_cond_timedwait(cond, mutex, nullptr);
|
||||
}
|
||||
|
||||
int pthread_cond_timedwait(pthread_cond_t* __restrict cond, pthread_mutex_t* __restrict mutex, const struct timespec* __restrict abstime)
|
||||
{
|
||||
constexpr auto has_timed_out =
|
||||
[](const struct timespec* abstime, clockid_t clock_id) -> bool
|
||||
{
|
||||
if (abstime == nullptr)
|
||||
return false;
|
||||
struct timespec curtime;
|
||||
clock_gettime(clock_id, &curtime);
|
||||
if (curtime.tv_sec < abstime->tv_sec)
|
||||
return false;
|
||||
if (curtime.tv_sec > abstime->tv_sec)
|
||||
return true;
|
||||
return curtime.tv_nsec >= abstime->tv_nsec;
|
||||
};
|
||||
|
||||
pthread_spin_lock(&cond->lock);
|
||||
_pthread_cond_block block = {
|
||||
.next = cond->block_list,
|
||||
.signaled = 0,
|
||||
};
|
||||
cond->block_list = █
|
||||
pthread_spin_unlock(&cond->lock);
|
||||
|
||||
pthread_mutex_unlock(mutex);
|
||||
|
||||
int ret = 0;
while (BAN::atomic_load(block.signaled) == 0)
{
if (has_timed_out(abstime, cond->attr.clock))
{
// don't return yet: the block entry must still be unlinked and the mutex re-acquired
ret = ETIMEDOUT;
break;
}
sched_yield();
}
|
||||
|
||||
pthread_spin_lock(&cond->lock);
|
||||
if (&block == cond->block_list)
|
||||
cond->block_list = block.next;
|
||||
else
|
||||
{
|
||||
_pthread_cond_block* prev = cond->block_list;
|
||||
while (prev->next != &block)
|
||||
prev = prev->next;
|
||||
prev->next = block.next;
|
||||
}
|
||||
pthread_spin_unlock(&cond->lock);
|
||||
|
||||
pthread_mutex_lock(mutex);
return ret;
|
||||
}
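A typical wait/signal pattern for the condition variable above (illustrative; ready, mutex and cond are assumed to be set up by the caller). The predicate is re-checked in a loop because the wait re-acquires the mutex before returning.
// waiting side
pthread_mutex_lock(&mutex);
while (!ready)
	pthread_cond_wait(&cond, &mutex); // unlocks mutex, spins on block.signaled, re-locks
/* ... consume ... */
pthread_mutex_unlock(&mutex);

// signalling side
pthread_mutex_lock(&mutex);
ready = true;
pthread_cond_signal(&cond);           // marks the first block_list entry as signaled
pthread_mutex_unlock(&mutex);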
|
||||
|
||||
int pthread_barrierattr_destroy(pthread_barrierattr_t* attr)
|
||||
{
|
||||
(void)attr;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_barrierattr_init(pthread_barrierattr_t* attr)
|
||||
{
|
||||
*attr = {
|
||||
.shared = false,
|
||||
};
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_barrierattr_getpshared(const pthread_barrierattr_t* __restrict attr, int* __restrict pshared)
|
||||
{
|
||||
*pshared = attr->shared ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_barrierattr_setpshared(pthread_barrierattr_t* attr, int pshared)
|
||||
{
|
||||
switch (pshared)
|
||||
{
|
||||
case PTHREAD_PROCESS_PRIVATE:
|
||||
attr->shared = false;
|
||||
return 0;
|
||||
case PTHREAD_PROCESS_SHARED:
|
||||
attr->shared = true;
|
||||
return 0;
|
||||
}
|
||||
return EINVAL;
|
||||
}
|
||||
|
||||
int pthread_barrier_destroy(pthread_barrier_t* barrier)
|
||||
{
|
||||
(void)barrier;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_barrier_init(pthread_barrier_t* __restrict barrier, const pthread_barrierattr_t* __restrict attr, unsigned count)
|
||||
{
|
||||
if (count == 0)
|
||||
return EINVAL;
|
||||
const pthread_barrierattr_t default_attr = {
|
||||
.shared = false,
|
||||
};
|
||||
if (attr == nullptr)
|
||||
attr = &default_attr;
|
||||
*barrier = {
|
||||
.attr = *attr,
|
||||
.target = count,
|
||||
.waiting = 0,
|
||||
};
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_barrier_wait(pthread_barrier_t* barrier)
|
||||
{
|
||||
const unsigned index = BAN::atomic_add_fetch(barrier->waiting, 1);
|
||||
|
||||
// FIXME: this case should be handled, but it should be relatively uncommon,
// so keep the simple implementation for now
|
||||
ASSERT(index <= barrier->target);
|
||||
|
||||
if (index == barrier->target)
|
||||
{
|
||||
BAN::atomic_store(barrier->waiting, 0);
|
||||
return PTHREAD_BARRIER_SERIAL_THREAD;
|
||||
}
|
||||
|
||||
while (BAN::atomic_load(barrier->waiting))
|
||||
sched_yield();
|
||||
return 0;
|
||||
}
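A barrier usage sketch (illustrative; num_threads, worker and merge_results are hypothetical): every participant blocks in pthread_barrier_wait until the count reaches the target, and exactly one of them gets PTHREAD_BARRIER_SERIAL_THREAD.
pthread_barrier_t barrier; // initialized once via pthread_barrier_init(&barrier, nullptr, num_threads)

void* worker(void*)
{
	/* ... phase one ... */
	if (pthread_barrier_wait(&barrier) == PTHREAD_BARRIER_SERIAL_THREAD)
		merge_results(); // hypothetical: runs exactly once, in the "serial" thread
	/* ... phase two ... */
	return nullptr;
}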
|
||||
|
||||
#if not __disable_thread_local_storage
|
||||
struct tls_index
|
||||
{
|
||||
unsigned long int ti_module;
|
||||
|
@ -288,3 +934,4 @@ extern "C" void* __attribute__((__regparm__(1))) ___tls_get_addr(tls_index* ti)
|
|||
return reinterpret_cast<void*>(get_uthread()->dtv[ti->ti_module] + ti->ti_offset);
|
||||
}
|
||||
#endif
|
||||
#endif
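A note on the lookup above (a sketch of the general-dynamic TLS model; the concrete values are placeholders): the compiler passes a tls_index whose ti_module selects the module's slot in the thread's DTV and whose ti_offset is the variable's offset inside that module's TLS block.
// roughly what a compiler-generated access to a dynamic thread_local boils down to
tls_index ti { /* ti_module = */ 1, /* ti_offset = */ 0 }; // placeholder values
int* px = static_cast<int*>(___tls_get_addr(&ti));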
|
||||
|
|
|
@ -382,25 +382,22 @@ int fputs(const char* str, FILE* file)
|
|||
size_t fread(void* buffer, size_t size, size_t nitems, FILE* file)
|
||||
{
|
||||
ScopeLock _(file);
|
||||
if (file->eof || nitems * size == 0)
|
||||
if (file->eof || size == 0 || nitems == 0)
|
||||
return 0;
|
||||
|
||||
size_t target = size * nitems;
|
||||
size_t nread = 0;
|
||||
|
||||
if (target == 0)
|
||||
return 0;
|
||||
|
||||
unsigned char* ubuffer = static_cast<unsigned char*>(buffer);
|
||||
while (nread < target)
|
||||
auto* ubuffer = static_cast<unsigned char*>(buffer);
|
||||
for (size_t item = 0; item < nitems; item++)
|
||||
{
|
||||
for (size_t byte = 0; byte < size; byte++)
|
||||
{
|
||||
int ch = getc_unlocked(file);
|
||||
if (ch == EOF)
|
||||
break;
|
||||
ubuffer[nread++] = ch;
|
||||
return item;
|
||||
*ubuffer++ = ch;
|
||||
}
|
||||
}
|
||||
|
||||
return nread / size;
|
||||
return nitems;
|
||||
}
|
||||
|
||||
FILE* freopen(const char* pathname, const char* mode_str, FILE* file)
|
||||
|
@ -507,10 +504,14 @@ void funlockfile(FILE* fp)
|
|||
size_t fwrite(const void* buffer, size_t size, size_t nitems, FILE* file)
|
||||
{
|
||||
ScopeLock _(file);
|
||||
unsigned char* ubuffer = (unsigned char*)buffer;
|
||||
for (size_t byte = 0; byte < nitems * size; byte++)
|
||||
if (putc_unlocked(ubuffer[byte], file) == EOF)
|
||||
return byte / size;
|
||||
if (size == 0 || nitems == 0)
|
||||
return 0;
|
||||
|
||||
const auto* ubuffer = static_cast<const unsigned char*>(buffer);
|
||||
for (size_t item = 0; item < nitems; item++)
|
||||
for (size_t byte = 0; byte < size; byte++)
|
||||
if (putc_unlocked(*ubuffer++, file) == EOF)
|
||||
return item;
|
||||
return nitems;
|
||||
}
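An illustrative note on the fread/fwrite rewrites above: both now loop per item, so the return value is always a count of complete items transferred (error handling is omitted for brevity; the file name is an assumption).
struct record { int id; double value; };
record recs[4] {};

FILE* fp = fopen("records.bin", "wb");
size_t written = fwrite(recs, sizeof(record), 4, fp); // 4 on success, otherwise the
                                                      // number of records fully written
fclose(fp);

fp = fopen("records.bin", "rb");
size_t nread = fread(recs, sizeof(record), 4, fp);    // complete records read; a trailing
                                                      // partial record is not counted
fclose(fp);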
|
||||
|
||||
|
|
|
@ -28,6 +28,14 @@ int msync(void* addr, size_t len, int flags)
|
|||
return syscall(SYS_MSYNC, addr, len, flags);
|
||||
}
|
||||
|
||||
int posix_madvise(void* addr, size_t len, int advice)
|
||||
{
|
||||
(void)addr;
|
||||
(void)len;
|
||||
(void)advice;
|
||||
fprintf(stddbg, "TODO: posix_madvise");
|
||||
return 0;
|
||||
}
|
||||
|
||||
#include <BAN/Assert.h>
|
||||
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
|
||||
#include <ctype.h>
|
||||
#include <errno.h>
|
||||
#include <langinfo.h>
|
||||
#include <string.h>
|
||||
#include <sys/syscall.h>
|
||||
#include <time.h>
|
||||
|
@ -201,20 +202,6 @@ size_t strftime(char* __restrict s, size_t maxsize, const char* __restrict forma
|
|||
char modifier = '\0';
|
||||
};
|
||||
|
||||
static constexpr const char* abbr_wday[] {
|
||||
"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"
|
||||
};
|
||||
static constexpr const char* full_wday[] {
|
||||
"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"
|
||||
};
|
||||
|
||||
static constexpr const char* abbr_mon[] {
|
||||
"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
|
||||
};
|
||||
static constexpr const char* full_mon[] {
|
||||
"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"
|
||||
};
|
||||
|
||||
const auto append_string =
|
||||
[&s, &len, &maxsize](const char* string) -> bool
|
||||
{
|
||||
|
@ -226,12 +213,23 @@ size_t strftime(char* __restrict s, size_t maxsize, const char* __restrict forma
|
|||
return true;
|
||||
};
|
||||
|
||||
const auto append_string_from_list =
|
||||
[&append_string]<size_t LIST_SIZE>(int index, const char* const (&list)[LIST_SIZE]) -> bool
|
||||
const auto append_month =
|
||||
[&append_string](int index, bool abbreviated)
|
||||
{
|
||||
const nl_item base = abbreviated ? ABMON_1 : MON_1;
|
||||
const char* string = "INVALID";
|
||||
if (index >= 0 && index < (int)LIST_SIZE)
|
||||
string = list[index];
|
||||
if (index >= 0 && index < 12)
|
||||
string = nl_langinfo(base + index);
|
||||
return append_string(string);
|
||||
};
|
||||
|
||||
const auto append_weekday =
|
||||
[&append_string](int index, bool abbreviated)
|
||||
{
|
||||
const nl_item base = abbreviated ? ABDAY_1 : DAY_1;
|
||||
const char* string = "INVALID";
|
||||
if (index >= 0 && index < 7)
|
||||
string = nl_langinfo(base + index);
|
||||
return append_string(string);
|
||||
};
|
||||
|
||||
|
@ -308,24 +306,24 @@ size_t strftime(char* __restrict s, size_t maxsize, const char* __restrict forma
|
|||
switch (*format)
|
||||
{
|
||||
case 'a':
|
||||
if (!append_string_from_list(timeptr->tm_wday, abbr_wday))
|
||||
if (!append_weekday(timeptr->tm_wday, true))
|
||||
return 0;
|
||||
break;
|
||||
case 'A':
|
||||
if (!append_string_from_list(timeptr->tm_wday, full_wday))
|
||||
if (!append_weekday(timeptr->tm_wday, false))
|
||||
return 0;
|
||||
break;
|
||||
case 'b':
|
||||
case 'h':
|
||||
if (!append_string_from_list(timeptr->tm_mon, abbr_mon))
|
||||
if (!append_month(timeptr->tm_mon, true))
|
||||
return 0;
|
||||
break;
|
||||
case 'B':
|
||||
if (!append_string_from_list(timeptr->tm_mon, full_mon))
|
||||
if (!append_month(timeptr->tm_mon, false))
|
||||
return 0;
|
||||
break;
|
||||
case 'c':
|
||||
if (size_t ret = strftime(s + len, maxsize - len, "%a %b %e %H:%M:%S %Y", timeptr))
|
||||
if (size_t ret = strftime(s + len, maxsize - len, nl_langinfo(D_T_FMT), timeptr))
|
||||
len += ret;
|
||||
else return 0;
|
||||
break;
|
||||
|
@ -401,11 +399,11 @@ size_t strftime(char* __restrict s, size_t maxsize, const char* __restrict forma
|
|||
s[len++] = '\n';
|
||||
break;
|
||||
case 'p':
|
||||
if (!append_string(timeptr->tm_hour < 12 ? "AM" : "PM"))
|
||||
if (!append_string(timeptr->tm_hour < 12 ? nl_langinfo(AM_STR) : nl_langinfo(PM_STR)))
|
||||
return 0;
|
||||
break;
|
||||
case 'r':
|
||||
if (size_t ret = strftime(s + len, maxsize - len, "%I:%M:%S %p", timeptr))
|
||||
if (size_t ret = strftime(s + len, maxsize - len, nl_langinfo(T_FMT_AMPM), timeptr))
|
||||
len += ret;
|
||||
else return 0;
|
||||
break;
|
||||
|
@ -501,12 +499,12 @@ size_t strftime(char* __restrict s, size_t maxsize, const char* __restrict forma
|
|||
return 0;
|
||||
break;
|
||||
case 'x':
|
||||
if (size_t ret = strftime(s + len, maxsize - len, "%m/%d/%y", timeptr))
|
||||
if (size_t ret = strftime(s + len, maxsize - len, nl_langinfo(D_FMT), timeptr))
|
||||
len += ret;
|
||||
else return 0;
|
||||
break;
|
||||
case 'X':
|
||||
if (size_t ret = strftime(s + len, maxsize - len, "%H:%M:%S", timeptr))
|
||||
if (size_t ret = strftime(s + len, maxsize - len, nl_langinfo(T_FMT), timeptr))
|
||||
len += ret;
|
||||
else return 0;
|
||||
break;
|
||||
|
|
|
@ -1,9 +1,11 @@
|
|||
#include <BAN/Assert.h>
|
||||
#include <BAN/Debug.h>
|
||||
#include <BAN/StringView.h>
|
||||
|
||||
#include <kernel/Memory/Types.h>
|
||||
#include <kernel/Syscall.h>
|
||||
|
||||
#include <ctype.h>
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <pthread.h>
|
||||
|
@ -165,8 +167,16 @@ int gethostname(char* name, size_t namelen)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int exec_impl(const char* pathname, char* const* argv, char* const* envp, bool do_path_resolution)
|
||||
static int exec_impl_shebang(FILE* fp, const char* pathname, char* const* argv, char* const* envp, int shebang_depth);
|
||||
|
||||
static int exec_impl(const char* pathname, char* const* argv, char* const* envp, bool do_path_resolution, int shebang_depth = 0)
|
||||
{
|
||||
if (shebang_depth > 100)
|
||||
{
|
||||
errno = ELOOP;
|
||||
return -1;
|
||||
}
|
||||
|
||||
char buffer[PATH_MAX];
|
||||
|
||||
if (do_path_resolution && strchr(pathname, '/') == nullptr)
|
||||
|
@ -211,9 +221,101 @@ static int exec_impl(const char* pathname, char* const* argv, char* const* envp,
|
|||
pathname = resolved;
|
||||
}
|
||||
|
||||
if (access(pathname, X_OK) == -1)
|
||||
return -1;
|
||||
|
||||
if (FILE* fp = fopen(pathname, "r"))
|
||||
{
|
||||
char shebang[2];
|
||||
if (fread(shebang, 1, 2, fp) == 2 && shebang[0] == '#' && shebang[1] == '!')
|
||||
return exec_impl_shebang(fp, pathname, argv, envp, shebang_depth);
|
||||
fclose(fp);
|
||||
}
|
||||
|
||||
return syscall(SYS_EXEC, pathname, argv, envp);
|
||||
}
|
||||
|
||||
static int exec_impl_shebang(FILE* fp, const char* pathname, char* const* argv, char* const* envp, int shebang_depth)
|
||||
{
|
||||
constexpr size_t buffer_len = PATH_MAX + 1 + ARG_MAX + 1;
|
||||
char* buffer = static_cast<char*>(malloc(buffer_len));
|
||||
if (buffer == nullptr)
|
||||
{
|
||||
fclose(fp);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (fgets(buffer, buffer_len, fp) == nullptr)
|
||||
{
|
||||
free(buffer);
|
||||
return -1;
|
||||
}
|
||||
|
||||
const auto sv_trim_whitespace =
|
||||
[](BAN::StringView sv) -> BAN::StringView
|
||||
{
|
||||
while (!sv.empty() && isspace(sv.front()))
|
||||
sv = sv.substring(1);
|
||||
while (!sv.empty() && isspace(sv.back()))
|
||||
sv = sv.substring(0, sv.size() - 1);
|
||||
return sv;
|
||||
};
|
||||
|
||||
BAN::StringView buffer_sv = buffer;
|
||||
if (buffer_sv.back() != '\n')
|
||||
{
|
||||
free(buffer);
|
||||
errno = ENOEXEC;
|
||||
return -1;
|
||||
}
|
||||
buffer_sv = sv_trim_whitespace(buffer_sv);
|
||||
|
||||
BAN::StringView interpreter, argument;
|
||||
if (auto space = buffer_sv.find([](char ch) -> bool { return isspace(ch); }); !space.has_value())
|
||||
interpreter = buffer_sv;
|
||||
else
|
||||
{
|
||||
interpreter = sv_trim_whitespace(buffer_sv.substring(0, space.value()));
|
||||
argument = sv_trim_whitespace(buffer_sv.substring(space.value()));
|
||||
}
|
||||
|
||||
if (interpreter.empty())
|
||||
{
|
||||
free(buffer);
|
||||
errno = ENOEXEC;
|
||||
return -1;
|
||||
}
|
||||
|
||||
// null terminate interpreter and argument
|
||||
const_cast<char*>(interpreter.data())[interpreter.size()] = '\0';
|
||||
if (!argument.empty())
|
||||
const_cast<char*>(argument.data())[argument.size()] = '\0';
|
||||
|
||||
size_t old_argc = 0;
|
||||
while (argv[old_argc])
|
||||
old_argc++;
|
||||
|
||||
const size_t extra_args = 1 + !argument.empty();
|
||||
char** new_argv = static_cast<char**>(malloc((extra_args + old_argc + 1) * sizeof(char*)));
|
||||
if (new_argv == nullptr)
|
||||
{
|
||||
free(buffer);
|
||||
return -1;
|
||||
}
|
||||
|
||||
new_argv[0] = const_cast<char*>(pathname);
|
||||
if (!argument.empty())
|
||||
new_argv[1] = const_cast<char*>(argument.data());
|
||||
for (size_t i = 0; i < old_argc; i++)
|
||||
new_argv[i + extra_args] = argv[i];
|
||||
new_argv[old_argc + extra_args] = nullptr;
|
||||
|
||||
exec_impl(interpreter.data(), new_argv, envp, true, shebang_depth + 1);
|
||||
free(new_argv);
|
||||
free(buffer);
|
||||
return -1;
|
||||
}
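A worked example of the shebang path above (the script path, its first line and the argv values are assumptions): executing /tmp/hello.sh whose first line is "#!/bin/sh -e" with argv = { "hello.sh", "world", NULL } splits the line into interpreter "/bin/sh" and argument "-e", then re-enters exec_impl as sketched below.
// extra_args == 2 because an interpreter argument is present, so the rebuilt argv is:
//   new_argv[0] = "/tmp/hello.sh"   (the script path passed to exec_impl_shebang)
//   new_argv[1] = "-e"              (everything after the interpreter on the #! line)
//   new_argv[2] = "hello.sh"        (original argv[0])
//   new_argv[3] = "world"           (original argv[1])
//   new_argv[4] = nullptr
// exec_impl("/bin/sh", new_argv, envp, true, shebang_depth + 1);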
|
||||
|
||||
static int execl_impl(const char* pathname, const char* arg0, va_list ap, bool has_env, bool do_path_resolution)
|
||||
{
|
||||
int argc = 1;
|
||||
|
@ -243,7 +345,9 @@ static int execl_impl(const char* pathname, const char* arg0, va_list ap, bool h
|
|||
envp = va_arg(ap, char**);
|
||||
}
|
||||
|
||||
return exec_impl(pathname, argv, envp, do_path_resolution);
|
||||
exec_impl(pathname, argv, envp, do_path_resolution);
|
||||
free(argv);
|
||||
return -1;
|
||||
}
|
||||
|
||||
int execl(const char* pathname, const char* arg0, ...)
|
||||
|
@ -322,16 +426,20 @@ char* getcwd(char* buf, size_t size)
|
|||
return nullptr;
|
||||
}
|
||||
|
||||
if ((char*)syscall(SYS_GET_PWD, buf, size) == nullptr)
|
||||
if (syscall(SYS_GETCWD, buf, size) == 0)
|
||||
return nullptr;
|
||||
|
||||
setenv("PWD", buf, 1);
|
||||
return buf;
|
||||
}
|
||||
|
||||
int chdir(const char* path)
|
||||
{
|
||||
return syscall(SYS_SET_PWD, path);
|
||||
return syscall(SYS_CHDIR, path);
|
||||
}
|
||||
|
||||
int fchdir(int fildes)
|
||||
{
|
||||
return syscall(SYS_FCHDIR, fildes);
|
||||
}
|
||||
|
||||
int chown(const char* path, uid_t owner, gid_t group)
|
||||
|
|
|
@ -24,3 +24,161 @@ int wcsncmp(const wchar_t* ws1, const wchar_t* ws2, size_t n)
|
|||
break;
|
||||
return *ws1 - *ws2;
|
||||
}
|
||||
|
||||
size_t wcslen(const wchar_t* ws)
|
||||
{
|
||||
size_t len = 0;
|
||||
for (; ws[len]; len++)
|
||||
continue;
|
||||
return len;
|
||||
}
|
||||
|
||||
size_t wcsnlen(const wchar_t* ws, size_t maxlen)
|
||||
{
|
||||
size_t len = 0;
|
||||
for (; ws[len] && len < maxlen; len++)
|
||||
continue;
|
||||
return len;
|
||||
}
|
||||
|
||||
wchar_t* wcpcpy(wchar_t* __restrict ws1, const wchar_t* __restrict ws2)
|
||||
{
|
||||
while (*ws2)
|
||||
*ws1++ = *ws2++;
|
||||
*ws1 = L'\0';
|
||||
return ws1;
|
||||
}
|
||||
|
||||
wchar_t* wcscpy(wchar_t* __restrict ws1, const wchar_t* __restrict ws2)
|
||||
{
|
||||
wcpcpy(ws1, ws2);
|
||||
return ws1;
|
||||
}
|
||||
|
||||
wchar_t* wcpncpy(wchar_t* __restrict ws1, const wchar_t* __restrict ws2, size_t n)
|
||||
{
|
||||
size_t i = 0;
|
||||
for (; ws2[i] && i < n; i++)
|
||||
ws1[i] = ws2[i];
|
||||
for (; i < n; i++)
|
||||
ws1[i] = L'\0';
|
||||
return &ws1[i];
|
||||
}
|
||||
|
||||
wchar_t* wcsncpy(wchar_t* __restrict ws1, const wchar_t* __restrict ws2, size_t n)
|
||||
{
|
||||
wcpncpy(ws1, ws2, n);
|
||||
return ws1;
|
||||
}
|
||||
|
||||
wchar_t* wcscat(wchar_t* __restrict ws1, const wchar_t* __restrict ws2)
|
||||
{
|
||||
wcscpy(ws1 + wcslen(ws1), ws2);
|
||||
return ws1;
|
||||
}
|
||||
|
||||
wchar_t* wcsncat(wchar_t* __restrict ws1, const wchar_t* __restrict ws2, size_t n)
|
||||
{
|
||||
size_t i = 0;
|
||||
for (; ws2[i] && i < n; i++)
|
||||
ws1[i] = ws2[i];
|
||||
ws1[i] = L'\0';
|
||||
return ws1;
|
||||
}
|
||||
|
||||
static size_t wcsspn_impl(const wchar_t* ws1, const wchar_t* ws2, bool accept)
|
||||
{
|
||||
size_t len = 0;
|
||||
for (; ws1[len]; len++)
|
||||
{
|
||||
bool found = false;
|
||||
for (size_t i = 0; ws2[i] && !found; i++)
|
||||
if (ws1[len] == ws2[i])
|
||||
found = true;
|
||||
if (found != accept)
|
||||
break;
|
||||
}
|
||||
return len;
|
||||
}
|
||||
|
||||
size_t wcsspn(const wchar_t* ws1, const wchar_t* ws2)
|
||||
{
|
||||
return wcsspn_impl(ws1, ws2, true);
|
||||
}
|
||||
|
||||
size_t wcscspn(const wchar_t* ws1, const wchar_t* ws2)
|
||||
{
|
||||
return wcsspn_impl(ws1, ws2, false);
|
||||
}
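Two quick examples of the span helpers above (the strings are arbitrary):
wcsspn(L"banan-os", L"ban"); // == 5: the prefix "banan" uses only characters from "ban"
wcscspn(L"banan-os", L"-");  // == 5: five characters precede the first '-'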
|
||||
|
||||
wchar_t* wcschr(const wchar_t* ws, wchar_t wc)
|
||||
{
|
||||
for (size_t i = 0; ws[i]; i++)
|
||||
if (ws[i] == wc)
|
||||
return const_cast<wchar_t*>(&ws[i]);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
wchar_t* wcsrchr(const wchar_t* ws, wchar_t wc)
|
||||
{
|
||||
wchar_t* result = nullptr;
|
||||
for (size_t i = 0; ws[i]; i++)
|
||||
if (ws[i] == wc)
|
||||
result = const_cast<wchar_t*>(&ws[i]);
|
||||
return result;
|
||||
}
|
||||
|
||||
wchar_t* wcsdup(const wchar_t* string)
|
||||
{
|
||||
const size_t len = wcslen(string);
|
||||
wchar_t* result = static_cast<wchar_t*>(malloc((len + 1) * sizeof(wchar_t)));
|
||||
if (result == nullptr)
|
||||
return nullptr;
|
||||
wmemcpy(result, string, len + 1);
|
||||
return result;
|
||||
}
|
||||
|
||||
wchar_t* wmemchr(const wchar_t* ws, wchar_t wc, size_t n)
|
||||
{
|
||||
for (size_t i = 0; i < n; i++)
|
||||
if (ws[i] == wc)
|
||||
return const_cast<wchar_t*>(&ws[i]);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
int wmemcmp(const wchar_t* ws1, const wchar_t* ws2, size_t n)
|
||||
{
|
||||
for (size_t i = 0; i < n; i++)
|
||||
if (ws1[i] != ws2[i])
|
||||
return ws1[i] - ws2[i];
|
||||
return 0;
|
||||
}
|
||||
|
||||
wchar_t* wmemcpy(wchar_t* __restrict ws1, const wchar_t* __restrict ws2, size_t n)
|
||||
{
|
||||
for (size_t i = 0; i < n; i++)
|
||||
ws1[i] = ws2[i];
|
||||
return ws1;
|
||||
}
|
||||
|
||||
wchar_t* wmemmove(wchar_t* ws1, const wchar_t* ws2, size_t n)
|
||||
{
|
||||
if (ws1 < ws2)
|
||||
{
|
||||
for (size_t i = 0; i < n; i++)
|
||||
ws1[i] = ws2[i];
|
||||
}
|
||||
else
|
||||
{
|
||||
for (size_t i = 1; i <= n; i++)
|
||||
ws1[n - i] = ws2[n - i];
|
||||
}
|
||||
return ws1;
|
||||
}
|
||||
|
||||
wchar_t* wmemset(wchar_t* ws, wchar_t wc, size_t n)
|
||||
{
|
||||
for (size_t i = 0; i < n; i++)
|
||||
ws[i] = wc;
|
||||
return ws;
|
||||
}
|
||||
|
|
|
@ -700,11 +700,7 @@ static void handle_dynamic(LoadedElf& elf)
|
|||
if (auto ret = syscall(SYS_REALPATH, path_buffer, realpath); ret < 0)
|
||||
print_error_and_exit("realpath", ret);
|
||||
|
||||
int library_fd = syscall(SYS_OPENAT, AT_FDCWD, realpath, O_RDONLY);
|
||||
if (library_fd < 0)
|
||||
print_error_and_exit("could not open library", library_fd);
|
||||
|
||||
const auto& loaded_elf = load_elf(realpath, library_fd);
|
||||
const auto& loaded_elf = load_elf(realpath, -1);
|
||||
dynamic.d_un.d_ptr = reinterpret_cast<uintptr_t>(&loaded_elf);
|
||||
}
|
||||
|
||||
|
@ -844,6 +840,9 @@ static LoadedElf& load_elf(const char* path, int fd)
|
|||
if (strcmp(s_loaded_files[i].path, path) == 0)
|
||||
return s_loaded_files[i];
|
||||
|
||||
if (fd == -1 && (fd = syscall(SYS_OPENAT, AT_FDCWD, path, O_RDONLY)) < 0)
|
||||
print_error_and_exit("could not open library", fd);
|
||||
|
||||
ElfNativeFileHeader file_header;
|
||||
if (auto ret = syscall(SYS_READ, fd, &file_header, sizeof(file_header)); ret != sizeof(file_header))
|
||||
print_error_and_exit("could not read file header", ret);
|
||||
|
|
|
@ -39,20 +39,17 @@ void Builtin::initialize()
|
|||
MUST(m_builtin_commands.emplace("export"_sv,
|
||||
[](Execute&, BAN::Span<const BAN::String> arguments, FILE*, FILE*) -> int
|
||||
{
|
||||
bool first = false;
|
||||
for (const auto& argument : arguments)
|
||||
for (size_t i = 1; i < arguments.size(); i++)
|
||||
{
|
||||
if (first)
|
||||
{
|
||||
first = false;
|
||||
continue;
|
||||
}
|
||||
const auto argument = arguments[i].sv();
|
||||
|
||||
auto split = MUST(argument.sv().split('=', true));
|
||||
if (split.size() != 2)
|
||||
const auto idx = argument.find('=');
|
||||
if (!idx.has_value())
|
||||
continue;
|
||||
|
||||
if (setenv(BAN::String(split[0]).data(), BAN::String(split[1]).data(), true) == -1)
|
||||
auto name = BAN::String(argument.substring(0, idx.value()));
|
||||
const char* value = argument.data() + idx.value() + 1;
|
||||
if (setenv(name.data(), value, true) == -1)
|
||||
ERROR_RETURN("setenv", 1);
|
||||
}
|
||||
return 0;
|
||||
|
@ -134,21 +131,20 @@ void Builtin::initialize()
|
|||
return 1;
|
||||
}
|
||||
|
||||
BAN::StringView path;
|
||||
|
||||
if (arguments.size() == 1)
|
||||
{
|
||||
if (const char* path_env = getenv("HOME"))
|
||||
path = path_env;
|
||||
const char* path = nullptr;
|
||||
if (arguments.size() == 2)
|
||||
path = arguments[1].data();
|
||||
else
|
||||
path = getenv("HOME");
|
||||
|
||||
if (path == nullptr)
|
||||
return 0;
|
||||
}
|
||||
else
|
||||
path = arguments[1];
|
||||
|
||||
if (chdir(path.data()) == -1)
|
||||
if (chdir(path) == -1)
|
||||
ERROR_RETURN("chdir", 1);
|
||||
|
||||
setenv("PWD", path, 1);
|
||||
|
||||
return 0;
|
||||
}, true
|
||||
));
|
||||
|
|
|
@ -100,6 +100,7 @@ BAN::ErrorOr<Execute::ExecuteResult> Execute::execute_command_no_wait(const Inte
|
|||
CHECK_FD_OR_PERROR_AND_EXIT(command.fd_out, STDOUT_FILENO);
|
||||
|
||||
execv(command.command.get<BAN::String>().data(), const_cast<char* const*>(exec_args.data()));
|
||||
perror("execv");
|
||||
exit(errno);
|
||||
}
|
||||
|
||||
|
|
|
@ -28,6 +28,12 @@ int main(int argc, char** argv)
|
|||
sigaction(SIGTTOU, &sa, nullptr);
|
||||
}
|
||||
|
||||
{
|
||||
char cwd_buffer[PATH_MAX];
|
||||
if (getcwd(cwd_buffer, sizeof(cwd_buffer)))
|
||||
setenv("PWD", cwd_buffer, 1);
|
||||
}
|
||||
|
||||
Builtin::get().initialize();
|
||||
|
||||
for (int i = 1; i < argc; i++)