Kernel: Implement byte ring buffer
The ring's backing pages are mapped twice, back to back, in virtual memory, so memcpy and data accesses never need to handle wrapping around the end of the buffer.
This commit is contained in:
parent
1ecd7cc2fe
commit
493b5cb9b1
|
|
@ -50,6 +50,7 @@ set(KERNEL_SOURCES
|
|||
kernel/InterruptController.cpp
|
||||
kernel/kernel.cpp
|
||||
kernel/Lock/SpinLock.cpp
|
||||
kernel/Memory/ByteRingBuffer.cpp
|
||||
kernel/Memory/DMARegion.cpp
|
||||
kernel/Memory/FileBackedRegion.cpp
|
||||
kernel/Memory/Heap.cpp
|
||||
|
|
|
|||
|
|
@ -0,0 +1,58 @@
|
|||
#pragma once
|
||||
|
||||
#include <BAN/ByteSpan.h>
|
||||
#include <BAN/UniqPtr.h>
|
||||
#include <BAN/Vector.h>
|
||||
|
||||
#include <kernel/Memory/Types.h>
|
||||
|
||||
namespace Kernel
|
||||
{
|
||||
|
||||
class ByteRingBuffer
|
||||
{
|
||||
public:
|
||||
static BAN::ErrorOr<BAN::UniqPtr<ByteRingBuffer>> create(size_t size);
|
||||
~ByteRingBuffer();
|
||||
|
||||
void push(BAN::ConstByteSpan data)
|
||||
{
|
||||
ASSERT(data.size() + m_size <= m_capacity);
|
||||
uint8_t* buffer_head = reinterpret_cast<uint8_t*>(m_vaddr) + (m_tail + m_size) % m_capacity;
|
||||
memcpy(buffer_head, data.data(), data.size());
|
||||
m_size += data.size();
|
||||
}
|
||||
|
||||
void pop(size_t size)
|
||||
{
|
||||
ASSERT(size <= m_size);
|
||||
m_tail = (m_tail + size) % m_capacity;
|
||||
m_size -= size;
|
||||
}
|
||||
|
||||
BAN::ConstByteSpan get_data() const
|
||||
{
|
||||
const uint8_t* base = reinterpret_cast<const uint8_t*>(m_vaddr);
|
||||
return { base + m_tail, m_size };
|
||||
}
|
||||
|
||||
bool empty() const { return m_size == 0; }
|
||||
bool full() const { return m_size == m_capacity; }
|
||||
size_t free() const { return m_capacity - m_size; }
|
||||
size_t size() const { return m_size; }
|
||||
size_t capacity() const { return m_capacity; }
|
||||
|
||||
private:
|
||||
ByteRingBuffer(size_t capacity)
|
||||
: m_capacity(capacity)
|
||||
{ }
|
||||
|
||||
private:
|
||||
size_t m_size { 0 };
|
||||
size_t m_tail { 0 };
|
||||
const size_t m_capacity;
|
||||
|
||||
vaddr_t m_vaddr { 0 };
|
||||
};
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,49 @@
|
|||
#include <kernel/Memory/ByteRingBuffer.h>
|
||||
#include <kernel/Memory/Heap.h>
|
||||
#include <kernel/Memory/PageTable.h>
|
||||
|
||||
namespace Kernel
|
||||
{
|
||||
|
||||
// Allocate a ring buffer of `size` bytes (must be page aligned) and map its
// physical pages twice, back to back, so the contents appear linear across
// the wrap point. Returns ENOMEM if any allocation or mapping step fails;
// partially-built state is cleaned up by the destructor via the UniqPtr.
BAN::ErrorOr<BAN::UniqPtr<ByteRingBuffer>> ByteRingBuffer::create(size_t size)
{
	ASSERT(size % PAGE_SIZE == 0);

	auto* raw_buffer = new ByteRingBuffer(size);
	if (raw_buffer == nullptr)
		return BAN::Error::from_errno(ENOMEM);
	auto result = BAN::UniqPtr<ByteRingBuffer>::adopt(raw_buffer);

	// Reserve virtual space for two consecutive copies of the buffer.
	const size_t page_count = size / PAGE_SIZE;
	result->m_vaddr = PageTable::kernel().reserve_free_contiguous_pages(page_count * 2, KERNEL_OFFSET);
	if (result->m_vaddr == 0)
		return BAN::Error::from_errno(ENOMEM);

	// Back both halves of the virtual range with the SAME physical pages,
	// which is what makes the buffer contents appear twice in a row.
	for (size_t page = 0; page < page_count; page++)
	{
		const paddr_t paddr = Heap::get().take_free_page();
		if (paddr == 0)
			return BAN::Error::from_errno(ENOMEM);
		const vaddr_t vaddr = result->m_vaddr + page * PAGE_SIZE;
		PageTable::kernel().map_page_at(paddr, vaddr, PageTable::ReadWrite | PageTable::Present);
		PageTable::kernel().map_page_at(paddr, vaddr + size, PageTable::ReadWrite | PageTable::Present);
	}

	return result;
}
|
||||
|
||||
// Release the physical pages and unmap the doubly-mapped virtual range.
// Only the first half's pages are released: the second half maps the same
// physical pages, and releasing them again would be a double free.
ByteRingBuffer::~ByteRingBuffer()
{
	// create() may have failed before reserving any virtual space.
	if (m_vaddr == 0)
		return;

	const size_t page_count = m_capacity / PAGE_SIZE;
	for (size_t page = 0; page < page_count; page++)
	{
		const paddr_t paddr = PageTable::kernel().physical_address_of(m_vaddr + page * PAGE_SIZE);
		if (paddr == 0)
			break; // presumably create() failed part-way; later pages were never mapped
		Heap::get().release_page(paddr);
	}

	PageTable::kernel().unmap_range(m_vaddr, m_capacity * 2);
}
|
||||
|
||||
}
|
||||
Loading…
Reference in New Issue