Kernel: NVMe queues now support up to 64 simultaneous operations

Bananymous 2024-03-15 13:46:35 +02:00
parent 48ea9e1c1d
commit 090c3c9930
3 changed files with 67 additions and 28 deletions
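The change replaces the queue's single in-flight command slot (one m_done flag and one m_status word guarded by a Mutex) with a slot per NVMe command identifier (CID): a 64-bit m_used_mask tracks which CIDs are reserved, m_done_mask tracks which have completed, and m_status_codes[64] holds each command's completion status. CIDs at or beyond the queue depth are pre-marked as used, so at most min(qdepth, 64) commands are ever in flight.

The following is a minimal user-space sketch of this bitmask CID allocation, not code from the commit: the name CidAllocator is hypothetical, and std::atomic with a CAS retry stands in for the kernel's BAN::Atomic, SpinLock, and Semaphore (the kernel's reserve_cid blocks on the semaphore when the mask is full instead of failing).

#include <atomic>
#include <cstdint>
#include <optional>

class CidAllocator
{
public:
    explicit CidAllocator(uint32_t qdepth)
    {
        // CIDs at or beyond the queue depth are permanently "used",
        // mirroring the NVMeQueue constructor change in this commit.
        for (uint32_t i = qdepth; i < 64; i++)
            m_used_mask |= (uint64_t)1 << i;
    }

    // Claim the lowest clear bit; empty result means all CIDs are in flight.
    std::optional<uint16_t> try_reserve()
    {
        uint64_t used = m_used_mask.load();
        while (~used != 0)
        {
            uint16_t cid = 0;
            while (used & ((uint64_t)1 << cid))
                cid++;
            // Atomically set the bit; on CAS failure 'used' is reloaded and we retry.
            if (m_used_mask.compare_exchange_weak(used, used | ((uint64_t)1 << cid)))
                return cid;
        }
        return std::nullopt;
    }

    // Clearing the bit returns the CID to the pool.
    void release(uint16_t cid)
    {
        m_used_mask &= ~((uint64_t)1 << cid);
    }

private:
    std::atomic<uint64_t> m_used_mask { 0 };
};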

View File

@@ -3,7 +3,6 @@
 #include <BAN/UniqPtr.h>
 #include <BAN/Vector.h>
 #include <kernel/Interruptable.h>
-#include <kernel/Lock/Mutex.h>
 #include <kernel/Memory/DMARegion.h>
 #include <kernel/Semaphore.h>
 #include <kernel/Storage/NVMe/Definitions.h>
@@ -21,7 +20,9 @@ namespace Kernel
         virtual void handle_irq() final override;
 
     private:
-        Mutex m_mutex;
+        uint16_t reserve_cid();
+
+    private:
         BAN::UniqPtr<Kernel::DMARegion> m_completion_queue;
         BAN::UniqPtr<Kernel::DMARegion> m_submission_queue;
         volatile NVMe::DoorbellRegisters& m_doorbell;
@@ -30,9 +31,11 @@ namespace Kernel
         uint32_t m_cq_head { 0 };
         uint16_t m_cq_valid_phase { 1 };
 
-        Semaphore m_semaphore;
-        volatile uint16_t m_status;
-        volatile bool m_done { false };
+        Semaphore m_semaphore;
+        SpinLock m_lock;
+
+        BAN::Atomic<uint64_t> m_used_mask { 0 };
+        BAN::Atomic<uint64_t> m_done_mask { 0 };
+        volatile uint16_t m_status_codes[64] { };
     };
}

View File

@@ -92,10 +92,10 @@ namespace Kernel
         TRY(wait_until_ready(true));
         cc.en = 0;
         TRY(wait_until_ready(false));
-        dprintln_if(DEBUG_NVMe, "  controller reset");
+        dprintln_if(DEBUG_NVMe, "  controller reset");
         TRY(create_admin_queue());
-        dprintln_if(DEBUG_NVMe, "  created admin queue");
+        dprintln_if(DEBUG_NVMe, "  created admin queue");
 
         // Configure controller
         cc.ams = 0;

View File

@@ -15,6 +15,8 @@ namespace Kernel
         , m_doorbell(db)
         , m_qdepth(qdepth)
     {
+        for (uint32_t i = qdepth; i < 64; i++)
+            m_used_mask |= (uint64_t)1 << i;
         set_irq(irq);
         enable_interrupt();
     }
@@ -27,12 +29,13 @@ namespace Kernel
         {
             uint16_t sts = cq_ptr[m_cq_head].sts >> 1;
             uint16_t cid = cq_ptr[m_cq_head].cid;
-            ASSERT(cid == 0);
+            uint64_t cid_mask = (uint64_t)1 << cid;
+            ASSERT(cid < 64);
 
-            ASSERT(!m_done);
-            m_status = sts;
-            m_done = true;
-            m_semaphore.unblock();
+            ASSERT((m_done_mask & cid_mask) == 0);
+
+            m_status_codes[cid] = sts;
+            m_done_mask |= cid_mask;
 
             m_cq_head = (m_cq_head + 1) % m_qdepth;
             if (m_cq_head == 0)
@@ -40,42 +43,75 @@
         }
         m_doorbell.cq_head = m_cq_head;
+        m_semaphore.unblock();
     }
 
     uint16_t NVMeQueue::submit_command(NVMe::SubmissionQueueEntry& sqe)
     {
-        LockGuard _(m_mutex);
+        uint16_t cid = reserve_cid();
+        uint64_t cid_mask = (uint64_t)1 << cid;
 
-        ASSERT(m_done == false);
-        m_status = 0;
+        {
+            SpinLockGuard _(m_lock);
 
-        sqe.cid = 0;
+            m_done_mask &= ~cid_mask;
+            m_status_codes[cid] = 0;
 
-        auto* sqe_ptr = reinterpret_cast<NVMe::SubmissionQueueEntry*>(m_submission_queue->vaddr());
-        memcpy(&sqe_ptr[m_sq_tail], &sqe, sizeof(NVMe::SubmissionQueueEntry));
-        m_sq_tail = (m_sq_tail + 1) % m_qdepth;
-        m_doorbell.sq_tail = m_sq_tail;
+            sqe.cid = cid;
+            auto* sqe_ptr = reinterpret_cast<NVMe::SubmissionQueueEntry*>(m_submission_queue->vaddr());
+            memcpy(&sqe_ptr[m_sq_tail], &sqe, sizeof(NVMe::SubmissionQueueEntry));
+            m_sq_tail = (m_sq_tail + 1) % m_qdepth;
+            m_doorbell.sq_tail = m_sq_tail;
+        }
         const uint64_t start_time = SystemTimer::get().ms_since_boot();
         while (SystemTimer::get().ms_since_boot() < start_time + s_nvme_command_poll_timeout_ms)
         {
-            if (!m_done)
-                continue;
-            m_done = false;
-            return m_status;
+            if (m_done_mask & cid_mask)
+            {
+                uint16_t status = m_status_codes[cid];
+                m_used_mask &= ~cid_mask;
+                return status;
+            }
         }
 
         while (SystemTimer::get().ms_since_boot() < start_time + s_nvme_command_timeout_ms)
         {
-            if (m_done)
+            if (m_done_mask & cid_mask)
             {
-                m_done = false;
-                return m_status;
+                uint16_t status = m_status_codes[cid];
+                m_used_mask &= ~cid_mask;
+                return status;
             }
             m_semaphore.block_with_wake_time(start_time + s_nvme_command_timeout_ms);
         }
 
+        m_used_mask &= ~cid_mask;
         return 0xFFFF;
     }
 
+    uint16_t NVMeQueue::reserve_cid()
+    {
+        auto state = m_lock.lock();
+        while (~m_used_mask == 0)
+        {
+            m_lock.unlock(state);
+            m_semaphore.block_with_timeout(s_nvme_command_timeout_ms);
+            state = m_lock.lock();
+        }
+        uint16_t cid = 0;
+        for (; cid < 64; cid++)
+            if ((m_used_mask & ((uint64_t)1 << cid)) == 0)
+                break;
+        ASSERT(cid < 64);
+        ASSERT(cid < m_qdepth);
+        m_used_mask |= (uint64_t)1 << cid;
+        m_lock.unlock(state);
+        return cid;
+    }
}
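
Taken together, the rendezvous works like this: handle_irq publishes the completion status into m_status_codes[cid], sets that CID's bit in m_done_mask, and signals the shared semaphore to wake blocked submitters; each submitter checks only its own bit and finally clears its m_used_mask bit to recycle the CID. Below is a minimal user-space model of that pattern, not code from the commit: the name CompletionBoard is hypothetical, and std::mutex with std::condition_variable stands in for the kernel's SpinLock and Semaphore.

#include <condition_variable>
#include <cstdint>
#include <mutex>

class CompletionBoard
{
public:
    // "IRQ side": record the status, mark the CID done, wake the waiters.
    // notify_all() plays the role of m_semaphore.unblock() here: waiters for
    // different CIDs share one wakeup primitive and each re-checks its own bit.
    void complete(uint16_t cid, uint16_t status)
    {
        {
            std::lock_guard<std::mutex> lock(m_mutex);
            m_status_codes[cid] = status;
            m_done_mask |= (uint64_t)1 << cid;
        }
        m_cv.notify_all();
    }

    // Submitter side: sleep until this CID's done bit is set,
    // then consume the slot and return its status.
    uint16_t wait(uint16_t cid)
    {
        const uint64_t cid_mask = (uint64_t)1 << cid;
        std::unique_lock<std::mutex> lock(m_mutex);
        m_cv.wait(lock, [&] { return (m_done_mask & cid_mask) != 0; });
        m_done_mask &= ~cid_mask;
        return m_status_codes[cid];
    }

private:
    std::mutex m_mutex;
    std::condition_variable m_cv;
    uint64_t m_done_mask { 0 };
    uint16_t m_status_codes[64] { };
};

Unlike this model, the kernel first busy-polls for s_nvme_command_poll_timeout_ms, then bounds the blocking wait by s_nvme_command_timeout_ms and returns status 0xFFFF on timeout, releasing the CID in every path.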