From 33b6536e6badb46d62cc2bd258134c19366ed6d4 Mon Sep 17 00:00:00 2001
From: Bananymous
Date: Thu, 1 Jan 2026 17:36:56 +0200
Subject: [PATCH] Kernel: Make disk cache entry lookup O(log n)

I have absolutely no idea why I was doing a linear lookup here :D
---
 kernel/include/kernel/Storage/DiskCache.h |   2 +
 kernel/kernel/Storage/DiskCache.cpp       | 128 ++++++++++++----
 2 files changed, 70 insertions(+), 60 deletions(-)

diff --git a/kernel/include/kernel/Storage/DiskCache.h b/kernel/include/kernel/Storage/DiskCache.h
index 0a8ffa0f..e25d05ee 100644
--- a/kernel/include/kernel/Storage/DiskCache.h
+++ b/kernel/include/kernel/Storage/DiskCache.h
@@ -28,6 +28,8 @@ namespace Kernel
 	private:
 		BAN::ErrorOr<void> sync_cache_index(size_t index);
 
+		size_t find_sector_cache_index(uint64_t sector) const;
+
 	private:
 		struct PageCache
 		{
diff --git a/kernel/kernel/Storage/DiskCache.cpp b/kernel/kernel/Storage/DiskCache.cpp
index 80b47536..aae81c89 100644
--- a/kernel/kernel/Storage/DiskCache.cpp
+++ b/kernel/kernel/Storage/DiskCache.cpp
@@ -21,85 +21,95 @@ namespace Kernel
 		release_all_pages();
 	}
 
+	size_t DiskCache::find_sector_cache_index(uint64_t sector) const
+	{
+		const uint64_t sectors_per_page = PAGE_SIZE / m_sector_size;
+		const uint64_t page_cache_offset = sector % sectors_per_page;
+		const uint64_t page_cache_start = sector - page_cache_offset;
+
+		size_t l = 0, r = m_cache.size();
+
+		while (l < r)
+		{
+			const size_t mid = (l + r) / 2;
+
+			if (m_cache[mid].first_sector == page_cache_start)
+				return mid;
+
+			if (m_cache[mid].first_sector < page_cache_start)
+				l = mid + 1;
+			else
+				r = mid;
+		}
+
+		return l;
+	}
+
 	bool DiskCache::read_from_cache(uint64_t sector, BAN::ByteSpan buffer)
 	{
 		ASSERT(buffer.size() >= m_sector_size);
 
-		uint64_t sectors_per_page = PAGE_SIZE / m_sector_size;
-		uint64_t page_cache_offset = sector % sectors_per_page;
-		uint64_t page_cache_start = sector - page_cache_offset;
+		const uint64_t sectors_per_page = PAGE_SIZE / m_sector_size;
+		const uint64_t page_cache_offset = sector % sectors_per_page;
+		const uint64_t page_cache_start = sector - page_cache_offset;
 
-		for (auto& cache : m_cache)
-		{
-			if (cache.first_sector + sectors_per_page <= page_cache_start)
-				continue;
-			if (cache.first_sector > page_cache_start)
-				break;
+		const auto index = find_sector_cache_index(sector);
+		if (index >= m_cache.size())
+			return false;
 
-			if (!(cache.sector_mask & (1 << page_cache_offset)))
-				continue;
+		const auto& cache = m_cache[index];
+		if (cache.first_sector != page_cache_start)
+			return false;
+		if (!(cache.sector_mask & (1 << page_cache_offset)))
+			return false;
 
-			PageTable::with_fast_page(cache.paddr, [&] {
-				memcpy(buffer.data(), PageTable::fast_page_as_ptr(page_cache_offset * m_sector_size), m_sector_size);
-			});
+		PageTable::with_fast_page(cache.paddr, [&] {
+			memcpy(buffer.data(), PageTable::fast_page_as_ptr(page_cache_offset * m_sector_size), m_sector_size);
+		});
 
-			return true;
-		}
-
-		return false;
+		return true;
 	};
 
 	BAN::ErrorOr<void> DiskCache::write_to_cache(uint64_t sector, BAN::ConstByteSpan buffer, bool dirty)
 	{
 		ASSERT(buffer.size() >= m_sector_size);
 
-		uint64_t sectors_per_page = PAGE_SIZE / m_sector_size;
-		uint64_t page_cache_offset = sector % sectors_per_page;
-		uint64_t page_cache_start = sector - page_cache_offset;
-		size_t index = 0;
+		const uint64_t sectors_per_page = PAGE_SIZE / m_sector_size;
+		const uint64_t page_cache_offset = sector % sectors_per_page;
+		const uint64_t page_cache_start = sector - page_cache_offset;
 
-		// Search the cache if the have this sector in memory
-		for (; index < m_cache.size(); index++)
+		const auto index = find_sector_cache_index(sector);
+
+		if (index >= m_cache.size() || m_cache[index].first_sector != page_cache_start)
 		{
-			auto& cache = m_cache[index];
+			paddr_t paddr = Heap::get().take_free_page();
+			if (paddr == 0)
+				return BAN::Error::from_errno(ENOMEM);
 
-			if (cache.first_sector + sectors_per_page <= page_cache_start)
-				continue;
-			if (cache.first_sector > page_cache_start)
-				break;
+			PageCache cache {
+				.paddr = paddr,
+				.first_sector = page_cache_start,
+				.sector_mask = 0,
+				.dirty_mask = 0,
+			};
 
-			PageTable::with_fast_page(cache.paddr, [&] {
-				memcpy(PageTable::fast_page_as_ptr(page_cache_offset * m_sector_size), buffer.data(), m_sector_size);
-			});
-
-			cache.sector_mask |= 1 << page_cache_offset;
-			if (dirty)
-				cache.dirty_mask |= 1 << page_cache_offset;
-
-			return {};
+			if (auto ret = m_cache.insert(index, cache); ret.is_error())
+			{
+				Heap::get().release_page(paddr);
+				return ret.error();
+			}
 		}
 
-		// Try to add new page to the cache
-		paddr_t paddr = Heap::get().take_free_page();
-		if (paddr == 0)
-			return BAN::Error::from_errno(ENOMEM);
-
-		PageCache cache;
-		cache.paddr = paddr;
-		cache.first_sector = page_cache_start;
-		cache.sector_mask = 1 << page_cache_offset;
-		cache.dirty_mask = dirty ? cache.sector_mask : 0;
-
-		if (auto ret = m_cache.insert(index, cache); ret.is_error())
-		{
-			Heap::get().release_page(paddr);
-			return ret.error();
-		}
+		auto& cache = m_cache[index];
 
 		PageTable::with_fast_page(cache.paddr, [&] {
 			memcpy(PageTable::fast_page_as_ptr(page_cache_offset * m_sector_size), buffer.data(), m_sector_size);
 		});
 
+		cache.sector_mask |= 1 << page_cache_offset;
+		if (dirty)
+			cache.dirty_mask |= 1 << page_cache_offset;
+
 		return {};
 	}
 
@@ -158,15 +168,13 @@ namespace Kernel
 		if (g_disable_disk_write)
 			return {};
 
-		uint64_t sectors_per_page = PAGE_SIZE / m_sector_size;
-		uint64_t page_cache_offset = sector % sectors_per_page;
-		uint64_t page_cache_start = sector - page_cache_offset;
+		const uint64_t sectors_per_page = PAGE_SIZE / m_sector_size;
+		const uint64_t page_cache_offset = sector % sectors_per_page;
+		const uint64_t page_cache_start = sector - page_cache_offset;
 
-		for (size_t i = 0; i < m_cache.size(); i++)
+		for (size_t i = find_sector_cache_index(sector); i < m_cache.size(); i++)
 		{
 			auto& cache = m_cache[i];
-			if (cache.first_sector + sectors_per_page <= page_cache_start)
-				continue;
 			if (cache.first_sector * sectors_per_page >= page_cache_start * sectors_per_page + block_count)
 				break;
 			TRY(sync_cache_index(i));
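
A note on the lookup, for anyone reading along: the new helper is a plain lower-bound binary search, so it only works because m_cache is kept sorted by first_sector, and on a miss it returns exactly the index where write_to_cache should insert the new page. Below is a minimal standalone sketch of the same search under simplified assumptions; it is not part of the patch. std::vector and bare uint64_t first-sector values stand in for the kernel's BAN::Vector of PageCache entries, and find_index is a made-up name for illustration.

#include <cstdint>
#include <cstdio>
#include <vector>

// Lower-bound search over page-aligned first sectors, sorted ascending.
// Returns the matching index on a hit, or the insertion index on a miss.
// (Hypothetical stand-in for DiskCache::find_sector_cache_index.)
size_t find_index(const std::vector<uint64_t>& first_sectors, uint64_t page_cache_start)
{
	size_t l = 0, r = first_sectors.size();
	while (l < r)
	{
		const size_t mid = (l + r) / 2;
		if (first_sectors[mid] == page_cache_start)
			return mid;
		if (first_sectors[mid] < page_cache_start)
			l = mid + 1;
		else
			r = mid;
	}
	return l; // lower bound: where the page would be inserted
}

int main()
{
	// Pages of 8 sectors each, e.g. PAGE_SIZE 4096 with 512-byte sectors.
	const std::vector<uint64_t> cached_pages { 0, 8, 24, 32 };
	printf("%zu\n", find_index(cached_pages, 24)); // hit:  prints 2
	printf("%zu\n", find_index(cached_pages, 16)); // miss: prints 2 (insert before 24)
}

Returning the lower bound on a miss is what lets write_to_cache call m_cache.insert(index, cache) directly instead of rescanning, and it is also why the sync loop above can start iterating at find_sector_cache_index(sector) rather than 0.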