2023-07-27 18:35:08 +03:00
|
|
|
#include <kernel/CriticalScope.h>
|
2023-06-03 02:23:14 +03:00
|
|
|
#include <kernel/LockGuard.h>
|
|
|
|
#include <kernel/Memory/Heap.h>
|
2023-06-09 00:37:43 +03:00
|
|
|
#include <kernel/Memory/PageTable.h>
|
2023-06-03 02:23:14 +03:00
|
|
|
#include <kernel/Storage/DiskCache.h>
|
|
|
|
#include <kernel/Storage/StorageDevice.h>
|
|
|
|
|
|
|
|
namespace Kernel
|
|
|
|
{
|
|
|
|
|
2023-09-11 01:25:16 +03:00
|
|
|
// Construct a cache for a device with the given sector size.
// A cache page stores PAGE_SIZE / sector_size consecutive sectors,
// tracked by the per-page sector/dirty bitmasks.
DiskCache::DiskCache(size_t sector_size, StorageDevice& device)
	: m_sector_size(sector_size)
	, m_device(device)
{
	// a page must hold a whole number of sectors
	ASSERT(PAGE_SIZE % m_sector_size == 0);

	// every sector of a page must have its own bit in both bitmasks
	const size_t sectors_per_page = PAGE_SIZE / m_sector_size;
	ASSERT(sectors_per_page <= sizeof(PageCache::sector_mask) * 8);
	ASSERT(sectors_per_page <= sizeof(PageCache::dirty_mask) * 8);
}
|
2023-06-03 02:23:14 +03:00
|
|
|
|
|
|
|
// Tear down the cache: flush dirty sectors and return every
// cached page to the heap.
DiskCache::~DiskCache()
{
	release_all_pages();
}
|
|
|
|
|
2023-07-27 21:57:32 +03:00
|
|
|
bool DiskCache::read_from_cache(uint64_t sector, uint8_t* buffer)
|
2023-06-03 02:23:14 +03:00
|
|
|
{
|
2023-07-27 21:57:32 +03:00
|
|
|
uint64_t sectors_per_page = PAGE_SIZE / m_sector_size;
|
|
|
|
uint64_t page_cache_offset = sector % sectors_per_page;
|
|
|
|
uint64_t page_cache_start = sector - page_cache_offset;
|
2023-06-03 02:23:14 +03:00
|
|
|
|
2023-07-27 21:57:32 +03:00
|
|
|
PageTable& page_table = PageTable::current();
|
|
|
|
LockGuard page_table_locker(page_table);
|
|
|
|
ASSERT(page_table.is_page_free(0));
|
2023-06-03 02:23:14 +03:00
|
|
|
|
2023-07-27 21:57:32 +03:00
|
|
|
for (auto& cache : m_cache)
|
2023-06-03 02:23:14 +03:00
|
|
|
{
|
2023-07-27 21:57:32 +03:00
|
|
|
if (cache.first_sector < page_cache_start)
|
2023-06-19 10:31:47 +03:00
|
|
|
continue;
|
2023-07-27 21:57:32 +03:00
|
|
|
if (cache.first_sector > page_cache_start)
|
2023-06-19 10:31:47 +03:00
|
|
|
break;
|
2023-06-03 02:23:14 +03:00
|
|
|
|
2023-07-27 21:57:32 +03:00
|
|
|
if (!(cache.sector_mask & (1 << page_cache_offset)))
|
|
|
|
continue;
|
|
|
|
|
2023-09-11 01:25:16 +03:00
|
|
|
CriticalScope _;
|
2023-07-27 21:57:32 +03:00
|
|
|
page_table.map_page_at(cache.paddr, 0, PageTable::Flags::Present);
|
|
|
|
memcpy(buffer, (void*)(page_cache_offset * m_sector_size), m_sector_size);
|
|
|
|
page_table.unmap_page(0);
|
|
|
|
|
|
|
|
return true;
|
2023-06-03 02:23:14 +03:00
|
|
|
}
|
|
|
|
|
2023-07-27 21:57:32 +03:00
|
|
|
return false;
|
|
|
|
};
|
2023-06-03 02:23:14 +03:00
|
|
|
|
2023-07-27 21:57:32 +03:00
|
|
|
// Store a single sector into the cache. If `dirty` is set the sector is
// marked for write-back on the next sync(). Allocates a new cache page
// when this sector's page is not yet cached; returns ENOMEM (or the
// container's insert error) on allocation failure.
BAN::ErrorOr<void> DiskCache::write_to_cache(uint64_t sector, const uint8_t* buffer, bool dirty)
{
	uint64_t sectors_per_page = PAGE_SIZE / m_sector_size;
	uint64_t page_cache_offset = sector % sectors_per_page;   // sector's slot within its page
	uint64_t page_cache_start = sector - page_cache_offset;   // first sector of the page

	// Cache pages are copied through a temporary mapping at vaddr 0.
	PageTable& page_table = PageTable::current();
	LockGuard page_table_locker(page_table);
	ASSERT(page_table.is_page_free(0));

	size_t index = 0;

	// Search the cache to see if we already have this sector's page in memory.
	// m_cache is sorted by first_sector; `index` ends at the insertion point
	// if the page is missing.
	for (; index < m_cache.size(); index++)
	{
		auto& cache = m_cache[index];

		if (cache.first_sector < page_cache_start)
			continue;
		if (cache.first_sector > page_cache_start)
			break;

		// page found: copy the sector into it through a writable mapping
		{
			CriticalScope _;
			page_table.map_page_at(cache.paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
			memcpy((void*)(page_cache_offset * m_sector_size), buffer, m_sector_size);
			page_table.unmap_page(0);
		}

		cache.sector_mask |= 1 << page_cache_offset;
		if (dirty)
			cache.dirty_mask |= 1 << page_cache_offset;

		return {};
	}

	// Page not cached; try to add a new page to the cache
	paddr_t paddr = Heap::get().take_free_page();
	if (paddr == 0)
		return BAN::Error::from_errno(ENOMEM);

	PageCache cache;
	cache.paddr = paddr;
	cache.first_sector = page_cache_start;
	cache.sector_mask = 1 << page_cache_offset;
	cache.dirty_mask = dirty ? cache.sector_mask : 0;

	if (auto ret = m_cache.insert(index, cache); ret.is_error())
	{
		// don't leak the page if the container couldn't grow
		Heap::get().release_page(paddr);
		return ret.error();
	}

	{
		CriticalScope _;
		// FIX: map the new page writable before memcpy-ing into it; it was
		// previously mapped with Flags::Present only, which relied on
		// supervisor-mode writes ignoring the R/W bit (CR0.WP clear) and was
		// inconsistent with the existing-page path above.
		page_table.map_page_at(cache.paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
		memcpy((void*)(page_cache_offset * m_sector_size), buffer, m_sector_size);
		page_table.unmap_page(0);
	}

	return {};
}
|
|
|
|
|
2023-09-11 01:25:16 +03:00
|
|
|
// Write every dirty sector in the cache back to the device.
// Must run on the kernel page table, since cache pages are temporarily
// mapped at virtual address 0 for copying.
BAN::ErrorOr<void> DiskCache::sync()
{
	ASSERT(&PageTable::current() == &PageTable::kernel());
	auto& page_table = PageTable::kernel();

	for (auto& cache : m_cache)
	{
		// nothing to flush on this page
		if (cache.dirty_mask == 0)
			continue;

		// Snapshot the whole cache page into m_sync_cache through a
		// temporary mapping at vaddr 0, so the device write below can
		// run without holding the mapping.
		{
			LockGuard _(page_table);
			ASSERT(page_table.is_page_free(0));

			page_table.map_page_at(cache.paddr, 0, PageTable::Flags::Present);
			memcpy(m_sync_cache.data(), (void*)0, PAGE_SIZE);
			page_table.unmap_page(0);
		}

		// Scan the dirty bitmask and flush each maximal run of
		// consecutive dirty sectors with a single device write.
		uint8_t sector_start = 0;
		uint8_t sector_count = 0;

		// NOTE: the loop bound deliberately probes one bit past the last
		// valid sector; that bit is never set, so an open run is always
		// terminated (and flushed) by the else branch below.
		while (sector_start + sector_count <= PAGE_SIZE / m_sector_size)
		{
			if (cache.dirty_mask & (1 << (sector_start + sector_count)))
				sector_count++;
			else if (sector_count == 0)
				sector_start++;
			else
			{
				dprintln("syncing {}->{}", cache.first_sector + sector_start, cache.first_sector + sector_start + sector_count);
				TRY(m_device.write_sectors_impl(cache.first_sector + sector_start, sector_count, m_sync_cache.data() + sector_start * m_sector_size));
				// the sector right after the run is known clean, skip it
				sector_start += sector_count + 1;
				sector_count = 0;
			}
		}

		// Defensive: flush a run still open when the loop exits
		// (not expected to trigger given the loop bound above).
		if (sector_count > 0)
		{
			dprintln("syncing {}->{}", cache.first_sector + sector_start, cache.first_sector + sector_start + sector_count);
			TRY(m_device.write_sectors_impl(cache.first_sector + sector_start, sector_count, m_sync_cache.data() + sector_start * m_sector_size));
		}

		// page is fully written back
		cache.dirty_mask = 0;
	}

	return {};
}
|
2023-06-03 02:23:14 +03:00
|
|
|
|
2023-06-19 10:31:47 +03:00
|
|
|
size_t DiskCache::release_clean_pages(size_t page_count)
|
|
|
|
{
|
|
|
|
// NOTE: There might not actually be page_count pages after this
|
|
|
|
// function returns. The synchronization must be done elsewhere.
|
|
|
|
|
2023-06-03 02:23:14 +03:00
|
|
|
size_t released = 0;
|
|
|
|
for (size_t i = 0; i < m_cache.size() && released < page_count;)
|
|
|
|
{
|
2023-06-19 10:31:47 +03:00
|
|
|
if (m_cache[i].dirty_mask == 0)
|
2023-06-03 02:23:14 +03:00
|
|
|
{
|
2023-06-19 10:31:47 +03:00
|
|
|
Heap::get().release_page(m_cache[i].paddr);
|
|
|
|
m_cache.remove(i);
|
|
|
|
released++;
|
2023-06-03 02:23:14 +03:00
|
|
|
continue;
|
|
|
|
}
|
2023-06-19 10:31:47 +03:00
|
|
|
i++;
|
2023-06-03 02:23:14 +03:00
|
|
|
}
|
|
|
|
|
2023-06-03 02:37:37 +03:00
|
|
|
(void)m_cache.shrink_to_fit();
|
|
|
|
|
2023-06-03 02:23:14 +03:00
|
|
|
return released;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t DiskCache::release_pages(size_t page_count)
|
|
|
|
{
|
|
|
|
size_t released = release_clean_pages(page_count);
|
|
|
|
if (released >= page_count)
|
2023-06-19 10:31:47 +03:00
|
|
|
return released;
|
2023-09-11 01:25:16 +03:00
|
|
|
if (!sync().is_error())
|
|
|
|
released += release_clean_pages(page_count - released);
|
|
|
|
return released;
|
2023-06-03 02:23:14 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Flush and free every page in the cache.
// NOTE(review): if sync() fails inside release_pages(), dirty pages stay
// allocated after this returns — presumably acceptable best-effort
// behavior; verify the shutdown/destructor path agrees.
void DiskCache::release_all_pages()
{
	release_pages(m_cache.size());
}
|
|
|
|
|
|
|
|
}
|