Compare commits

...

2 Commits

Author SHA1 Message Date
Bananymous a1ab44d39f Kernel: Optimize disk reads to read multiple sectors at once
Old StorageDevice::read_sectors() read each sector separately if the
underlying disk had a disk cache. This patch allows multiple sectors to
be read even if the disk cache exists and contains some of the sectors.

Only sectors that could not be found from the disk cache are actually
read from the disk. This optimization is not done for writing, which
still will write each sector separately, if disk cache has no memory to
store new sectors. It would feel like an unnecessary optimization, as you
have greater problems if the disk cache cannot allocate a single page.
2024-05-27 15:52:34 +03:00
Bananymous 8b1514e575 Kernel: Make all storage devices readable and writable
I only had a {read,write}_impl defined for ATABaseDevice. This patch
moves that implementation to the general storage device.
2024-05-27 13:41:55 +03:00
4 changed files with 55 additions and 41 deletions

View File

@@ -32,9 +32,6 @@ namespace Kernel
virtual dev_t rdev() const override { return m_rdev; }
virtual BAN::ErrorOr<size_t> read_impl(off_t, BAN::ByteSpan) override;
virtual BAN::ErrorOr<size_t> write_impl(off_t, BAN::ConstByteSpan) override;
protected:
ATABaseDevice();
BAN::ErrorOr<void> initialize(BAN::Span<const uint16_t> identify_data);

View File

@@ -40,6 +40,9 @@ namespace Kernel
virtual BAN::ErrorOr<void> write_sectors_impl(uint64_t lba, uint64_t sector_count, BAN::ConstByteSpan) = 0;
void add_disk_cache();
virtual BAN::ErrorOr<size_t> read_impl(off_t, BAN::ByteSpan) override;
virtual BAN::ErrorOr<size_t> write_impl(off_t, BAN::ConstByteSpan) override;
virtual bool can_read_impl() const override { return true; }
virtual bool can_write_impl() const override { return true; }
virtual bool has_error_impl() const override { return false; }

View File

@@ -77,26 +77,6 @@ namespace Kernel
return {};
}
// Byte-granularity read entry point for ATA base devices.
// Only whole-sector requests are accepted: both the starting offset and
// the buffer length must be multiples of the device sector size.
BAN::ErrorOr<size_t> detail::ATABaseDevice::read_impl(off_t offset, BAN::ByteSpan buffer)
{
	const bool offset_aligned = (offset % sector_size()) == 0;
	const bool length_aligned = (buffer.size() % sector_size()) == 0;
	if (!offset_aligned || !length_aligned)
		return BAN::Error::from_errno(EINVAL);
	// Translate the byte range into an LBA range and delegate to the
	// sector-based API; propagate any error via TRY.
	TRY(read_sectors(offset / sector_size(), buffer.size() / sector_size(), buffer));
	return buffer.size();
}
// Byte-granularity write entry point for ATA base devices.
// Mirrors read_impl(): requests that are not aligned to full sectors are
// rejected with EINVAL before any disk access happens.
BAN::ErrorOr<size_t> detail::ATABaseDevice::write_impl(off_t offset, BAN::ConstByteSpan buffer)
{
	const bool offset_aligned = (offset % sector_size()) == 0;
	const bool length_aligned = (buffer.size() % sector_size()) == 0;
	if (!offset_aligned || !length_aligned)
		return BAN::Error::from_errno(EINVAL);
	// Convert bytes to sectors and hand off to the sector-based API.
	TRY(write_sectors(offset / sector_size(), buffer.size() / sector_size(), buffer));
	return buffer.size();
}
BAN::ErrorOr<BAN::RefPtr<ATADevice>> ATADevice::create(BAN::RefPtr<ATABus> bus, ATABus::DeviceType type, bool is_secondary, BAN::Span<const uint16_t> identify_data)
{
auto* device_ptr = new ATADevice(bus, type, is_secondary);

View File

@@ -224,20 +224,36 @@ namespace Kernel
{
ASSERT(buffer.size() >= sector_count * sector_size());
{
LockGuard _(m_mutex);
if (!m_disk_cache.has_value())
return read_sectors_impl(lba, sector_count, buffer);
}
LockGuard _(m_mutex);
for (uint64_t offset = 0; offset < sector_count; offset++)
if (!m_disk_cache.has_value())
return read_sectors_impl(lba, sector_count, buffer);
uint64_t sectors_done = 0;
while (sectors_done < sector_count)
{
LockGuard _(m_mutex);
auto sector_buffer = buffer.slice(offset * sector_size(), sector_size());
if (m_disk_cache->read_from_cache(lba + offset, sector_buffer))
continue;
TRY(read_sectors_impl(lba + offset, 1, sector_buffer));
(void)m_disk_cache->write_to_cache(lba + offset, sector_buffer, false);
const uint32_t segment_sector_count = BAN::Math::min<uint32_t>(sector_count - sectors_done, 64);
uint64_t needed_sector_bitmask = (static_cast<uint64_t>(1) << segment_sector_count) - 1;
for (uint32_t i = 0; i < segment_sector_count; i++)
if (m_disk_cache->read_from_cache(lba + sectors_done + i, buffer.slice((sectors_done + i) * sector_size(), sector_size())))
needed_sector_bitmask &= ~(static_cast<uint64_t>(1) << i);
for (uint32_t i = 0; i < segment_sector_count && needed_sector_bitmask; i++)
{
if (!(needed_sector_bitmask & (static_cast<uint64_t>(1) << i)))
continue;
uint32_t len = 1;
while (needed_sector_bitmask & (static_cast<uint64_t>(1) << (i + len)))
len++;
auto segment_buffer = buffer.slice((sectors_done + i) * sector_size(), len * sector_size());
TRY(read_sectors_impl(lba + sectors_done + i, len, segment_buffer));
for (uint32_t j = 0; j < len; j++)
(void)m_disk_cache->write_to_cache(lba + sectors_done + i + j, segment_buffer.slice(j * sector_size(), sector_size()), false);
needed_sector_bitmask &= ~(((static_cast<uint64_t>(1) << len) - 1) << i);
i += len;
}
sectors_done += segment_sector_count;
}
return {};
@@ -247,15 +263,13 @@ namespace Kernel
{
ASSERT(buffer.size() >= sector_count * sector_size());
{
LockGuard _(m_mutex);
if (!m_disk_cache.has_value())
return write_sectors_impl(lba, sector_count, buffer);
}
LockGuard _(m_mutex);
if (!m_disk_cache.has_value())
return write_sectors_impl(lba, sector_count, buffer);
for (uint8_t offset = 0; offset < sector_count; offset++)
{
LockGuard _(m_mutex);
auto sector_buffer = buffer.slice(offset * sector_size(), sector_size());
if (m_disk_cache->write_to_cache(lba + offset, sector_buffer, true).is_error())
TRY(write_sectors_impl(lba + offset, 1, sector_buffer));
@@ -264,4 +278,24 @@ namespace Kernel
return {};
}
// Generic byte-level read for any storage device. Only sector-aligned
// offsets and lengths are supported at this layer; partial-sector access
// must be handled by callers.
BAN::ErrorOr<size_t> StorageDevice::read_impl(off_t offset, BAN::ByteSpan buffer)
{
	if (offset % sector_size() != 0)
		return BAN::Error::from_errno(EINVAL);
	if (buffer.size() % sector_size() != 0)
		return BAN::Error::from_errno(EINVAL);
	// Delegate to the (possibly cache-backed) sector read path.
	const uint64_t first_lba = offset / sector_size();
	const uint64_t lba_count = buffer.size() / sector_size();
	TRY(read_sectors(first_lba, lba_count, buffer));
	return buffer.size();
}
// Generic byte-level write for any storage device. As with reads, the
// request must cover whole sectors or it is rejected with EINVAL.
BAN::ErrorOr<size_t> StorageDevice::write_impl(off_t offset, BAN::ConstByteSpan buffer)
{
	if (offset % sector_size() != 0)
		return BAN::Error::from_errno(EINVAL);
	if (buffer.size() % sector_size() != 0)
		return BAN::Error::from_errno(EINVAL);
	// Delegate to the (possibly cache-backed) sector write path.
	const uint64_t first_lba = offset / sector_size();
	const uint64_t lba_count = buffer.size() / sector_size();
	TRY(write_sectors(first_lba, lba_count, buffer));
	return buffer.size();
}
}