diff --git a/kernel/include/kernel/Memory/FileBackedRegion.h b/kernel/include/kernel/Memory/FileBackedRegion.h index a96cad22..94f47e5e 100644 --- a/kernel/include/kernel/Memory/FileBackedRegion.h +++ b/kernel/include/kernel/Memory/FileBackedRegion.h @@ -33,6 +33,7 @@ namespace Kernel BAN::ErrorOr msync(vaddr_t, size_t, int) override; BAN::ErrorOr> clone(PageTable& new_page_table) override; + BAN::ErrorOr> split(size_t offset) override; protected: BAN::ErrorOr allocate_page_containing_impl(vaddr_t vaddr, bool wants_write) override; diff --git a/kernel/include/kernel/Memory/MemoryBackedRegion.h b/kernel/include/kernel/Memory/MemoryBackedRegion.h index 47c60e31..de1790d6 100644 --- a/kernel/include/kernel/Memory/MemoryBackedRegion.h +++ b/kernel/include/kernel/Memory/MemoryBackedRegion.h @@ -15,6 +15,7 @@ namespace Kernel ~MemoryBackedRegion(); BAN::ErrorOr> clone(PageTable& new_page_table) override; + BAN::ErrorOr> split(size_t offset) override; BAN::ErrorOr msync(vaddr_t, size_t, int) override { return {}; } diff --git a/kernel/include/kernel/Memory/MemoryRegion.h b/kernel/include/kernel/Memory/MemoryRegion.h index 20df478c..9a441f6e 100644 --- a/kernel/include/kernel/Memory/MemoryRegion.h +++ b/kernel/include/kernel/Memory/MemoryRegion.h @@ -60,6 +60,7 @@ namespace Kernel BAN::ErrorOr allocate_page_containing(vaddr_t address, bool wants_write); virtual BAN::ErrorOr> clone(PageTable& new_page_table) = 0; + virtual BAN::ErrorOr> split(size_t offset) = 0; protected: MemoryRegion(PageTable&, size_t size, Type type, PageTable::flags_t flags, int status_flags); @@ -69,7 +70,7 @@ namespace Kernel protected: PageTable& m_page_table; - const size_t m_size; + size_t m_size { 0 }; const Type m_type; PageTable::flags_t m_flags; const int m_status_flags; diff --git a/kernel/include/kernel/Memory/SharedMemoryObject.h b/kernel/include/kernel/Memory/SharedMemoryObject.h index 7ad614c9..fafa5bfa 100644 --- a/kernel/include/kernel/Memory/SharedMemoryObject.h +++ 
b/kernel/include/kernel/Memory/SharedMemoryObject.h @@ -58,6 +58,7 @@ namespace Kernel static BAN::ErrorOr> create(BAN::RefPtr, PageTable&, AddressRange); BAN::ErrorOr> clone(PageTable& new_page_table) override; + BAN::ErrorOr> split(size_t offset) override; BAN::ErrorOr msync(vaddr_t, size_t, int) override { return {}; } diff --git a/kernel/include/kernel/Process.h b/kernel/include/kernel/Process.h index 8a46c6ac..41c85638 100644 --- a/kernel/include/kernel/Process.h +++ b/kernel/include/kernel/Process.h @@ -172,6 +172,7 @@ namespace Kernel BAN::ErrorOr sys_readdir(int fd, struct dirent* list, size_t list_len); + BAN::ErrorOr>> split_memory_region(BAN::UniqPtr&& region, vaddr_t base, size_t length); BAN::ErrorOr sys_mmap(const sys_mmap_t*); BAN::ErrorOr sys_munmap(void* addr, size_t len); BAN::ErrorOr sys_mprotect(void* addr, size_t len, int prot); diff --git a/kernel/kernel/Device/FramebufferDevice.cpp b/kernel/kernel/Device/FramebufferDevice.cpp index 1f4ad793..071a343d 100644 --- a/kernel/kernel/Device/FramebufferDevice.cpp +++ b/kernel/kernel/Device/FramebufferDevice.cpp @@ -301,6 +301,13 @@ namespace Kernel return BAN::UniqPtr(BAN::move(region)); } + BAN::ErrorOr> split(size_t offset) override + { + (void)offset; + dwarnln("TODO: FramebufferMemoryRegion::split"); + return BAN::Error::from_errno(ENOTSUP); + } + protected: // Returns error if no memory was available // Returns true if page was succesfully allocated diff --git a/kernel/kernel/Memory/FileBackedRegion.cpp b/kernel/kernel/Memory/FileBackedRegion.cpp index b80c9173..79cde713 100644 --- a/kernel/kernel/Memory/FileBackedRegion.cpp +++ b/kernel/kernel/Memory/FileBackedRegion.cpp @@ -232,4 +232,33 @@ namespace Kernel return BAN::UniqPtr(BAN::move(result)); } + BAN::ErrorOr> FileBackedRegion::split(size_t offset) + { + ASSERT(offset && offset < m_size); + ASSERT(offset % PAGE_SIZE == 0); + + const bool has_dirty_pages = (m_type == Type::PRIVATE); + + BAN::Vector dirty_pages; + if (has_dirty_pages) + { + 
TRY(dirty_pages.resize(BAN::Math::div_round_up(m_size - offset, PAGE_SIZE))); + for (size_t i = 0; i < dirty_pages.size(); i++) + dirty_pages[i] = m_dirty_pages[i + offset / PAGE_SIZE]; + } + + auto* new_region = new FileBackedRegion(m_inode, m_page_table, m_offset + offset, m_size - offset, m_type, m_flags, m_status_flags); + if (new_region == nullptr) + return BAN::Error::from_errno(ENOMEM); + new_region->m_vaddr = m_vaddr + offset; + new_region->m_shared_data = m_shared_data; + new_region->m_dirty_pages = BAN::move(dirty_pages); + + m_size = offset; + if (has_dirty_pages) + MUST(m_dirty_pages.resize(offset / PAGE_SIZE)); + + return BAN::UniqPtr<MemoryRegion>::adopt(new_region); + } + } diff --git a/kernel/kernel/Memory/MemoryBackedRegion.cpp b/kernel/kernel/Memory/MemoryBackedRegion.cpp index 99f8b22c..34ca07eb 100644 --- a/kernel/kernel/Memory/MemoryBackedRegion.cpp +++ b/kernel/kernel/Memory/MemoryBackedRegion.cpp @@ -82,6 +82,21 @@ namespace Kernel return BAN::UniqPtr<MemoryRegion>(BAN::move(result)); } + BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> MemoryBackedRegion::split(size_t offset) + { + ASSERT(offset && offset < m_size); + ASSERT(offset % PAGE_SIZE == 0); + + auto* new_region = new MemoryBackedRegion(m_page_table, m_size - offset, m_type, m_flags, m_status_flags); + if (new_region == nullptr) + return BAN::Error::from_errno(ENOMEM); + new_region->m_vaddr = m_vaddr + offset; + + m_size = offset; + + return BAN::UniqPtr<MemoryRegion>::adopt(new_region); + } + BAN::ErrorOr<void> MemoryBackedRegion::copy_data_to_region(size_t offset_into_region, const uint8_t* buffer, size_t buffer_size) { ASSERT(offset_into_region + buffer_size <= m_size); diff --git a/kernel/kernel/Memory/SharedMemoryObject.cpp b/kernel/kernel/Memory/SharedMemoryObject.cpp index 76480bea..354c4e43 100644 --- a/kernel/kernel/Memory/SharedMemoryObject.cpp +++ b/kernel/kernel/Memory/SharedMemoryObject.cpp @@ -87,6 +87,13 @@ namespace Kernel return BAN::UniqPtr<MemoryRegion>(BAN::move(region)); } + BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> SharedMemoryObject::split(size_t offset) + { + (void)offset; + 
dwarnln("TODO: SharedMemoryObject::split"); + return BAN::Error::from_errno(ENOTSUP); + } + BAN::ErrorOr SharedMemoryObject::allocate_page_containing_impl(vaddr_t address, bool wants_write) { ASSERT(contains(address)); diff --git a/kernel/kernel/Process.cpp b/kernel/kernel/Process.cpp index 940eb221..633b627a 100644 --- a/kernel/kernel/Process.cpp +++ b/kernel/kernel/Process.cpp @@ -2189,6 +2189,38 @@ namespace Kernel return 0; } + BAN::ErrorOr>> Process::split_memory_region(BAN::UniqPtr&& region, vaddr_t base, size_t length) + { + ASSERT(base % PAGE_SIZE == 0); + ASSERT(base < region->vaddr() + region->size()); + + if (auto rem = length % PAGE_SIZE) + length += PAGE_SIZE - rem; + if (base + length > region->vaddr() + region->size()) + length = region->vaddr() + region->size() - base; + + BAN::Vector> result; + TRY(result.reserve(3)); + + if (region->vaddr() < base) + { + auto temp = TRY(region->split(base - region->vaddr())); + MUST(result.push_back(BAN::move(region))); + region = BAN::move(temp); + } + + if (base + length < region->vaddr() + region->size()) + { + auto temp = TRY(region->split(base + length - region->vaddr())); + MUST(result.push_back(BAN::move(region))); + region = BAN::move(temp); + } + + MUST(result.push_back(BAN::move(region))); + + return result; + } + BAN::ErrorOr Process::sys_mmap(const sys_mmap_t* user_args) { sys_mmap_t args; @@ -2201,6 +2233,9 @@ namespace Kernel if (args.prot != PROT_NONE && (args.prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))) return BAN::Error::from_errno(EINVAL); + if (!(args.flags & MAP_ANONYMOUS) && (args.off % PAGE_SIZE)) + return BAN::Error::from_errno(EINVAL); + if (!(args.flags & MAP_PRIVATE) == !(args.flags & MAP_SHARED)) return BAN::Error::from_errno(EINVAL); auto region_type = (args.flags & MAP_PRIVATE) ? 
MemoryRegion::Type::PRIVATE : MemoryRegion::Type::SHARED; @@ -2241,10 +2276,18 @@ namespace Kernel { if (!m_mapped_regions[i]->overlaps(vaddr, args.len)) continue; - if (!m_mapped_regions[i]->is_contained_by(vaddr, args.len)) - derrorln("VERY BROKEN MAP_FIXED UNMAP"); + m_mapped_regions[i]->wait_not_pinned(); + auto temp = BAN::move(m_mapped_regions[i]); m_mapped_regions.remove(i--); + + if (!temp->is_contained_by(vaddr, args.len)) + { + auto new_regions = TRY(split_memory_region(BAN::move(temp), vaddr, args.len)); + for (auto& new_region : new_regions) + if (!new_region->overlaps(vaddr, args.len)) + TRY(m_mapped_regions.push_back(BAN::move(new_region))); + } } } else if (const vaddr_t vaddr = reinterpret_cast(args.addr); vaddr == 0) @@ -2267,9 +2310,6 @@ namespace Kernel if (args.flags & MAP_ANONYMOUS) { - if (args.off != 0) - return BAN::Error::from_errno(EINVAL); - auto region = TRY(MemoryBackedRegion::create( page_table(), args.len, @@ -2330,7 +2370,7 @@ namespace Kernel if (auto rem = vaddr % PAGE_SIZE) { vaddr -= rem; - len += PAGE_SIZE - rem; + len += rem; } if (auto rem = len % PAGE_SIZE) @@ -2338,22 +2378,22 @@ namespace Kernel LockGuard _(m_process_lock); - // FIXME: We should unmap partial regions. 
- // This is a hack to only unmap if the whole mmap region - // is contained within [addr, addr + len] for (size_t i = 0; i < m_mapped_regions.size(); i++) { - auto& region = m_mapped_regions[i]; + if (!m_mapped_regions[i]->overlaps(vaddr, len)) + continue; - const vaddr_t region_s = region->vaddr(); - const vaddr_t region_e = region->vaddr() + region->size(); - if (vaddr <= region_s && region_e <= vaddr + len) + m_mapped_regions[i]->wait_not_pinned(); + auto temp = BAN::move(m_mapped_regions[i]); + m_mapped_regions.remove(i--); + + if (!temp->is_contained_by(vaddr, len)) { - region->wait_not_pinned(); - m_mapped_regions.remove(i--); + auto new_regions = TRY(split_memory_region(BAN::move(temp), vaddr, len)); + for (auto& new_region : new_regions) + if (!new_region->overlaps(vaddr, len)) + TRY(m_mapped_regions.push_back(BAN::move(new_region))); } - else if (region->overlaps(vaddr, len)) - dwarnln("TODO: partial region munmap"); } return 0; @@ -2386,38 +2426,34 @@ namespace Kernel LockGuard _(m_process_lock); - // FIXME: We should protect partial regions. 
- // This is a hack to only protect if the whole mmap region - // is contained within [addr, addr + len] for (size_t i = 0; i < m_mapped_regions.size(); i++) { + if (!m_mapped_regions[i]->overlaps(vaddr, len)) + continue; + + if (!m_mapped_regions[i]->is_contained_by(vaddr, len)) + { + m_mapped_regions[i]->wait_not_pinned(); + auto temp = BAN::move(m_mapped_regions[i]); + m_mapped_regions.remove(i--); + + auto new_regions = TRY(split_memory_region(BAN::move(temp), vaddr, len)); + for (auto& new_region : new_regions) + TRY(m_mapped_regions.push_back(BAN::move(new_region))); + + continue; + } + auto& region = m_mapped_regions[i]; + const bool is_shared = (region->type() == MemoryRegion::Type::SHARED); + const bool is_writable = (region->status_flags() & O_WRONLY); + const bool want_write = (prot & PROT_WRITE); + if (is_shared && want_write && !is_writable) + return BAN::Error::from_errno(EACCES); - const vaddr_t region_s = region->vaddr(); - const vaddr_t region_e = region->vaddr() + region->size(); - if (vaddr <= region_s && region_e <= vaddr + len) - { - const bool is_shared = (region->type() == MemoryRegion::Type::SHARED); - const bool is_writable = (region->status_flags() & O_WRONLY); - const bool want_write = (prot & PROT_WRITE); - if (is_shared && want_write && !is_writable) - return BAN::Error::from_errno(EACCES); - - // FIXME: if the region is pinned writable, this may - // cause some problems :D - TRY(region->mprotect(flags)); - } - else if (region->overlaps(vaddr, len)) - { - const bool is_shared = (region->type() == MemoryRegion::Type::SHARED); - const bool is_writable = (region->status_flags() & O_WRONLY); - const bool want_write = (prot & PROT_WRITE); - if (is_shared && want_write && !is_writable) - return BAN::Error::from_errno(EACCES); - - dwarnln("TODO: partial region mprotect"); - TRY(region->mprotect(flags | region->flags())); - } + // NOTE: don't change protection of regions in use + region->wait_not_pinned(); + TRY(region->mprotect(flags)); } 
return 0;