forked from Bananymous/banan-os

update main #1
@@ -21,6 +21,9 @@ namespace Kernel
		paddr_t take_free_page();
		void release_page(paddr_t);

		paddr_t take_free_contiguous_pages(size_t pages);
		void release_contiguous_pages(paddr_t paddr, size_t pages);

		size_t used_pages() const;
		size_t free_pages() const;
@@ -29,10 +29,13 @@ namespace Kernel

	private:
		unsigned long long* ull_bitmap_ptr() { return (unsigned long long*)m_vaddr; }
		const unsigned long long* ull_bitmap_ptr() const { return (const unsigned long long*)m_vaddr; }

		paddr_t paddr_for_bit(unsigned long long) const;
		unsigned long long bit_for_paddr(paddr_t paddr) const;

		unsigned long long contiguous_bits_set(unsigned long long start, unsigned long long count) const;

	private:
		const paddr_t m_paddr { 0 };
		const size_t m_size { 0 };
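The helpers declared above translate between a page's physical address and its bit index in the free-page bitmap. The definition of bit_for_paddr appears later in this diff; paddr_for_bit is presumably its inverse. A minimal standalone sketch of that mapping (the parameters stand in for the class members, and PAGE_SIZE is assumed to be the usual 4 KiB):

	// Sketch only: bit <-> paddr mapping, not part of this commit.
	// Bit 0 describes the first data page, i.e. the page right after
	// the m_bitmap_pages pages that hold the bitmap itself.
	using ull = unsigned long long;
	using paddr_t = unsigned long long;
	constexpr paddr_t PAGE_SIZE = 4096;

	ull bit_for_paddr(paddr_t paddr, paddr_t m_paddr, ull m_bitmap_pages)
	{
		return (paddr - m_paddr) / PAGE_SIZE - m_bitmap_pages;
	}

	paddr_t paddr_for_bit(ull bit, paddr_t m_paddr, ull m_bitmap_pages)
	{
		return m_paddr + (bit + m_bitmap_pages) * PAGE_SIZE;
	}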
@@ -79,6 +79,25 @@ namespace Kernel
		ASSERT_NOT_REACHED();
	}

	paddr_t Heap::take_free_contiguous_pages(size_t pages)
	{
		LockGuard _(m_lock);
		for (auto& range : m_physical_ranges)
			if (range.free_pages() >= pages)
				if (paddr_t paddr = range.reserve_contiguous_pages(pages))
					return paddr;
		return 0;
	}

	void Heap::release_contiguous_pages(paddr_t paddr, size_t pages)
	{
		LockGuard _(m_lock);
		for (auto& range : m_physical_ranges)
			if (range.contains(paddr))
				return range.release_contiguous_pages(paddr, pages);
		ASSERT_NOT_REACHED();
	}

	size_t Heap::used_pages() const
	{
		LockGuard _(m_lock);
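Heap::take_free_contiguous_pages returns 0 when no physical range holds a long enough run of free pages, so callers have to check the result; Heap::release_contiguous_pages asserts that the address belongs to one of the ranges. A minimal usage sketch for a physically contiguous DMA buffer, assuming a singleton accessor such as Heap::get() (not shown in this diff):

	// Sketch only: reserve and release a physically contiguous 4-page buffer.
	constexpr size_t dma_pages = 4;
	paddr_t dma_paddr = Heap::get().take_free_contiguous_pages(dma_pages);
	if (dma_paddr == 0)
		dprintln("no run of {} contiguous free pages", dma_pages);
	else
	{
		// ... map dma_paddr and program the device with it ...
		Heap::get().release_contiguous_pages(dma_paddr, dma_pages);
	}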
@@ -27,7 +27,7 @@ namespace Kernel

		memset((void*)m_vaddr, 0x00, m_bitmap_pages * PAGE_SIZE);
		memset((void*)m_vaddr, 0xFF, m_data_pages / 8);
-		for (int i = 0; i < m_data_pages % 8; i++)
+		for (ull i = 0; i < m_data_pages % 8; i++)
			((uint8_t*)m_vaddr)[m_data_pages / 8] |= 1 << i;

		dprintln("physical range needs {} pages for bitmap", m_bitmap_pages);
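The initialization above marks every data page as free: memset(..., 0xFF, m_data_pages / 8) sets the bitmap one whole byte at a time, and the loop sets the remaining m_data_pages % 8 bits in the byte that follows, so bits past the last data page stay zero and are never handed out. A small standalone sketch of the same pattern, assuming 20 data pages:

	// Sketch only: whole bytes via memset, then the tail bits one by one.
	#include <cstddef>
	#include <cstdint>
	#include <cstring>

	int main()
	{
		uint8_t bitmap[8] {};
		const size_t data_pages = 20;

		memset(bitmap, 0xFF, data_pages / 8);        // bits 0-15 (two full bytes)
		for (size_t i = 0; i < data_pages % 8; i++)  // bits 16-19
			bitmap[data_pages / 8] |= 1 << i;

		// bitmap now reads FF FF 0F 00 ...: 20 pages marked free.
	}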
@@ -43,6 +43,18 @@ namespace Kernel
		return (paddr - m_paddr) / PAGE_SIZE - m_bitmap_pages;
	}

	ull PhysicalRange::contiguous_bits_set(ull start, ull count) const
	{
		for (ull i = 0; i < count; i++)
		{
			ull off = (start + i) / ull_bits;
			ull bit = (start + i) % ull_bits;
			if (!(ull_bitmap_ptr()[off] & (1ull << bit)))
				return i;
		}
		return count;
	}

	paddr_t PhysicalRange::reserve_page()
	{
		ASSERT(free_pages() > 0);
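contiguous_bits_set(start, count) walks the bitmap from bit start and reports how many of the next count bits are set, stopping at the first clear bit, so a return value equal to count means a long enough run of free pages was found. A tiny standalone check of that behaviour on a one-word bitmap (the free function below mirrors the member above):

	// Sketch only: contiguous_bits_set() semantics on a single 64-bit word.
	#include <cassert>

	using ull = unsigned long long;
	constexpr ull ull_bits = 64;

	static ull bitmap[1] { 0b0111 }; // bits 0-2 set (free), bit 3 clear (allocated)

	ull contiguous_bits_set(ull start, ull count)
	{
		for (ull i = 0; i < count; i++)
		{
			ull off = (start + i) / ull_bits;
			ull bit = (start + i) % ull_bits;
			if (!(bitmap[off] & (1ull << bit)))
				return i; // the run ended after i set bits
		}
		return count; // all count bits starting at start were set
	}

	int main()
	{
		assert(contiguous_bits_set(0, 2) == 2); // bits 0-1 are set
		assert(contiguous_bits_set(0, 4) == 3); // run stops at clear bit 3
		assert(contiguous_bits_set(3, 2) == 0); // bit 3 itself is clear
	}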
@@ -80,4 +92,61 @@ namespace Kernel
		m_free_pages++;
	}

	paddr_t PhysicalRange::reserve_contiguous_pages(size_t pages)
	{
		ASSERT(pages > 0);
		ASSERT(free_pages() > 0);

		if (pages == 1)
			return reserve_page();

		ull ull_count = BAN::Math::div_round_up<ull>(m_data_pages, ull_bits);

		// NOTE: This feels kinda slow, but I don't want to be
		//       doing premature optimization. This will be only
		//       used when creating DMA regions.

		for (ull i = 0; i < ull_count; i++)
		{
			if (ull_bitmap_ptr()[i] == 0)
				continue;

			for (ull bit = 0; bit < ull_bits;)
			{
				ull start = i * ull_bits + bit;
				ull set_cnt = contiguous_bits_set(start, pages);
				if (set_cnt == pages)
				{
					for (ull j = 0; j < pages; j++)
						ull_bitmap_ptr()[(start + j) / ull_bits] &= ~(1ull << ((start + j) % ull_bits));
					m_free_pages -= pages;
					return paddr_for_bit(start);
				}
				bit += set_cnt + 1;
			}
		}

		ASSERT_NOT_REACHED();
	}

	void PhysicalRange::release_contiguous_pages(paddr_t paddr, size_t pages)
	{
		ASSERT(paddr % PAGE_SIZE == 0);
		ASSERT(paddr - m_paddr <= m_size);
		ASSERT(pages > 0);

		ull start_bit = bit_for_paddr(paddr);
		for (size_t i = 0; i < pages; i++)
		{
			ull off = (start_bit + i) / ull_bits;
			ull bit = (start_bit + i) % ull_bits;
			ull mask = 1ull << bit;

			ASSERT(!(ull_bitmap_ptr()[off] & mask));
			ull_bitmap_ptr()[off] |= mask;
		}

		m_free_pages += pages;
	}

}
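In the scan above, bit += set_cnt + 1 is what keeps the search linear: when contiguous_bits_set() comes back short, the run ended at a clear (already allocated) bit, and any candidate start between the old one and that clear bit would still have to cover it, so the next start worth trying is the bit just past it. For example, with pages = 3 and bits 0-1 free but bit 2 allocated, the attempt at bit 0 returns set_cnt = 2 and the scan resumes at bit 3.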