#include <exec/lists.h>
//#include <exec/memory.h>
#include <exec/execbase.h>
#include <exec/kmem.h>
#include "memory/paging.h"

#include "pmm.h"



//
// The structure of a range of kernel-space virtual addresses
//
struct kmem_range
{
	struct MinNode		Node;		// Linkage into Execbase's free/used virtual-range lists
									// (must remain the first member: list code casts the
									// range pointer to a node pointer)
	vir_addr			Base;		// Virtual base address of the range (page-aligned —
									// TODO confirm; callers in this file pass PAGE_ALIGN'd values)
	ULONG				Pages;		// Size of the range, in PAGE_SIZE units
	struct kslab *		Slab;		// Slab owning this range, or NULL if the range is not
									// backed by / associated with any slab
};



/*
//
// Initialize a new range that maps a given area as free or as already used.
//
kmem_range * create_range(BOOL is_free, vir_addr base, vir_addr top, kslab *slab)
{
	// Range too small (< physical memory page size)
	if ((top - base) < PAGE_SIZE)
		return NULL;


	kmem_range *range = (kmem_range*)slab_cache_alloc(Execbase.kmem_range_cache, SLAB_ALLOC_ATOMIC);

	range->Base = base;
	range->Pages   = (top - base) / PAGE_SIZE;

	if (is_free)
		AddTail(&Execbase.FreeVMemRange, (Node *)&range->Node);
	else
	{
		vir_addr vaddr;
		range->Slab = slab;
		AddTail(&Execbase.UsedVMemRange, (Node *)&range->Node);

		// Ok, set the range owner for the pages in this page
		for (vaddr = base ; vaddr < top ; vaddr += PAGE_SIZE)
		{
			phy_addr ppage_paddr = paging_v_to_p(vaddr);

			physmem_set_kmem_range(ppage_paddr, range);
		}
	}

	return range;
}
*/



/*
// 
// Allocate a new kernel area spanning one or multiple pages.
// 
// @return a new range structure
// 
kmem_range *	kmem_new_range(ULONG nb_pages, ULONG flags, vir_addr * range_start)
{
	kmem_range *free_range = NULL;
	kmem_range *new_range = NULL;

	if (nb_pages <= 0)
		return NULL;

	// Find a suitable free range to hold the size-sized object 
	free_range = find_suitable_free_range(nb_pages);
	if (free_range == NULL)
		return NULL;

	// If range has exactly the requested size, just move it to the "used" list 
	if(free_range->Pages == nb_pages)
	{
		list_delete(kmem_free_range_list, free_range);
		kmem_used_range_list = insert_range(kmem_used_range_list, free_range);

		// The new_range is exactly the free_range 
		new_range = free_range;
	}

	// Otherwise the range is bigger than the requested size, split it.
	// This involves reducing its size, and allocate a new range, which
	// is going to be added to the "used" list 
	else
	{
		// free_range split in { new_range | free_range } 
		new_range = (kmem_range*) kmem_cache_alloc(kmem_range_cache, (flags & KMEM_VMM_ATOMIC) ? SLAB_ALLOC_ATOMIC : 0);
		if (! new_range)
			return NULL;

		new_range->Base		= free_range->Base;
		new_range->Pages    = nb_pages;
		free_range->Base	+= nb_pages * PAGE_SIZE;
		free_range->Pages   -= nb_pages;

		// free_range is still at the same place in the list 
		// insert new_range in the used list 
		kmem_used_range_list = insert_range(kmem_used_range_list, new_range);
	}

	// By default, the range is not associated with any slab 
	new_range->Slab = NULL;

	// If mapping of physical pages is needed, map them now 
	if (flags & KMEM_VMM_MAP)
	{
		int i;
		for (i = 0 ; i < nb_pages ; i ++)
		{
			// Get a new physical page 
			phy_addr ppage_paddr = sos_physmem_ref_physpage_new(! (flags & KMEM_VMM_ATOMIC));
	  
			// Map the page in kernel space 
			if (ppage_paddr)
			{
				if (sos_paging_map(ppage_paddr, new_range->Base + i * PAGE_SIZE, FALSE // Not a user page ,
				((flags & KMEM_VMM_ATOMIC) ? VM_MAP_ATOMIC : 0) | VM_MAP_PROT_READ | VM_MAP_PROT_WRITE))
				{
					// Failed => force unallocation, see below 
					physmem_unref_physpage(ppage_paddr);
					ppage_paddr = (phy_addr)NULL;
				}
				else
				{
					// Success : page can be unreferenced since it is now mapped 
					physmem_unref_physpage(ppage_paddr);
				}
			}

			// Undo the allocation if failed to allocate or map a new page 
			if (! ppage_paddr)
			{
				kmem_vmm_del_range(new_range);
				return NULL;
			}

			// Ok, set the range owner for this page 
			physmem_set_kmem_range(ppage_paddr, new_range);
		}
	}

	// Otherwise we need a correct page fault handler to support
	// deferred mapping (aka demand paging) of ranges 
	else
	{
		//SOS_ASSERT_FATAL(! "No demand paging yet");
	}

	if (range_start)
		*range_start = new_range->Base;

	return new_range;
}
*/

/*
//
//
//
ret_t kmem_setup(vir_addr kernel_base, vir_addr kernel_top, vir_addr stack_base, vir_addr stack_top)
{
	kslab		*first_struct_slab_of_caches = NULL,
				*first_struct_slab_of_ranges = NULL;
	vir_addr	first_slab_of_caches_base,
				first_slab_of_caches_nb_pages,
				first_slab_of_ranges_base,
				first_slab_of_ranges_nb_pages;
	kmem_range	*first_range_of_caches = NULL,
				*first_range_of_ranges = NULL;

	NewList(&Execbase.FreeVMemRange);
	NewList(&Execbase.UsedVMemRange);

	Execbase.kmem_range_cache = slab_cache_prepare(
		kernel_base, 
		kernel_top,
		sizeof(kmem_range),
		& first_struct_slab_of_caches,
		& first_slab_of_caches_base,
		& first_slab_of_caches_nb_pages,
		& first_struct_slab_of_ranges,
		& first_slab_of_ranges_base,
		& first_slab_of_ranges_nb_pages);

	// Mark virtual addresses 16kB - Video as FREE 
	create_range(TRUE, KMEM_VMM_BASE, PAGE_ALIGN_INF(BIOS_VIDEO_START), NULL);
  
	// Mark virtual addresses in Video hardware mapping as NOT FREE 
	create_range(FALSE, PAGE_ALIGN_INF(BIOS_VIDEO_START), PAGE_ALIGN_SUP(BIOS_VIDEO_END), NULL);
  
	// Mark virtual addresses Video - Kernel as FREE 
	create_range(TRUE, PAGE_ALIGN_SUP(BIOS_VIDEO_END), PAGE_ALIGN_INF(kernel_base), NULL);
  
	// Mark virtual addresses in Kernel code/data up to the bootstrap stack as NOT FREE 
	create_range(FALSE, PAGE_ALIGN_INF(kernel_base), stack_base, NULL);

	// Mark virtual addresses in the bootstrap stack as NOT FREE too, but in another vmm region in order to be un-allocated later 
	create_range(FALSE, stack_base, stack_top, NULL);

	// Mark the remaining virtual addresses in Kernel code/data after the bootstrap stack as NOT FREE 
	create_range(FALSE, stack_top, PAGE_ALIGN_SUP(kernel_top), NULL);

	// Mark virtual addresses in the first slab of the cache of caches as NOT FREE 
	first_range_of_caches = create_range(FALSE,
		first_slab_of_caches_base,
		first_slab_of_caches_base
		+ first_slab_of_caches_nb_pages*PAGE_SIZE,
		first_struct_slab_of_caches);

	// Mark virtual addresses in the first slab of the cache of ranges as NOT FREE 
	first_range_of_ranges = create_range(FALSE,
	first_slab_of_ranges_base,
	first_slab_of_ranges_base
	+ first_slab_of_ranges_nb_pages * PAGE_SIZE,
	first_struct_slab_of_ranges);
  
	// Mark virtual addresses after these slabs as FREE 
	create_range(TRUE, first_slab_of_ranges_base + first_slab_of_ranges_nb_pages * PAGE_SIZE, KMEM_VMM_TOP, NULL);

	// Update the cache subsystem so that the artificially-created
	//caches of caches and ranges really behave like *normal* caches (ie
	//those allocated by the normal slab API) 
	slab_setup_commit(first_struct_slab_of_caches, first_range_of_caches, first_struct_slab_of_ranges, first_range_of_ranges);

	return RET_OK;
}

*/