#include <string.h>
#include <exec/execbase.h>
#include <exec/memory.h>
#include <exec/lists.h>
#include <exec/errors.h>
#include <exec/kslab.h>
#include <exec/kmem.h>
#include "pmm.h"



// Slab cache flags.
// NOTE: bit constants use unsigned literals — (1<<31) on a 32-bit int
// shifts into the sign bit, which is undefined behavior in C; (1u<<31)
// is well-defined and matches the ULONG Flags field they are stored in.
#define KSLAB_CREATE_MAP		(1u<<0)			// See kmem_slab.h 
#define KSLAB_CREATE_ZERO		(1u<<1)			// Objects handed out are zero-filled (see kmem_slab.h)
#define KSLAB_ON_SLAB			(1u<<31)		// The kslab structure itself is stored inside the slab 
#define	KSLAB_MIN_OBJECT_SIZE	(8)				// Minimum object size, in bytes



// Free objects inside a slab are linked together through a plain
// struct MinNode (prev/next pointers) embedded at the start of each
// free object, so no dedicated structure is needed:
//struct slab_free_object
//{
//	slab_free_object	*prev, *next;
//};



// The structure of a slab cache: manages all slabs serving objects of
// one fixed size. Caches are chained on a system-wide list via Node.
struct kslab_cache
{
	struct Node		Node;					// Node for linked list of caches
	ULONG			ObjectSize;				// Object size requested by the cache creator, in bytes 
	ULONG			AllocatedSize;			// Actual per-object size, taking the alignment constraints into account 
	ULONG			ObjectsPerSlab;			// Number of objects each slab can hold
	ULONG			PagesPerSlab;			// Number of pages backing each slab
	ULONG			MinFree;				// Reserve kept pre-allocated — NOTE(review): the allocator logic compares this against the free-object count, not free slabs; confirm unit
	ULONG			Flags;					// KSLAB_CREATE_* / KSLAB_ON_SLAB parameters
	ULONG			FreeCount;				// Running count of free entries — NOTE(review): decremented per object allocation in the allocator, so this appears to count free objects, not slabs
	struct MinList	Slabs;					// The lists of slabs owned by this cache 
};


// The structure of a slab: one contiguous range of pages carved up into
// fixed-size objects, owned by exactly one kslab_cache.
struct kslab
{
	struct MinNode			Node;				// Node for the owning cache's linked list of slabs
	ULONG					FreeCount;			// Number of free objects remaining on this slab 
	struct MinNode *		FreeList;			// Head of the intrusive list of free objects (nodes live inside the free objects themselves)
	struct kmem_range *		Range;				// The address of the associated virtual-memory range structure 
	vir_addr				First;				// Virtual start address of this range 
	struct kslab_cache *	Cache;				// Back-pointer to the slab cache owning this slab 
};


/*

//
//
//
vir_addr slab_cache_alloc(kslab_cache * cache, ULONG alloc_flags)
{
	vir_addr	obj_vaddr = 0;
	kslab *		slab_head = NULL;

	if (cache == NULL)
		return 0;

	// If the slab at the head of the slabs' list has no free object,
	// then we need to allocate a new slab 
	if (((kslab *)GetHead((List *)&cache->Slabs))->FreeCount == 0)
	{
		if (slab_cache_grow(cache, alloc_flags) != RET_OK)
			return (vir_addr)NULL;
	}

	// Here: we are sure that list_get_head(kslab_cache->slab_list)
	// exists *AND* that list_get_head(kslab_cache->slab_list)->free is
	// NOT NULL
	slab_head = (kslab *)GetHead((List *)&cache->Slabs);

	// Allocate the object at the head of the slab at the head of the
	// slabs' list
	obj_vaddr = (vir_addr)RemHead((List *)&slab_head->FreeList);
	slab_head->FreeCount --;
	cache->FreeCount --;

	// If needed, reset object's contents
	if (cache->Flags & SLAB_CREATE_ZERO)
		memset((void*)obj_vaddr, 0x0, cache->AllocatedSize);

	// Slab is now full ?
	if (slab_head->free == NULL)
	{
		// Transfer it at the tail of the slabs' list 
		kslab * slab = (kslab *)GetHead((List *)&cache->Slabs);
		//list_add_tail(cache->Slabs, slab);
		AddTail((List *)&cache->Slabs, &slab->Node);
	}
  
	// For caches that require a minimum amount of free objects left,
	// allocate a slab if needed.
	// 
	// Notice the "== min_objects - 1": we did not write " <
	// min_objects" because for the cache of kmem structure, this would
	// lead to a chicken-and-egg problem, since cache_grow below would
	// call cache_alloc again for the kmem_vmm cache, so we return here
	// with the same cache. If the test were " < min_objects", then we
	// would call cache_grow again for the kmem_vmm cache again and
	// again... until we reach the bottom of our stack (infinite
	// recursion). By telling precisely "==", then the cache_grow would
	// only be called the first time.
	if ((cache->MinFree > 0) && (cache->FreeCount == (cache->MinFree - 1)))
	{
		// No: allocate a new slab now 
		if (slab_cache_grow(cache, alloc_flags) != RET_OK)
		{
			// Not enough free memory or blocking alloc => undo the allocation
			slab_cache_free(obj_vaddr);
			return (vir_addr)NULL;
		}
	}

	return obj_vaddr;
}


//
// Helper function to allocate a new slab for the given slab_cache 
//
static bool slab_cache_grow(kslab_cache * kslab_cache, ULONG alloc_flags)
{
	ULONG					range_alloc_flags = 0;
	kmem_range *			new_range = NULL;
	vir_addr				new_range_start = 0;
	kslab *					new_slab = NULL;

	//
	//Setup the flags for the range allocation
	range_alloc_flags = 0;

	// Atomic ?
	if (alloc_flags & SLAB_ALLOC_ATOMIC)
		range_alloc_flags |= KMEM_VMM_ATOMIC;

	// Need physical mapping NOW ?
	if (kslab_cache->Flags & (SLAB_CREATE_MAP | SLAB_CREATE_ZERO))
		range_alloc_flags |= KMEM_VMM_MAP;

	// Allocate the range
	new_range = kmem_new_range(kslab_cache->PagesPerSlab, range_alloc_flags, & new_range_start);
	if (!new_range)
		return false;

	// Allocate the slab structure 
	if (kslab_cache->Flags & KSLAB_ON_SLAB)
	{
		// Slab structure is ON the slab: simply set its address to the end of the range
		vir_addr slab_vaddr = new_range_start + kslab_cache->PagesPerSlab * PAGE_SIZE - sizeof(kslab);
		new_slab = (kslab*)slab_vaddr;
	}
	else
	{
		// Slab structure is OFF the slab: allocate it from the cache of slab structures 
		vir_addr slab_vaddr = kmem_cache_alloc(cache_of_struct_kslab, alloc_flags);
		if (! slab_vaddr)
		{
			kmem_vmm_del_range(new_range);
			return false;
		}
		new_slab = (kslab *)slab_vaddr;
	}

	cache_add_slab(kslab_cache, new_range_start, new_slab);
	new_slab->Range = new_range;

	// Set the backlink from range to this slab
	kmem_vmm_set_slab(new_range, new_slab);

	return true;
}


//
//
//
kslab_cache * slab_setup_prepare(vir_addr kernel_core_base,
			     vir_addr		kernel_core_top,
			     ULONG			sizeof_struct_range,
			     kslab **		first_struct_slab_of_caches,
			     vir_addr *		first_slab_of_caches_base,
			     ULONG *		first_slab_of_caches_nb_pages,
			     kslab **		first_struct_slab_of_ranges,
			     vir_addr *		first_slab_of_ranges_base,
			     ULONG *		first_slab_of_ranges_nb_pages)
{
	int			i;
	ret_t		retval;
	vir_addr	vaddr;

	// The cache of ranges we are about to allocate 
	kslab_cache *cache_of_ranges;

	// In the beginning, there isn't any cache 
	NewList(&Execbase.SlabCacheList);
	cache_of_struct_kslab = NULL;
	cache_of_struct_kslab_cache = NULL;

	//
	// Create the cache of caches, initialised with 1 allocated slab
	//

	// Allocate the pages needed for the 1st slab of caches, and map them
	//   in kernel space, right after the kernel 
	*first_slab_of_caches_base = PAGE_ALIGN_SUP(kernel_core_top);

	for (i = 0, vaddr = *first_slab_of_caches_base ; i < NB_PAGES_IN_SLAB_OF_CACHES ; i++, vaddr += PAGE_SIZE)
	{
		phy_addr ppage_paddr = physmem_ref_physpage_new(FALSE);

		retval = paging_map(ppage_paddr, vaddr, FALSE,
		VM_MAP_ATOMIC | VM_MAP_PROT_READ | VM_MAP_PROT_WRITE);

		retval = physmem_unref_physpage(ppage_paddr);
	}

	// Create the cache of caches 
	*first_slab_of_caches_nb_pages = NB_PAGES_IN_SLAB_OF_CACHES;
	cache_of_struct_kslab_cache = create_cache_of_caches(*first_slab_of_caches_base, NB_PAGES_IN_SLAB_OF_CACHES);

	// Retrieve the slab that should have been allocated 
	*first_struct_slab_of_caches = GetHead(cache_of_struct_kslab_cache->slab_list);

  
	//
	// Create the cache of ranges, initialised with 1 allocated slab   
	*first_slab_of_ranges_base = vaddr;

	// Allocate the 1st slab 
	for (i = 0, vaddr = *first_slab_of_ranges_base ; i < NB_PAGES_IN_SLAB_OF_RANGES ; i++, vaddr += PAGE_SIZE)
	{
		phy_addr ppage_paddr;

		ppage_paddr = physmem_ref_physpage_new(FALSE);

		retval = sos_paging_map(ppage_paddr, vaddr,
		FALSE, VM_MAP_ATOMIC | VM_MAP_PROT_READ | VM_MAP_PROT_WRITE);

		retval = physmem_unref_physpage(ppage_paddr);
	}

	// Create the cache of ranges 
	*first_slab_of_ranges_nb_pages = NB_PAGES_IN_SLAB_OF_RANGES;
	cache_of_ranges = create_cache_of_ranges(*first_slab_of_ranges_base, sizeof_struct_range, NB_PAGES_IN_SLAB_OF_RANGES);

	// Retrieve the slab that should have been allocated 
	*first_struct_slab_of_ranges = (kslab *)GetHead((List *)&cache_of_ranges->Slabs);

	// Create the cache of slabs, without any allocated slab yet
	cache_of_struct_kslab = slab_cache_create("off-slab slab structures",
		sizeof(kslab),
		1,
		0,
		SLAB_CREATE_MAP);

	return cache_of_ranges;
}



//
//
//
bool slab_setup_commit(kslab *first_struct_slab_of_caches, kmem_range *first_range_of_caches,
			    kslab *first_struct_slab_of_ranges, kmem_range *first_range_of_ranges)
{
  first_struct_slab_of_caches->Range = first_range_of_caches;
  first_struct_slab_of_ranges->Range = first_range_of_ranges;

  return true;
}


//
//
//
bool slab_cache_free(vir_addr vaddr)
{
	ret_t	retval;
	kslab *	empty_slab = NULL;

	// Remove the object from the slab
	retval = free_object(vaddr, &empty_slab);
	if (retval != RET_OK)
		return false;

	// Remove the slab and the underlying range if needed
	if (empty_slab != NULL)
		return cache_release_slab(empty_slab, TRUE);

	return true;
}

*/