#define _LARGEFILE64_SOURCE
#include "kern_ll.h"
#include <pthread.h> // should include <sched.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <sys/types.h>
 #include <sys/mman.h>
 
#include "comm_mem.h"
#include <time.h>


#include <sched.h>
#include <sys/types.h>
#include <errno.h>
#include "comm_arbiter.h"
//#include <linux/unistd.h>
#include "comm_slab.h"
//#define DEBUG 1


/* The slab manager is an extension of the communication arbiter
 * While this will never be called directly by the user, this extension
 * treats the file being accessed as a series of slabs which can be 
 * accessed through the following functions
 */

/**
  * Initialize the slab manager's global state: reset the floating slab
  * size to the compile-time default, zero the active-slab count, and
  * clear the table list.  Must be called once before any other slab
  * function is used.
  */

void init_slab_manager()
{
	// reset the globals (the old unused loop counter was removed)
	floating_slab_size = SLAB_SIZE;
	active_slabs = 0;
	slab_tables = NULL;
}


/**
  * new_slab creates a new slab table for a given file descriptor.
  * Every slab starts unallocated (size_allocated == -1) with its own
  * condition variable / mutex pair, and the file's size is cached.
  * @param fd -- incoming file descriptor
  * @return the newly allocated, fully initialized slab table
  */

PSLABTABLE new_slab(unsigned int fd)
{
	#ifdef DEBUG
		// fd is unsigned, so %u (was %i)
		printf("new_slab(%u)\n",fd);
	#endif
	
	int i;
	// allocate memory for the table and record the caller's descriptor
	PSLABTABLE new_table = malloc( sizeof(SLABTABLE));
	assert( new_table != NULL );
	new_table->fd = fd;
	
	#ifdef DEBUG
		// sizeof yields size_t, so %zu (the old %i was undefined behavior)
		printf("malloc %i * %zu\n",MAX_SLAB_CNT,sizeof(SLAB));
	#endif
	// MAX_SLAB_CNT is sometimes viewed as an arbitrary count
	new_table->slab_table = malloc(MAX_SLAB_CNT*sizeof(SLAB));
	assert( new_table->slab_table != NULL );
	PSLAB slab;
	// mark each slab unallocated and initialize its signal and mutex
	for (i=0; i < MAX_SLAB_CNT; i++)
	{
		slab = GET_SLAB_I(i,new_table->slab_table);
		slab->offset=0;
		slab->size_allocated=-1;	// -1 means "no memory attached yet"
		slab->ref_count = 0;
		pthread_cond_init(&slab->slab_signal,NULL);
		pthread_mutex_init(&slab->slab_mutex, NULL);
	}
	
	// cache the file size by seeking to the end
	new_table->file_size = lseek64(fd,0,SEEK_END);
	return new_table;
}


/**
  * get_file_size reports the cached size of the file behind a descriptor.
  * @param fd -- incoming file descriptor
  * @return the file size in bytes, or 0 when no slab table exists for fd
  */

u_int64_t get_file_size(unsigned int fd)
{
	// look the table up; a missing table maps to a size of zero
	PSLABTABLE table = get_slab(fd);
	return (table != NULL) ? table->file_size : 0;
}

/**
 * free_slab releases the memory backing the slab that covers `offset`.
 * Only the mapping/buffer is torn down; the slab structure (mutex,
 * condvar, counters) stays in place for reuse.
 * @param fd -- incoming file descriptor
 * @param offset -- offset whose covering slab should be freed
 */
void free_slab(unsigned int fd, u_int64_t offset)
{
	#ifdef DEBUG
		// %u for unsigned fd, %llu (with cast) for the 64-bit offset
		printf(" -- free_slab(fd,offset) -- free_slab(%u,%llu)\n",fd,
		       (unsigned long long)offset);
	#endif
	PSLABTABLE slab_table = get_slab(fd);
	if (slab_table == NULL)		// unknown fd: nothing to free
		return;
	// get the slab associated with this offset
	PSLAB slab = GET_SLAB(offset,slab_table->slab_table);
	
	// overlap on offset does not matter here, since we are simply
	// releasing the slab's backing memory
	if (slab->size_allocated != -1)
	{
		#ifdef SLAB_PRE_ALLOCATE
			// unmap the file-backed region
			munmap(slab->ptr,slab->size_allocated);
		#else
			// the read path malloc'd this buffer; without this free
			// every recycled slab leaked its previous buffer
			free(slab->ptr);
			slab->ptr = NULL;
		#endif
		slab->size_allocated = -1;	// mark as unallocated
	}
}


/**
  * release_slab removes one reference to the slab covering `offset`.
  * If the slab is no longer referenced it is freed and waiting threads
  * are woken so the slab can be reused for another offset range.
  * @param fd -- incoming file descriptor
  * @param offset -- offset within the file to release
  */

void release_slab(unsigned int fd, u_int64_t offset)
{
	PSLABTABLE slab_table = get_slab(fd);
	if (slab_table == NULL)		// unknown fd: nothing to release
		return;
	// obtain the slab through the GET_SLAB calculation, then
	// lock it by taking its mutex
	PSLAB slab = GET_SLAB(offset,slab_table->slab_table);
	
	pthread_mutex_lock( &slab->slab_mutex );
	
	// decrement the reference counter
	slab->ref_count--;
	
	// 'garbage collect' the slab iff there are no more references
	if (slab->ref_count <= 0)
	{
		free_slab(fd,offset);
		// wake threads waiting for this slab to become available
		pthread_cond_broadcast(&slab->slab_signal);
	}
	pthread_mutex_unlock( &slab->slab_mutex );
}

/**
 * decrement_count removes one reference to the slab covering `offset`
 * WITHOUT garbage-collecting it (unlike release_slab, which frees the
 * slab and signals waiters when the count reaches zero).
 * @param fd -- incoming file descriptor
 * @param offset -- offset within the file to release
 */
void decrement_count(unsigned int fd, u_int64_t offset)
{
	PSLABTABLE slab_table = get_slab(fd);
	if (slab_table == NULL)		// unknown fd: nothing to do
		return;
	PSLAB slab = GET_SLAB(offset,slab_table->slab_table);
	
	pthread_mutex_lock( &slab->slab_mutex );
	
	// just drop the reference; the original's empty
	// "if (ref_count <= 0)" branch was dead code and has been removed
	slab->ref_count--;
	
	pthread_mutex_unlock( &slab->slab_mutex );
}


/**
  * adjust_slab_sizes shrinks the floating slab size when additional
  * files are opened, then walks every table and releases each
  * unreferenced slab so its memory is re-created at the new size.
  *
  * PRECONDITION: the caller must hold mem_mutex.  The only caller,
  * get_slab(), already does; the original code re-locked mem_mutex
  * here, which self-deadlocks on a default (non-recursive) mutex.  It
  * also contained a stray "return table;" that returned a value from a
  * void function and aborted the walk after the first table -- both
  * removed.
  */

void adjust_slab_sizes()
{
	short i=0;
	floating_slab_size = SLAB_SIZE / active_slabs;
	
	PLINKEDLIST pslab_tables = slab_tables;
	PSLABTABLE table = NULL;
	
	// go through all tables and release every unreferenced slab so it
	// is re-allocated at the reduced size on next use
	while(pslab_tables!=NULL)
	{
		table = LIST_ENTRY_PTR(pslab_tables,SLABTABLE,table_list);
		
		PSLAB slab;
		for (i=0; i < MAX_SLAB_CNT; i++)
		{
			slab = GET_SLAB_I(i,table->slab_table);
			pthread_mutex_lock( &slab->slab_mutex );
			if (slab->ref_count <= 0)
			{
				if (slab->size_allocated != -1)
				{
					#ifdef SLAB_PRE_ALLOCATE
						munmap(slab->ptr,slab->size_allocated);
					#else
						// malloc-backed build: munmap here was wrong
						free(slab->ptr);
						slab->ptr = NULL;
					#endif
				}
				slab->size_allocated=-1;
				// wake anyone waiting to claim this slab
				pthread_cond_broadcast(&slab->slab_signal);
			}
			pthread_mutex_unlock( &slab->slab_mutex );
		}
		pslab_tables = pslab_tables->nextPtr;
	}
}


/**
  * get_slab returns the slab table associated with the incoming fd.
  * If no table exists for this fd yet, one is created via new_slab()
  * and linked into the global list; the whole lookup/insert runs
  * under mem_mutex.
  * @param fd -- incoming file descriptor
  * @return the SLAB TABLE entry for fd
  */

PSLABTABLE get_slab(unsigned int fd)
{
	
	PLINKEDLIST pslab_tables = slab_tables;
	PSLABTABLE table = NULL;
	
	pthread_mutex_lock( &mem_mutex );
	
	if (slab_tables == NULL)
	{
		// first table ever: create it and start the global list
		active_slabs++;
		table = new_slab(fd);
		KernAddListEntry(&slab_tables,&(table->table_list));
		pthread_mutex_unlock( &mem_mutex );		
	}
	else
	{
		
		// walk the list looking for an existing table for this fd
		pslab_tables = slab_tables;
		while(pslab_tables!=NULL)
		{
			table = LIST_ENTRY_PTR(pslab_tables,SLABTABLE,table_list);
			
			if (fd == table->fd)
			{
				
				pthread_mutex_unlock( &mem_mutex );
				return table;
			}
			// stop ON the last node (not past it) so pslab_tables is
			// still valid for the KernAddListEntryAfter() call below
			if (pslab_tables->nextPtr == NULL)
				break;
			pslab_tables = pslab_tables->nextPtr;
		}
		
		// not found: shrink slab sizes and append a new table.
		// NOTE(review): mem_mutex is held across this call -- confirm
		// adjust_slab_sizes() does not re-acquire it, since a
		// non-recursive re-lock would deadlock here.
		active_slabs++;
		adjust_slab_sizes();
		table = new_slab(fd);
		KernAddListEntryAfter(&slab_tables,pslab_tables,&(table->table_list));
		pthread_mutex_unlock( &mem_mutex );
	}
	return table;
}



/**
  * allocate_slab_queue_entry services a queued request: it attaches
  * backing memory (mmap64 in the SLAB_PRE_ALLOCATE build, malloc+read
  * otherwise) to the slab that covers `offset`.
  * @param fd -- incoming file descriptor
  * @param offset -- requested file offset (aligned down to slab start)
  * @param size -- requested size (currently unused beyond the request)
  * @return 1 on success (or when a referenced slab already covers the
  *         index); (unsigned)-1 on failure -- callers cast the result
  *         back to a signed int and test <= 0
  */

unsigned int allocate_slab_queue_entry(unsigned int fd, u_int64_t offset, u_int64_t size)
{
	PSLABTABLE slab_table = get_slab(fd);
    
	PSLAB slab = GET_SLAB(offset,slab_table->slab_table);

	// a live, referenced slab already occupies this index: leave it
	if (slab->size_allocated != -1 && slab->ref_count > 0)
	{
		return 1; // already has been allocated elsewhere
	}
	// align the offset down to the start of its slab
	offset = START_OFFSET(offset);
	
	u_int64_t sz;
	
	#ifdef SLAB_PRE_ALLOCATE
		
		// mmap path: map [offset, offset+sz) read-only from the file
		if (offset >= slab_table->file_size)
		{
			return -1; // nothing left past EOF
		}

		sz = slab_table->file_size-offset;
		if (sz > floating_slab_size)
			sz = floating_slab_size;
		slab->ptr = mmap64(0,sz,PROT_READ,MAP_SHARED,fd,offset);
		slab->offset = offset;
		slab->size_allocated = sz;

		if (slab->ptr == MAP_FAILED)
		{
			// could not map file; report the symbolic errno and die
			perror("mmap64");
			if (errno == EACCES)
				printf("EACCES\n");
			if (errno == EINVAL)
				printf("EINVAL\n");
			if (errno == EMFILE)
				printf("EMFILE\n");
			if (errno == ENODEV)
				printf("ENODEV\n");
			if (errno == ENOMEM)
				printf("ENOMEM\n");
			if (errno == ENOTSUP)
				printf("ENOTSUP\n");
			if (errno == ENXIO)
				printf("ENXIO\n");
			if (errno == EOVERFLOW)
				printf("EOVERFLOW\n");
			exit(1);
		}
	#else
		// read path: (re)allocate a buffer and read the slab contents.
		// Free any buffer left from a previous allocation -- the old
		// code leaked one buffer per slab reassignment.
		if (slab->size_allocated != -1 && slab->ptr != NULL)
			free(slab->ptr);
		slab->ptr = malloc(floating_slab_size);
		assert( slab->ptr != NULL );	// original was missing this ';'
		// lseek64: offset is 64-bit; plain lseek could truncate it
		if ( lseek64(fd,offset,SEEK_SET) < 0)
		{
			free(slab->ptr);
			slab->ptr = NULL;
			slab->size_allocated = -1;	// keep the unallocated marker
			return -1;
		}
		slab->size_allocated = read(fd,slab->ptr,floating_slab_size);
		slab->offset = offset;
		if (slab->size_allocated <=0)
		{
			perror("read");
			// the old code left size_allocated at 0 here, which the
			// rest of the module treats as "allocated"
			free(slab->ptr);
			slab->ptr = NULL;
			slab->size_allocated = -1;
			return -1;
		}
	#endif
	
	// slab->ptr now contains the slab from which we can access memory
	return 1;
}


/**
  * arbitrate_slab_queue walks a queue of pending allocation requests,
  * fulfilling each one via allocate_slab_queue_entry and unlinking it
  * from the global reg_allocation_queue.
  * @param list -- head of the request queue to service
  * @param type -- WAIT or NOWAIT; with WAIT, queue_wait is broadcast
  *                once the whole (initially non-empty) queue is done
  * @return the list pointer where iteration stopped (NULL if exhausted)
  */

PLINKEDLIST arbitrate_slab_queue(PLINKEDLIST list, unsigned char type)
{
	#ifdef DEBUG
		printf(" -- arbitrate_slab_queue(%X,%i)\n",list,type);
	#endif
	
	char isnull=0;
	PMEMREQUEST request = NULL;
	// remember whether we started empty: an empty run must not signal
	if (list== NULL)
	{
			isnull=1;
	}
	while(list != NULL)
	{
		// get the request at the front of the list
		request = LIST_ENTRY_PTR(list,MEM_REQUEST,queue_list);
		// the allocation status (an int) is smuggled through the
		// pointer field; the waiting thread casts it back to int
		request->associated_mem_region = (PMEMREGION)allocate_slab_queue_entry(request->fd,request->offset,request->size);
	
		// remove the entry from the global queue to mark it finished.
		// NOTE(review): iteration continues via the unlinked node's
		// nextPtr below -- confirm KernUnlinkListEntry leaves that
		// pointer intact on the removed node.
		KernUnlinkListEntry(&reg_allocation_queue,&(request->queue_list));
		
		// should split this to another conditional
		if (type == NOWAIT)
		{
			#ifdef DEBUG
				printf("Signaling waiting threads 1\n");
			#endif
			// per-entry signaling is currently disabled; waiters are
			// only woken by the WAIT broadcast at the end
			//pthread_cond_signal(&queue_wait);
			
			//pthread_cond_broadcast(&queue_wait);
		}
		// dead check: the while condition already guarantees non-NULL
		if (list == NULL)
		{
			break;	
		}
		
		list = list->nextPtr;
		
	}
	
	// wake every thread blocked in queue_slab_and_wait, but only if
	// we actually processed something
	if (type == WAIT && isnull == 0)
	{
		#ifdef DEBUG
				printf("Signaling waiting threads 2\n");
			#endif
		//pthread_cond_signal(&queue_wait);
		pthread_cond_broadcast(&queue_wait);
		
	}
	return list;
}


/**
  * queue_slab_and_wait queues an allocation request and blocks until
  * the arbiter fulfills it.  The handshake: append the request under
  * queue_mutex, nudge the arbiter via arbiter_wait, then sleep on
  * queue_wait until associated_mem_region becomes non-NULL.
  * @param fd -- incoming file descriptor
  * @param offset -- requested file offset
  * @param size -- requested size in bytes
  * @return the integer status the arbiter stored through the pointer
  *         field (see allocate_slab_queue_entry): 1 success, -1 failure
  */

int queue_slab_and_wait(unsigned int fd, u_int64_t offset, u_int64_t size)
{
	#ifdef DEBUG
		// %u / %llu match the actual argument types (the old %i did not)
		printf(" -- queue_slab_and_wait(%u,%llu,%llu)\n",fd,
		       (unsigned long long)offset,(unsigned long long)size);
	#endif
	
	// build the request the arbiter thread will service; ownership
	// stays with us -- we free it after the arbiter is done
	PMEMREQUEST new_request = (PMEMREQUEST)malloc(sizeof(MEM_REQUEST));
	assert( new_request != NULL);
	new_request->fd = fd;
	new_request->offset = offset;
	new_request->size = size;
	new_request->associated_mem_region = NULL;
	
	pthread_mutex_lock( &queue_mutex );
	// append atomically to the allocation queue, keeping the tail
	// pointer up to date for O(1) later appends
	if (reg_allocation_queue == NULL)
	{
		KernAddListEntry(&reg_allocation_queue,&(new_request->queue_list));
		reg_allocation_queue_tail = reg_allocation_queue;
	}
	else
	{
		KernAddListEntryAfter(&reg_allocation_queue,reg_allocation_queue_tail,&(new_request->queue_list));
		reg_allocation_queue_tail = reg_allocation_queue_tail->nextPtr;
	}
	
	// wait until the arbiter fills in associated_mem_region
	while( new_request->associated_mem_region == NULL)
	{
		pthread_cond_signal(&arbiter_wait);	// nudge the arbiter
		pthread_cond_wait( &queue_wait, &queue_mutex );
	}
	pthread_mutex_unlock( &queue_mutex );
	
	// the arbiter smuggles an int status through the pointer field;
	// round-trip it via size_t (the original "int v = (int*)..." was a
	// pointer-to-int assignment, a constraint violation)
	int v = (int)(size_t)new_request->associated_mem_region;
	
	free(new_request);
	
	return v;
}


/**
  * access_slab copies `size` bytes starting at `offset` from the slab
  * cache (loading slabs on demand via slab_ptr) into the caller's
  * buffer, possibly spanning several slabs.
  * @param fd -- incoming file descriptor
  * @param buf -- destination buffer (must hold at least `size` bytes)
  * @param offset -- file offset to start reading at
  * @param size -- number of bytes requested
  * @return number of bytes actually copied
  */

u_int64_t access_slab(unsigned int fd,char *buf, u_int64_t offset, u_int64_t size)
{
	#ifdef DEBUG
		// %p for the pointer, %llu for the 64-bit values (old %i was UB)
		printf(" -- access_slab(%u,%p,%llu,%llu)\n",fd,(void*)buf,
		       (unsigned long long)offset,(unsigned long long)size);
	#endif
	
	char *mybuf = NULL;
	u_int64_t runcntr=0;
	u_int64_t new_size = 0;
	u_int64_t init_size = size;
	while(runcntr <= size)
	{
		size -=new_size;
		// get a pointer into the slab covering the current position
		new_size = slab_ptr(fd,&mybuf,offset+runcntr,size,1);
		
		// slab_ptr reports failure with a 0 length or a NULL buffer;
		// the original code looped forever here (and memcpy'd from
		// NULL) because runcntr never advanced
		if (new_size == 0 || mybuf == NULL)
		{
			printf("access_slab: slab_ptr failed\n");
			break;
		}
		
		if (runcntr >= init_size || new_size > init_size)
		{
			printf("in excess\n");
		}
		
		// copy this slab's contribution, then drop our reference
		memcpy((buf+runcntr),mybuf,new_size);
		decrement_count(fd,offset+runcntr);
		runcntr+=new_size;
	}
	
	return runcntr;
}


/**
  * slab_ptr resolves (fd, offset) to a pointer into cached slab memory,
  * loading or reloading the slab through the request queue when needed.
  * @param fd -- incoming file descriptor
  * @param buf -- out: set to point at the cached data for `offset`
  * @param offset -- file offset the caller wants to read
  * @param size -- number of bytes the caller wants
  * @param lock -- when 1, the slab's ref_count is incremented; the
  *                caller must later drop it (decrement_count/release_slab)
  * @return bytes available at *buf -- `size` when the request fits in
  *         the slab, the slab's remaining tail otherwise; 0 on error
  */

u_int64_t slab_ptr(unsigned int fd,char **buf, u_int64_t offset, u_int64_t size,char lock)
{
	
	PSLABTABLE slab_table = get_slab(fd);
    u_int64_t new_size = 0,return_size=0;
	
    PSLAB slab = GET_SLAB(offset,slab_table->slab_table);
	
	if (slab->size_allocated == -1)
	{
	
		// slab not loaded yet: queue a request and wait for the arbiter
	
		if ( queue_slab_and_wait(fd,offset,size) <= 0 )
		{
			
			return 0;
		}
	}
	else
	{
		// slab is loaded but covers a different range: wait until it
		// is unreferenced, then request a reload for our range
		if (!(offset >= slab->offset && offset < (slab->offset+slab->size_allocated)))
		{
			
			pthread_mutex_lock( &slab->slab_mutex );
			while(slab->ref_count > 0)
			{
				// yield until we are free to take this
				pthread_cond_wait(&slab->slab_signal,&slab->slab_mutex);
				
				
			}
			pthread_mutex_unlock( &slab->slab_mutex );
			
			if ( queue_slab_and_wait(fd,offset,size) <= 0 )
			{
				
				return 0;
			}
			
			
		}
		
		// re-check against the (possibly reloaded) slab range; same
		// wait-then-reload dance as above.  NOTE(review): with the
		// range check above this looks mostly redundant -- confirm
		// whether a reload can land short of `offset`.
		if (offset > slab->offset+slab->size_allocated)
		{

			pthread_mutex_lock( &slab->slab_mutex );
				while(slab->ref_count > 0)
						{
							pthread_cond_wait(&slab->slab_signal,&slab->slab_mutex);

						}
						pthread_mutex_unlock( &slab->slab_mutex );

						if ( queue_slab_and_wait(fd,offset,size) <= 0 )
						{
							return 0;
						}
						
						
		}
	}
	// the slab now covers `offset`; optionally take a reference
		
	pthread_mutex_lock( &slab->slab_mutex );
		if (lock == 1)
		{
			
			slab->ref_count++;
		}
		
		pthread_mutex_unlock( &slab->slab_mutex );
	if ((offset+size) <= (slab->offset+slab->size_allocated) )
	{
		// request fits entirely inside this slab
		*buf = slab->ptr+(offset-slab->offset);
		return size;
	}
	else
	{
		// at this point end of input is greater than the current slab
		new_size = offset-slab->offset;//

		if (new_size >= slab->size_allocated)
		{
			return 0; // we can assume an error occurred
		}

		*buf = (slab->ptr+new_size);

		// hand back whatever remains of this slab; the caller loops
		// (see access_slab) to fetch the rest from the next slab
		return_size = slab->size_allocated-new_size;
		return return_size;
	}
	
}
