#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/timeb.h>
#include <apr_general.h>
#include <apr_pools.h>
#include <apr_network_io.h>
#include <apr_env.h>
#include "mpi.h"
#include "cm.h"
#include "pmi.h"
#include "oom.h"
#include "log.h"

#define MPI_VERSION 1
#define MPI_SUBVERSION 1

/** \file env.c
 *  \brief All of the methods that make up the environment portion of MPI
 */

/**
 * \brief holds the memory pool for use by this module
 */
static apr_pool_t *env_pool = NULL;

/**
 * \brief holds the hostname of the machine
 */
static char hostname[APRMAXHOSTLEN + 1];

/**
 * \brief holds the length of the hostname
 */
static int hostnamelen = 0;

/**
 * \brief holds the port the system listens on
 */
static int port = 0;

/**
 * \brief flag to hold initialization status
 */
static int initialized = 0;

/**
 * \brief process rank in the world
 */
static int world_rank = 0;

/**
 * \brief size of our world
 */
static int world_size = 0;

/**
 * \brief names of the hosts which are connected to this system
 */
static char **processor_hosts;

/**
 * \brief parallel list of ports for remote hosts
 */
static int *processor_ports;

/**
 * \brief get the current version of the implementation
 * \param version receives the major MPI version number
 * \param subversion receives the minor MPI version number
 * \return currently only returns MPI_SUCCESS
 */
int MPI_Get_version(int *version, int *subversion)
{
	*subversion = MPI_SUBVERSION;
	*version = MPI_VERSION;
	return MPI_SUCCESS;
}

/**
 * \brief get the name of the host upon which this process is running
 * \param name caller-supplied buffer for the hostname; must hold at
 *        least hostnamelen + 1 bytes
 * \param resultlen receives the length of the hostname (excluding the
 *        NUL terminator)
 * \return currently only returns MPI_SUCCESS
 */
int MPI_Get_processor_name(char *name, int *resultlen)
{
	/* BUGFIX: copy the NUL terminator as well; the previous
	   strncpy(name, hostname, hostnamelen) left name unterminated */
	memcpy(name, hostname, hostnamelen + 1);
	*resultlen = hostnamelen;
	return MPI_SUCCESS;
}

/**
 * \brief Returns an elapsed wall-clock time on the calling processor
 *
 * Uses C11 timespec_get instead of the obsolete ftime() (removed from
 * POSIX.1-2008); resolution is whatever the system clock provides, at
 * least the millisecond granularity of the old implementation.
 *
 * \return Time in seconds since an arbitrary time in the past.
 */
double MPI_Wtime()
{
	struct timespec now;
	timespec_get(&now, TIME_UTC);
	return now.tv_sec + now.tv_nsec / 1000000000.0;
}

/**
 * \brief MPI_Wtick returns the resolution of MPI_WTIME in seconds.
 * \return the number of seconds between successive clock ticks, as a
 *         double precision value; this implementation reports a
 *         one-millisecond tick.
 */
double MPI_Wtick()
{
	const double resolution_sec = 1.0 / 1000.0;
	return resolution_sec;
}

/**
 * \brief Initialize the MPI execution environment
 * \param argc pointer to the number of arguments
 * \param argv pointer to the argument vector
 * \return error or success
 */
int MPI_Init(int *argc, char ***argv)
{	
	apr_status_t status;
	apr_pool_t *mem_pool = NULL;
	int err, spawned;
	int kvsname_max, key_max, value_max;
	char *envvar, *kvsname, *key, *value;
	
	/* MPI_Init should only be called once */
	
	if (initialized)
	{
		return MPI_ERR_OTHER;
	}

	/* initialize APR */
	
	status = apr_app_initialize(argc, (char const *const **)argv, NULL);
	if (status != APR_SUCCESS) return MPI_ERR_OTHER;
	atexit(apr_terminate);
	
	/* allocate a memory pool */
	
	status = apr_pool_create(&env_pool, NULL);
	if (status != APR_SUCCESS) return MPI_ERR_OTHER;
	
	/* set the debug logging flag from the MPIMT_DEBUG environment
	   variable */

	if (apr_env_get(&envvar, "MPIMT_DEBUG", mem_pool) == APR_SUCCESS)
        {
		log_init(atoi(envvar));
	}

	/* get our hostname */
	
	status = apr_gethostname(hostname, APRMAXHOSTLEN + 1, env_pool);
	if (status != APR_SUCCESS) return MPI_ERR_OTHER;
	hostnamelen = strlen(hostname);

	/* initialize PMI */

	err = PMI_Init(&spawned);

	/* fetch our world configuration from PMI */
	
	PMI_Get_size(&world_size);
	PMI_Get_rank(&world_rank);

	/* start listening on a port */

	CM_Init(&port);
	debug("listening at %s:%d", hostname, port);
	
	/* get KVS max size info from PMI and allocate some temporary
	   storage for key-value insertion */
	
	kvsname_max = PMI_KVS_Get_name_length_max();
	key_max = PMI_KVS_Get_key_length_max();
	value_max = PMI_KVS_Get_value_length_max();

	kvsname = (char *) apr_palloc(env_pool,	sizeof(char) * (kvsname_max + 1));
	key = (char *) apr_palloc(env_pool,	sizeof(char) * (key_max + 1));
	value = (char *) apr_palloc(env_pool, sizeof(char) * (value_max + 1));

	/* add key-value pairs for our hostname and port to the KVS */
	
	PMI_KVS_Get_my_name(kvsname);

	snprintf(key, key_max, "P%d_host", world_rank);
	snprintf(value, value_max, "%s", hostname);
	PMI_KVS_Put(kvsname, key, value);

	snprintf(key, key_max, "P%d_port", world_rank);
	snprintf(value, value_max, "%d", port);
	PMI_KVS_Put(kvsname, key, value);

	PMI_KVS_Commit(kvsname);

	/* wait for all processors to finish publishing their information */
	
	PMI_Barrier();

	/* allocate the processor host / port lookup table */

	processor_hosts = (char **) apr_palloc(env_pool, sizeof(char *) * world_size);
	processor_ports = (int *) apr_palloc(env_pool, sizeof(int) * world_size);

	/* lookup all processor host/port from the KVS and cache in our
	   lookup tables */
	
	int i;
	for (i = 0; i < world_size; i++)
	{
		snprintf(key, key_max, "P%d_host", i);
		PMI_KVS_Get(kvsname, key, value);
		processor_hosts[i] = (char *) apr_palloc(env_pool, sizeof(char) * (strlen(value) + 1));
		strncpy(processor_hosts[i], value, strlen(value));

		snprintf(key, key_max, "P%d_port", i);
		PMI_KVS_Get(kvsname, key, value);
		processor_ports[i] = atoi(value);
	}
	
	/* initialize the opaque object manager */
	
	OOM_init();
	
	/* at this point, we are fully initalized */
	
	initialized = 1;

	return MPI_SUCCESS;
}

/**
 * \brief Terminates MPI execution environment
 * \return status code
 */
int MPI_Finalize()
{
	/* finalize the communications manager */

	CM_Finalize();

	/* finalize the opaque object manager */
	
	OOM_finalize();
	
	/* finalize PMI */

	PMI_Finalize();

	/* destroy the memory pool */

	apr_pool_destroy(env_pool);

	/* finalize APR */

	apr_terminate();
	
	initialized = 0;

	return MPI_SUCCESS;
}

/**
 * \brief Indicates whether MPI_Init has been called
 * \param flag pointer to the variable which will hold the status;
 *        set nonzero if MPI_Init has completed, zero otherwise
 * \return currently returns only MPI_SUCCESS
 */
int MPI_Initialized(int *flag)
{
	*flag = initialized;
	return MPI_SUCCESS;
}

/**
 * \brief Terminates MPI execution environment
 * \param comm communicator of tasks to abort (unused: PMI aborts the
 *        whole job regardless of communicator)
 * \param errorcode error code to return to invoking environment
 * \return MPI_SUCCESS, though PMI_Abort is not expected to return
 */
int MPI_Abort(MPI_Comm comm, int errorcode)
{
	(void) comm;	/* intentionally unused */
	PMI_Abort(errorcode, "MPI_Abort called");
	/* consistency fix: return MPI_SUCCESS like the rest of this
	   module, rather than a bare 0 */
	return MPI_SUCCESS;
}

/**
 * \brief accessor for this process's rank in the world
 * \return the world rank cached by MPI_Init (0 before initialization)
 */
int get_world_rank()
{
	return world_rank;
}

/**
 * \brief accessor for the number of processes in the world
 * \return the world size cached by MPI_Init (0 before initialization)
 */
int get_world_size()
{
	return world_size;
}

/**
 * \brief look up the cached hostname for a given rank
 * \param rank the world rank to look up; the parameter is renamed from
 *        world_rank so it no longer shadows the file-scope world_rank
 *        variable (-Wshadow)
 * \return hostname string owned by the module memory pool, valid while
 *         the environment is initialized.  NOTE(review): rank is not
 *         bounds checked -- callers must pass 0 <= rank < world_size
 */
const char *get_processor_host(int rank)
{
	return processor_hosts[rank];
}

/**
 * \brief look up the cached listen port for a given rank
 * \param rank the world rank to look up; the parameter is renamed from
 *        world_rank so it no longer shadows the file-scope world_rank
 *        variable (-Wshadow)
 * \return the remote port number.  NOTE(review): rank is not bounds
 *         checked -- callers must pass 0 <= rank < world_size
 */
int get_processor_port(int rank)
{
	return processor_ports[rank];
}
