#include <stdlib.h>
#include <math.h>
#include <time.h>
#include "StochasticModel.h"
#include <unistd.h>
#include "Config.h"
#include "StochasticModel.h"
#include "Clustering.h"
#include <string.h>
#include <sys/types.h>
#include <unistd.h>


/*
 * Instantiate type-safe wrappers around the generic hashtable API for
 * tables keyed by request size: insert_size / search_size / remove_size /
 * search_itr_size operate on (struct size_key -> struct size_value) pairs.
 */
DEFINE_HASHTABLE_INSERT(insert_size, struct size_key, struct size_value);
DEFINE_HASHTABLE_SEARCH(search_size, struct size_key, struct size_value);
DEFINE_HASHTABLE_REMOVE(remove_size, struct size_key, struct size_value);
DEFINE_HASHTABLE_ITERATOR_SEARCH(search_itr_size, struct size_key);

static unsigned int hashFromKey(void *k) {
    return ((SizeKey *)k)->size;
}

/*
 * Equality predicate for the size hashtable.
 * Returns 1 when both keys carry the same size, 0 otherwise.
 */
static int compareKey(void *key1, void *key2) {
    return ((SizeKey *) key1)->size == ((SizeKey *) key2)->size;
}

/*
 * Tally how often each (cluster, server) slot is hit by the trace in
 * p_request, record which server id occupies each slot, then derive a
 * per-server selection probability.
 *
 * NOTE(review): the first loop increments visits_per_cluster, but the
 * probabilities below are computed from visits_per_server — presumably
 * that array is populated elsewhere (e.g. while parsing the trace).
 * Confirm; otherwise server_probability stays all zeros.
 */
void computeVisitsPerServer() {
    int i;
    ServerLocation sl;
    for (i = 0; i < N_P_REQUEST; i++) {
        sl = getServerLocation(p_request[i].server);
        visits_per_cluster[sl.cluster][sl.server]++;
        cluster_id[sl.cluster][sl.server] = p_request[i].server;
    }

    for (i = 0; i < N_TOTAL_SERVER; i++) {
        /* empirical probability that a trace request targets server i */
        server_probability[i] = visits_per_server[i] / (double) N_P_REQUEST;
        //value_s_p[i] = i;
    }
}

/*
 * Prepare server sampling: compute per-server empirical probabilities,
 * then build the alias-method tables (cutoff_s_p / alias_s_p) used by
 * nextStochasticServer().
 */
void initializeStochasticServer() {
    computeVisitsPerServer();
    setup_empirical(N_TOTAL_SERVER, server_probability, cutoff_s_p, alias_s_p);
}

/*
 * Draw one server index from the empirical server distribution set up by
 * initializeStochasticServer().
 */
int nextStochasticServer() {
    return (int) empirical(N_TOTAL_SERVER, cutoff_s_p, alias_s_p, value_s_p);
}

/*
 * Build the empirical cumulative distribution of request sizes.
 *
 * Pass 1: count occurrences of each distinct size in p_request with a
 * hashtable keyed by size; N_SIZE ends up as the number of distinct sizes.
 * Pass 2: walk the table, turning each count into a running cumulative
 * probability and collecting the value nodes into cumulative_distribution
 * (array order is whatever the iterator yields; the probability field is
 * monotonically increasing along it by construction).
 *
 * Ownership: insert_size() takes ownership of inserted keys, which are
 * released by hashtable_destroy(); the value nodes survive the destroy
 * (flag 0) because cumulative_distribution keeps pointing at them.
 *
 * Fixes vs. previous version: the probe key is freed when the size is
 * already in the table (was leaked), the value and array allocations are
 * checked, and the heap-allocated iterator is freed.
 */
void computeSizeProbability() {
    struct hashtable * prob_size_hashtable;

    int i;
    SizeKey * k;
    SizeValue * v;
    N_SIZE = 0;

    prob_size_hashtable = create_hashtable(13000, hashFromKey, compareKey);
    if (NULL == prob_size_hashtable) {
        printf("Hash Table Ran Out of Memory Allocating a Key\n");
        exit(EXIT_FAILURE);
    }

    for (i = 0; i < N_P_REQUEST; i++) {
        k = (SizeKey *) malloc(sizeof (SizeKey));
        if (NULL == k) {
            printf("Hash Table Ran Out of Memory Allocating a Key\n");
            exit(EXIT_FAILURE);
        }

        k->size = p_request[i].size;

        if ((v = search_size(prob_size_hashtable, k)) == NULL) {
            v = (SizeValue *) malloc(sizeof (SizeValue));
            if (NULL == v) {
                printf("Hash Table Ran Out of Memory Allocating a Value\n");
                exit(EXIT_FAILURE);
            }
            v->probability = 1;
            v->size = k->size;

            N_SIZE++;

            /* the table takes ownership of k from here on */
            insert_size(prob_size_hashtable, k, v);
        } else {
            /* size already counted: bump it and drop the probe key */
            v->probability = v->probability + 1;
            free(k);
        }
    }

    cumulative_distribution = (SizeValue **) malloc(sizeof (SizeValue*) * N_SIZE);
    if (NULL == cumulative_distribution && N_SIZE > 0) {
        printf("Out of Memory Allocating the Cumulative Distribution\n");
        exit(EXIT_FAILURE);
    }

    struct hashtable_itr* iterator = hashtable_iterator(prob_size_hashtable);
    i = 0;
    double sum = 0;

    if (hashtable_count(prob_size_hashtable) > 0) {
        do {
            v = hashtable_iterator_value(iterator);

            /* per-size count -> running cumulative probability */
            sum = sum + (v->probability / (double) N_P_REQUEST);
            v->probability = sum;

            cumulative_distribution[i] = v;
            cumulative_distribution[i]->tested_occurrences = 0;
            i++;

        } while (hashtable_iterator_advance(iterator));
    }
    free(iterator); /* hashtable_iterator() heap-allocates the iterator */
    hashtable_destroy(prob_size_hashtable, 0); /* frees keys, keeps values */
}

/* Hybrid size-model parameters, estimated in computeModelSwitchProbability()
 * unless fixed at compile time via ALPHA_PARETO / LOGNORMAL_MEAN /
 * LOGNORMAL_VARIANCE. */
double alpha_p = 0; /* Pareto shape for the tail (size >= MODEL_SWITCH_SIZE) */
double log_mu = 0; /* lognormal mu for the body (0 < size < MODEL_SWITCH_SIZE) */
double log_sigma= 0; /* lognormal sigma (sqrt of the estimated variance) */

/*
 * Maximum-likelihood estimate of the Pareto shape parameter alpha for the
 * size tail:  alpha = n / sum(ln(x_i / xm))  over the n_pareto_size
 * requests with size >= xm.
 *
 * Fix: the filter previously hard-coded MODEL_SWITCH_SIZE, ignoring the
 * xm parameter; it now uses xm consistently (the only caller passes
 * xm == MODEL_SWITCH_SIZE, so current behavior is unchanged).
 *
 * Returns 0 for degenerate input (no tail samples or zero log-sum) instead
 * of dividing by zero.
 */
double paretoAlphaEstimation(int xm, int n_pareto_size) {
    double sum = 0;
    int i;

    for (i = 0; i < N_P_REQUEST; i++)
        if (p_request[i].size >= xm)
            sum += (log((double) p_request[i].size) - log((double) xm));

    if (n_pareto_size <= 0 || sum <= 0)
        return 0; /* no usable tail samples */

    return ((double) n_pareto_size) / sum;
}

/*
 * Maximum-likelihood estimate of the lognormal mu parameter: the mean of
 * log(size) over the n_lognormal_size requests in the distribution body
 * (0 < size < MODEL_SWITCH_SIZE).
 */
double logNormalMuEstimation(int n_lognormal_size) {
    double log_sum = 0;
    int idx;

    for (idx = 0; idx < N_P_REQUEST; idx++) {
        if (p_request[idx].size != 0 && p_request[idx].size < MODEL_SWITCH_SIZE)
            log_sum += log((double) p_request[idx].size);
    }

    return log_sum / ((double) n_lognormal_size);
}

/*
 * Maximum-likelihood estimate of the lognormal sigma^2 parameter: the mean
 * squared deviation of log(size) from mu over the n_lognormal_size requests
 * in the distribution body (0 < size < MODEL_SWITCH_SIZE).
 */
double logNormalSigma2Estimation(double mu, int n_lognormal_size) {
    double acc = 0;
    int idx;

    for (idx = 0; idx < N_P_REQUEST; idx++) {
        if (p_request[idx].size != 0 && p_request[idx].size < MODEL_SWITCH_SIZE) {
            double dev = log((double) p_request[idx].size) - mu;
            acc += dev * dev;
        }
    }

    return acc / ((double) n_lognormal_size);
}

/*
 * Split the trace into the three regimes of the hybrid size model —
 * zero-size, lognormal body (< MODEL_SWITCH_SIZE), Pareto tail — and store
 * their empirical mix in prob_hybrid[0..2]. Unless the lognormal parameters
 * are fixed at compile time, also fit alpha_p, log_mu and log_sigma from
 * the trace.
 */
void computeModelSwitchProbability() {
    double count_zero = 0;
    double count_body = 0;
    double count_tail = 0;
    int idx;

    for (idx = 0; idx < N_P_REQUEST; idx++) {
        if (p_request[idx].size == 0)
            count_zero++;
        else if (p_request[idx].size < MODEL_SWITCH_SIZE)
            count_body++;
        else
            count_tail++;
    }

    prob_hybrid[0] = count_zero / (double) N_P_REQUEST;
    prob_hybrid[1] = count_body / (double) N_P_REQUEST;
    prob_hybrid[2] = count_tail / (double) N_P_REQUEST;

#if !defined(LOGNORMAL_MEAN) && !defined(LOGNORMAL_VARIANCE)
    /* fit the tail and body parameters from the trace */
    alpha_p = paretoAlphaEstimation(MODEL_SWITCH_SIZE, count_tail);
    log_mu = logNormalMuEstimation(count_body);
    log_sigma = sqrt(logNormalSigma2Estimation(log_mu, count_body));
#endif

}

/*
 * Initialize whichever size-generation model was selected at compile time
 * (STOCHASTIC_CLUSTERIZED_SIZE / STOCHASTIC_SIZE_EMPIRICAL /
 * STOCHASTIC_SIZE_HYBRID_LOGNORMAL_POWERLAW in Config.h).
 */
void initializeStochasticSize() {
    /* seed mixes wall clock and pid so concurrent runs diverge */
    int size_seed = time(0) + getpid() -1;

#if STOCHASTIC_CLUSTERIZED_SIZE
    /* k-means over the empirical sizes: draw from cluster centers with the
     * clusters' empirical weights via the alias method */
    computeSizeProbability();
    computeKMeansClustering(N_CLASS_STOCHASTIC);
    int i;
    for (i=0; i<N_CLASS_STOCHASTIC; i++) {
        prob_class_stochastic[i] = cluster[i].probability_element;
        value_class_stochastic[i] = (double)((int)cluster[i].center);
        printf("richieste cl_%d: %lu centroide %f devstd: %f\n", i, cluster[i].n_item, cluster[i].center, cluster[i].standard_deviation);
    }

    setup_empirical(N_CLASS_STOCHASTIC, prob_class_stochastic, cutoff_class_stochastic, alias_class_stochastic);
#endif


#if STOCHASTIC_SIZE_EMPIRICAL
    /* inverse-CDF sampling straight from the empirical distribution */
    computeSizeProbability();
    computeKMeansClustering(N_CLASS);
    uniform_stream = create_stream();
    reseed(uniform_stream, size_seed);
#endif

#if STOCHASTIC_SIZE_HYBRID_LOGNORMAL_POWERLAW
    /* hybrid model: per draw, pick zero / lognormal body / Pareto tail with
     * the empirical mix stored in prob_hybrid */
    computeSizeProbability();
    computeKMeansClustering(N_CLASS);
    computeModelSwitchProbability();

    setup_empirical(3, prob_hybrid, cutoff_hybrid, alias_hybrid);

    pareto_stream = create_stream();
    reseed(pareto_stream, (size_seed * size_seed + 1) % 71);

    lognormal_stream = create_stream();
    reseed(lognormal_stream, (100000 * size_seed) % (size_seed + 1));

    /* value_hybrid maps the empirical() outcome to the regime index */
    value_hybrid[0] = 0;
    value_hybrid[1] = 1;
    value_hybrid[2] = 2;
#endif

}

/*
 * Inverse-CDF lookup: binary-search cumulative_distribution (whose
 * probability field increases monotonically along the array, by
 * construction in computeSizeProbability) for the first entry whose
 * cumulative probability covers x, bump that entry's hit counter, and
 * return its size.
 */
int searchSizeFromDistribution(double x) {
    int lo = 0;
    int hi = N_SIZE - 1;
    int mid;

    /* x lands before the first bucket */
    if (x < cumulative_distribution[0]->probability) {
        cumulative_distribution[0]->tested_occurrences++;
        return cumulative_distribution[0]->size;
    }

    /* narrow [lo, hi] until they are adjacent; hi is the answer */
    while (hi - lo > 1) {
        mid = (lo + hi) / 2;
        if (cumulative_distribution[mid]->probability < x)
            lo = mid;
        else
            hi = mid;
    }

    cumulative_distribution[hi]->tested_occurrences++;
    return cumulative_distribution[hi]->size;
}


/*
 * Draw one request size from the compile-time-selected model.
 * Returns -1 only if no model was enabled at compile time.
 */
int nextStochasticSize() {

#if STOCHASTIC_CLUSTERIZED_SIZE
    /* sample a cluster center via the alias method */
    return (int) empirical(N_CLASS_STOCHASTIC, cutoff_class_stochastic, alias_class_stochastic, value_class_stochastic);
#endif

#if STOCHASTIC_SIZE_EMPIRICAL
    double value;
    value = stream_uniform(uniform_stream, 0.0, 1.0);
    int i;
    /* NOTE(review): this loop returns on its first iteration, so it is
     * effectively `return searchSizeFromDistribution(value);` guarded by
     * N_SIZE > 0 — presumably a leftover; confirm before simplifying */
    for (i = 0; i < N_SIZE; i++)
        return searchSizeFromDistribution(value);
#endif

#if STOCHASTIC_SIZE_HYBRID_LOGNORMAL_POWERLAW
    double value;
    /* pick the regime: 0 = zero-size, 1 = lognormal body, 2 = Pareto tail */
    double model = empirical(3, cutoff_hybrid, alias_hybrid, value_hybrid);

    if (model == 0) {
        return 0;
    } else if (model == 1) {

        //Lognormal distribution for the body of size
#if defined(LOGNORMAL_MEAN) && defined(LOGNORMAL_VARIANCE)
            value = stream_normal(lognormal_stream, LOGNORMAL_MEAN, LOGNORMAL_VARIANCE);
#else
            value = stream_normal(lognormal_stream, log_mu, log_sigma);
#endif
            
            /* exponentiate the normal draw to get a lognormal size */
            value = exp(value);
        return (int)value;
    } else {
        
        //PowerLaw (pareto) distribution for the tail of size
#ifdef ALPHA_PARETO
            value = stream_pareto(pareto_stream, ALPHA_PARETO) + MODEL_SWITCH_SIZE;
#else
            value = stream_pareto(pareto_stream, alpha_p)+MODEL_SWITCH_SIZE;
#endif
        return (int)value;
    }
#endif
    return -1;
}

/*
 * Mean inter-arrival time observed in the trace: the timestamp span of
 * p_request divided by the number of inter-arrival gaps.
 *
 * Fix: N_P_REQUEST requests define N_P_REQUEST - 1 gaps; the previous
 * version divided by N_P_REQUEST, slightly underestimating the mean.
 * Returns 0 when the trace has fewer than two requests (no gaps).
 */
double computeMeanInterarrivalTime() {
    if (N_P_REQUEST < 2)
        return 0;
    double initial_request = p_request[0].timestamp;
    double final_request = p_request[N_P_REQUEST - 1].timestamp;
    double delta = final_request - initial_request;
    return delta / (double) (N_P_REQUEST - 1);
}

/*
 * Set up exponential inter-arrival sampling: create and seed the stream
 * (seed mixes wall clock and pid) and fit the exponential mean to the
 * trace's observed mean inter-arrival time.
 */
void initializeStochasticInterarrivalTime() {
    int interarrival_time_seed = time(0);
    exponential_stream = create_stream();
    reseed(exponential_stream, (interarrival_time_seed + getpid()) % (71 * 71 + 1));
    exponential_mean = computeMeanInterarrivalTime();
}

/*
 * Draw one inter-arrival time from the exponential distribution fitted in
 * initializeStochasticInterarrivalTime().
 */
double nextStochasticInterarrivalTime() {
    return stream_exponential(exponential_stream, exponential_mean);
}