
from algorithms import clustering_algorithms
from algorithms import collaborative_filtering_algorithms
from collections import defaultdict

import time

from datetime import datetime

def plot_data(models, clusterids):
    """Project models and their cluster centroids to 2-D via PCA and plot them.

    NOTE(review): this function references several names that are not defined
    or imported anywhere in this file: `pc` (presumably Pycluster),
    `mlab`/`plt` (presumably matplotlib.mlab / matplotlib.pyplot), and
    `users` (probably a typo for `models` -- confirm against callers).
    As written it raises NameError at runtime.
    NOTE(review): `matplotlib.mlab.PCA` was removed in modern matplotlib
    (2.2+) -- verify the pinned matplotlib version before relying on this.
    """

    # Per-cluster centroid vectors for the given assignment.
    # NOTE(review): `users` is undefined here -- likely should be `models`.
    centroids, _ = pc.clustercentroids(users, clusterid=clusterids)

    # reduce dimensionality by principal component analysis
    models_pca = mlab.PCA(models)
    # Keep only components explaining at least as much variance as the 2nd one,
    # i.e. project down to the first two principal components.
    cutoff = models_pca.fracs[1]

    models_2d = models_pca.project(models, minfrac=cutoff)
    centroids_2d = models_pca.project(centroids, minfrac=cutoff)

    # make a plot
    # Color cycle repeated so there is one entry per expected cluster id.
    colors = ['red', 'green', 'blue', 'red', 'green', 'blue', 'red', 'green', 'blue', 'red', 'green', 'blue' , 'blue', 'red', 'green', 'blue', 'red', 'green', 'blue', 'red', 'green', 'blue' , 'blue', 'red', 'green', 'blue', 'red', 'green', 'blue', 'red', 'green', 'blue' , 'blue', 'red', 'green', 'blue', 'red', 'green', 'blue', 'red', 'green', 'blue']

    # create a new figure
    plt.figure()

    # set axis limits with a small margin around the projected points
    plt.xlim([models_2d[:,0].min() - .5, models_2d[:,0].max() + .5])
    plt.ylim([models_2d[:,1].min() - .5, models_2d[:,1].max() + .5])
    plt.xticks([], []); plt.yticks([], []) # numbers aren't meaningful

    # show the centroids
    plt.scatter(centroids_2d[:,0], centroids_2d[:,1], marker='o', c=colors, s=100)

    # show domain colored by their cluster id
    for i, ((x,y), kls) in enumerate(zip(models_2d, clusterids)):
        plt.annotate(str(i), xy=(x,y), xytext=(0,0), textcoords='offset points',
            color=colors[kls])

    plt.show()

def prepare_data_for_clustering(file_path, min_domain_count, min_no_of_domains_for_clustering):
    """Filter raw session logs and rewrite them with dense integer domain ids.

    Reads comma-separated sessions (one per line) from *file_path*, keeps
    only sessions with at least *min_no_of_domains_for_clustering* domains,
    drops domains occurring fewer than *min_domain_count* times overall,
    and writes the surviving sessions to 'tmp/training_logs' with each
    domain replaced by an integer id assigned in order of first appearance.
    The domain -> id mapping is dumped (as repr) to 'tmp/domain_mapping'.

    Prints the number of distinct mapped domains.
    """
    # First pass: global occurrence count per domain.
    domain_count = defaultdict(int)
    with open(file_path) as raw_log:
        for line in raw_log.read().splitlines():
            for domain in line.split(','):
                domain_count[domain] += 1

    # Maps original domain string -> dense integer id, in first-seen order.
    # Kept as defaultdict(int) so the repr written to 'tmp/domain_mapping'
    # stays byte-compatible with earlier runs.
    domain_mapping = defaultdict(int)

    # Second pass: rewrite qualifying sessions using the integer ids.
    with open(file_path) as raw_log, open('tmp/training_logs', 'w+') as processed_logs:
        for line in raw_log.read().splitlines():
            session = line.split(',')
            # Session length is checked BEFORE rare-domain filtering,
            # matching the original semantics.
            if len(session) < min_no_of_domains_for_clustering:
                continue

            mapped = []
            for domain in session:
                if domain_count[domain] >= min_domain_count:
                    if domain not in domain_mapping:
                        # Next unused dense id.
                        domain_mapping[domain] = len(domain_mapping)
                    mapped.append(str(domain_mapping[domain]))

            # Skip sessions where every domain was filtered out.
            if mapped:
                processed_logs.write(','.join(mapped) + '\n')

    # Persist the id mapping for later decoding of predictions.
    with open('tmp/domain_mapping', 'w+') as domain_mapping_log:
        domain_mapping_log.write(str(domain_mapping))

    print('no of domains')
    print(len(domain_mapping))

def evaluate_cluster(clusterids, min_no_of_access_for_prediction):
    """Score a clustering by next-domain prediction against the test logs.

    *clusterids* is array-like (must support .tolist()); clusterids[i] is
    the cluster id assigned to domain index i. The cluster membership map
    is appended to 'tmp/cluster_mapping', then every session in
    'tmp/test_logs' with at least *min_no_of_access_for_prediction*
    accesses is replayed: each newly-accessed domain past that warm-up is
    predicted via get_predictions(), and a hit is counted when the domain
    is in the predicted (native) cluster. Prints accuracy and coverage.

    Fixes vs. original: guards the accuracy division against zero
    predictions, removes a leftover debug time.sleep(5), and closes the
    test-log file.
    """
    # Invert clusterids: cluster id -> list of member domain indices,
    # stored as strings to match the textual test-log format.
    clusters = defaultdict(list)

    clusterids = clusterids.tolist()

    print('clusterids list')
    print(clusterids)
    print('no of clusters')
    print(len(clusterids))

    for i, clusterid in enumerate(clusterids):
        clusters[clusterid].append(str(i))

    # Domains with ids below this were seen during training ("covered").
    no_of_clustered_domains = len(clusterids)

    print('$$$$$$$$$$$$$$$$$$$$$$$$$$')
    print('clusters')
    print(clusters)
    print('$$$$$$$$$$$$$$$$$$$$$')

    # Append this run's clusters to the log for offline inspection.
    with open('tmp/cluster_mapping', 'a+') as cluster_mapping_log:
        cluster_mapping_log.write(str(clusters))

    print('start predicting')

    # test data is used for prediction
    file_path = 'tmp/test_logs'
    correct_no_of_predictions = 0
    total_no_of_predictions = 0
    covered_domains = 0
    total_no_of_domains = 0

    # How often each cluster was selected as the native cluster.
    cluster_prediction_count = defaultdict(int)

    # prediction
    with open(file_path) as test_logs:
        for line in test_logs.read().splitlines():

            session = line.split(',')
            if len(session) < min_no_of_access_for_prediction:
                continue

            no_of_accesses = 0
            prev_domain = None  # suppress consecutive repeats of a domain

            # iterating through each domain accessed in the session
            for domain in session:

                no_of_accesses += 1

                # Only predict once enough history has accumulated.
                if no_of_accesses >= min_no_of_access_for_prediction:

                    if prev_domain != domain:

                        total_no_of_domains += 1

                        # Covered = the domain id falls inside the range of
                        # domains that were clustered during training.
                        if int(domain) < no_of_clustered_domains:

                            covered_domains += 1
                            total_no_of_predictions += 1

                            predictions, native_cluster_index = get_predictions(no_of_accesses, session, clusters)
                            cluster_prediction_count[native_cluster_index] += 1

                            if domain in predictions:
                                correct_no_of_predictions += 1

                        else:
                            print('this domain is not covered')
                            print(domain)

                    prev_domain = domain

    print('*****************')
    print('cluster prediction count')
    print(cluster_prediction_count)
    print('correct no of predictions')
    print(correct_no_of_predictions)
    print('total no of predictions')
    print(total_no_of_predictions)
    print('accuracy')
    # Guard: the original crashed with ZeroDivisionError when no domain
    # was predictable.
    if total_no_of_predictions:
        print(float(correct_no_of_predictions) / total_no_of_predictions)
    else:
        print('n/a (no predictions made)')
    print('covered domains')
    print(covered_domains)
    print('total no of domains')
    print(total_no_of_domains)

def get_predictions(no_of_accesses, session, clusters):
    """Return the members of the session's native cluster as predictions.

    The native cluster is the one sharing the most domains with the first
    *no_of_accesses* entries of *session*; ties keep the earlier cluster.

    Args:
        no_of_accesses: how many leading session entries count as history.
        session: list of domain-id strings accessed in order.
        clusters: mapping of cluster index -> list of domain-id strings.

    Returns:
        (predictions, native_cluster_index) -- the member list of the best
        cluster and its index; ([], 0) when no cluster overlaps the history.

    Fixes vs. original: dict.iteritems() (Python-2-only) replaced with
    .items(), which iterates identically on both Python 2 and 3; the
    misspelled local 'max_no_of_occurences' corrected.
    """
    accessed_domains = set(session[:no_of_accesses])

    # Find the cluster with the largest overlap with the access history.
    max_no_of_occurrences = 0
    native_cluster = []
    native_cluster_index = 0

    for index, cluster in clusters.items():
        no_of_occurrences = len(accessed_domains & set(cluster))

        # Strict '>' keeps the first cluster seen on ties, as before.
        if no_of_occurrences > max_no_of_occurrences:
            max_no_of_occurrences = no_of_occurrences
            native_cluster = cluster
            native_cluster_index = index

    return native_cluster, native_cluster_index

"""
    Clustering Techniques
"""

def kcluster(file_path, no_of_domains, no_of_clusters, clustering_method, distance_function, min_no_of_access_for_prediction):
    """Run k-means/k-medians clustering and evaluate the assignment.

    Delegates the clustering to clustering_algorithms.kcluster, then scores
    the resulting cluster ids by next-domain prediction on the test logs
    (see evaluate_cluster).
    """
    # error / nfound from the clustering run are not needed for evaluation.
    clusterids, _, _ = clustering_algorithms.kcluster(file_path, no_of_domains, no_of_clusters, clustering_method, distance_function)
    evaluate_cluster(clusterids, min_no_of_access_for_prediction)




def kmedoids(file_path, no_of_domains, no_of_clusters):
    """Run k-medoids clustering and return its raw results.

    NOTE(review): the original computed the clustering and silently
    discarded it (no evaluation, no return). Returning the result is
    backward-compatible (callers that ignored the previous None still work)
    and makes the function usable.

    Returns:
        (clusterids, error, nfound) as produced by
        clustering_algorithms.kmedoids.
    """
    clusterids, error, nfound = clustering_algorithms.kmedoids(file_path, no_of_domains, no_of_clusters)
    return clusterids, error, nfound

def somcluster(file_path, no_of_domains, distance_function):
    """Run self-organizing-map clustering and return its raw results.

    NOTE(review): the original computed the clustering and silently
    discarded it. Returning the result is backward-compatible (callers
    that ignored the previous None still work) and makes the function
    usable.

    Returns:
        (clusterids, error, nfound) as produced by
        clustering_algorithms.somcluster.
    """
    clusterids, error, nfound = clustering_algorithms.somcluster(file_path, no_of_domains, distance_function)
    return clusterids, error, nfound

def treecluster(file_path, no_of_domains, no_of_clusters, clustering_method, distance_function, min_no_of_access_for_prediction):
    """Run hierarchical (tree) clustering and evaluate the assignment.

    Delegates the clustering to clustering_algorithms.treecluster and then
    scores the resulting cluster ids by next-domain prediction on the test
    logs (see evaluate_cluster).
    """
    assignment = clustering_algorithms.treecluster(
        file_path, no_of_domains, no_of_clusters, clustering_method, distance_function)
    evaluate_cluster(assignment, min_no_of_access_for_prediction)

"""
    Collaborative Filtering Techniques
"""

def slope_one_filter(file_path, min_no_of_access_for_prediction, min_no_of_access_for_updation, no_of_predictions, no_of_training_sessions):

    """
    Run the Slope One collaborative-filtering model over the session log.

    Thin wrapper that delegates entirely to
    collaborative_filtering_algorithms.slope_one_filter, which both updates
    the model and produces predictions from *file_path*.
    NOTE(review): parameter semantics (warm-up thresholds, prediction count,
    training-session count) are defined by the delegate -- confirm against
    algorithms/collaborative_filtering_algorithms.
    """
    collaborative_filtering_algorithms.slope_one_filter(file_path, min_no_of_access_for_prediction, min_no_of_access_for_updation, no_of_predictions, no_of_training_sessions)
