import common
import output

import numpy as np
from collections import OrderedDict
import datetime
import scipy
import pylab
import scipy.cluster.hierarchy as sch

#http://mines.humanoriented.com/classes/2010/fall/csci568/portfolio_exports/sphilip/hier.html

# Create class to represent the hierarchical clustering algorithm
# Each cluster is: 1. A point in the tree w/2 branches or 2. Endpoint associated w/ row from data
# Clusters have data about location (either row data for endpts or merged data from its 2 branches)
class tree:
  """Node of a hierarchical-clustering binary tree.

  A node is either a leaf holding one original data row, or an internal
  node produced by merging two child clusters.
  """
  def __init__(self,data_list,left_subtree=None,right_subtree=None,distance=0.0,id=None):
    # Row data for a leaf, or the element-wise average of the two
    # children for a merged node.
    self.data_list = data_list
    # Distance between the two children at merge time (0.0 for leaves).
    self.distance = distance
    # Non-negative ids mark original rows; negative ids mark merged
    # clusters (see hierarchical_cluster).
    self.id = id
    # Child subtrees; both None for a leaf.
    self.left_subtree = left_subtree
    self.right_subtree = right_subtree
    
# Hierarchical clustering algorithm:
#   1. Create group of clusters containing original items
#   2. Find 2 objects that are the closest together (ie iterate through
#      all items and find Pearson correlation) and merge.
#      Data = avg(2 objects)
#   3. Repeat until 1 cluster left w/ all objects
#
# Store correlation in array, since it's calculated repeatedly w/ the
#   same objects (@ least when individual objects are clusters)
def hierarchical_cluster(rows,distance=common.LMsimilarity):
    """Agglomeratively cluster rows into a single binary tree.

    rows: list of equal-length data vectors.
    distance: pairwise distance function over two data vectors.
    Returns the root `tree` node covering all rows.
    """
    cache = {}          # (id_a, id_b) -> distance, memoized across merges
    next_merge_id = -1  # merged clusters get negative ids; leaves keep row indices

    # Step 1: start with one leaf cluster per input row.
    clusters = [tree(rows[idx], id=idx) for idx in range(len(rows))]

    # Steps 2-3: repeatedly merge the closest pair until one tree remains.
    while len(clusters) > 1:
        best_pair = (0, 1)
        best_dist = distance(clusters[0].data_list, clusters[1].data_list)

        # Scan every unordered pair for the smallest distance, reusing
        # previously computed values keyed by the stable cluster ids.
        for a in range(len(clusters)):
            for b in range(a + 1, len(clusters)):
                key = (clusters[a].id, clusters[b].id)
                if key not in cache:
                    cache[key] = distance(clusters[a].data_list, clusters[b].data_list)
                if cache[key] < best_dist:
                    best_dist = cache[key]
                    best_pair = (a, b)

        left = clusters[best_pair[0]]
        right = clusters[best_pair[1]]
        # The merged cluster's data is the element-wise average of the pair.
        merged_data = [
            (left.data_list[i] + right.data_list[i]) / 2.0
            for i in range(len(clusters[0].data_list))
        ]

        merged = tree(merged_data, left_subtree=left, right_subtree=right,
                      distance=best_dist, id=next_merge_id)
        next_merge_id -= 1

        # Remove the higher index first so the lower one stays valid.
        del clusters[best_pair[1]]
        del clusters[best_pair[0]]
        clusters.append(merged)

    return clusters[0]

def extract_clusters(clust,dist):
    """Return the maximal subtrees whose merge distance is below `dist`.

    Cutting the tree at `dist` yields the cluster roots at that threshold.
    """
    # A node merged below the threshold is itself a whole cluster.
    if clust.distance < dist:
        return [clust]
    # Otherwise the cut falls inside this node: descend into whichever
    # children exist and concatenate their results.
    found = []
    if clust.left_subtree is not None:
        found += extract_clusters(clust.left_subtree, dist=dist)
    if clust.right_subtree is not None:
        found += extract_clusters(clust.right_subtree, dist=dist)
    return found
  
def calculate_mean(clustered):
    """Return the per-dimension mean vector (centroid) of each cluster.

    clustered: list of clusters, each a list of nodes exposing `data_list`.
    Returns a list with one mean vector per non-empty cluster; empty
    clusters are reported and skipped.
    """
    mu = []
    for cluster in clustered:
        if len(cluster) == 1:
            # Singleton cluster: its centroid is its own data vector.
            mu.append(cluster[0].data_list)
        elif len(cluster) > 1:
            # Average each dimension across all members.  `range` replaces
            # the Python-2-only `xrange` (identical iteration behaviour).
            dims = len(cluster[0].data_list)
            mean = [np.mean([member.data_list[i] for member in cluster])
                    for i in range(dims)]
            mu.append(mean)
        else:
            # Parenthesized print works on both Python 2 and 3; the old
            # `print` statement was a SyntaxError under Python 3.
            print("empty cluster")
    return mu

def extract_clusters_with_mean_orig(clust,dist):
    """Cut the tree at `dist`; return [cluster means, cluster subtree roots]."""
    # Subtree roots at the requested cut threshold.
    subtrees = extract_clusters(clust, dist)

    # Expand each subtree into its list of leaf nodes (original rows).
    leaves_per_cluster = [get_cluster_elements_with_data(sub) for sub in subtrees]

    mu = calculate_mean(leaves_per_cluster)
    return [mu, subtrees]
    
def extract_clusters_with_mean(clust,dist):
    """Cut the tree at `dist`; return [cluster means, per-cluster leaf lists].

    Unlike extract_clusters_with_mean_orig, the second element holds the
    leaf-node lists (empty ones dropped) rather than the subtree roots.
    """
    subtrees = extract_clusters(clust, dist)

    clustered = [get_cluster_elements_with_data(sub) for sub in subtrees]

    # BUG FIX: the original called clustered.remove(item) while iterating
    # over `clustered` itself, which skips the element following each
    # removal and can leave empty clusters in the result.  Rebuilding the
    # list reliably drops every empty entry.
    clustered = [leaves for leaves in clustered if leaves]

    mu = calculate_mean(clustered)
    return [mu, clustered]

def get_cluster_elements_with_data(clust):
    """Return the leaf nodes (original data rows) under a cluster subtree."""
    # BUG FIX: leaves carry the non-negative row index as id (ids start at
    # 0 in hierarchical_cluster) while merged clusters get negative ids.
    # The original test `clust.id > 0` misclassified the leaf with id 0 as
    # an internal node and silently dropped it, producing empty clusters.
    if clust.id >= 0:
        return [clust]
    # Internal node: collect leaves from whichever children exist.
    leaves = []
    if clust.left_subtree is not None:
        leaves += get_cluster_elements_with_data(clust.left_subtree)
    if clust.right_subtree is not None:
        leaves += get_cluster_elements_with_data(clust.right_subtree)
    return leaves
        
def get_cluster_elements(clust):
    """Return the row ids of the leaves under a cluster subtree."""
    # BUG FIX: leaves carry non-negative row indices as ids (starting at 0,
    # see hierarchical_cluster) and merged clusters carry negative ids.
    # The original `clust.id > 0` dropped the leaf whose id is 0.
    if clust.id >= 0:
        return [clust.id]
    # Internal node: gather ids from whichever children exist.
    ids = []
    if clust.left_subtree is not None:
        ids += get_cluster_elements(clust.left_subtree)
    if clust.right_subtree is not None:
        ids += get_cluster_elements(clust.right_subtree)
    return ids
        
def global_impact(centers, clusters, X):
    """Total within-cluster distortion: sum over clusters of the distances
    from each member's data vector to its cluster centre.

    centers: list of centre vectors, parallel to `clusters`.
    clusters: list of clusters, each a list of nodes exposing `data_list`.
    X: unused; kept for call compatibility with existing callers.
    """
    # `range` replaces the Python-2-only `xrange`; the accumulator is no
    # longer named after the function it lives in.
    per_cluster = []
    for idx in range(len(clusters)):
        distances = [common.LMsimilarity(centers[idx], member.data_list)
                     for member in clusters[idx]]
        per_cluster.append(sum(distances))
    return sum(per_cluster)

def weight_factor(attr, k, old_alfa):
    """Weight factor alpha(k) used by the f(K) cluster-count heuristic.

    attr: data dimensionality (number of attributes per vector).
    k: current cluster count / threshold step.
    old_alfa: alpha from the previous step.
    """
    if k == 2 and attr > 1:
        # Float literals guard against Python 2 integer division: with an
        # int `attr`, 3 / (4 * attr) truncated to 0 and this always
        # returned 1 under Python 2.
        return 1 - (3.0 / (4.0 * attr))
    return old_alfa + ((1 - old_alfa) / 6.0)
    
def real_distortion(global_imp, old_global_imp, k, alfa):
    """Distortion ratio f(K): current impact over alpha-weighted previous.

    Returns 1 for the degenerate first step (k == 1).
    """
    # The original guard `k == 1 or k - 1 == 0` tested the same condition
    # twice; a single comparison is equivalent.
    if k == 1:
        return 1
    # NOTE(review): divides by alfa * old_global_imp; callers must ensure
    # neither factor is zero (gap_statistic_my's first step passes a zero
    # previous impact) — confirm intended.
    return global_imp / (alfa * old_global_imp)

def xfrange(start, stop, step):
    """Float-friendly analogue of range(): values from `start` up to (but
    excluding) `stop`, advancing by `step`; returned as a list."""
    # Accumulate by repeated addition (not start + i*step) so the float
    # behaviour matches the original implementation exactly.
    values = []
    cursor = start
    while cursor < stop:
        values.append(cursor)
        cursor += step
    return values
        
def gap_statistic_my(X):
    """Sweep tree-cut thresholds and score each cut with an f(K)-style statistic.

    X: root `tree` node from hierarchical_cluster (its data_list length is
    used as the attribute count for weight_factor).
    Returns (thresholds, global impacts, distortion scores fk, alphas).
    """
    # taken from https://datasciencelab.wordpress.com/2014/01/21/selection-of-k-in-k-means-clustering-reloaded/
    ks = np.arange(0.1, 1, 0.1)  # candidate cut distances 0.1 .. 0.9
    global_imp = np.zeros(len(ks))
    fk = np.zeros(len(ks))
    alfa = np.zeros(len(ks))
    for indk, k in enumerate(ks):
        # Cut the tree at distance k and score the resulting clustering.
        mu, clusters = extract_clusters_with_mean(X,k)
        global_imp[indk] = global_impact(mu, clusters, X)
        # First step has no previous alpha; use 0.
        if indk - 1 >= 0:
            old_alfa = alfa[indk - 1]
        else:
            old_alfa = 0
        alfa[indk] = weight_factor(len(X.data_list), k, old_alfa)
        # NOTE(review): at indk == 0, global_imp[indk - 1] wraps around to
        # global_imp[-1], which is still 0.0 at this point, so
        # real_distortion divides by zero (numpy yields inf plus a
        # RuntimeWarning rather than raising) — confirm this is intended.
        fk[indk] = real_distortion(global_imp[indk], global_imp[indk - 1], k, alfa[indk])
        
    return(ks, global_imp, fk, alfa)
    
def close_to_clusters(router, mu, k):
    """True when `router` lies within distance `k` of at least one centre in `mu`."""
    # Track the smallest distance seen, starting from the sentinel 1
    # (presumably LMsimilarity's maximum — TODO confirm).
    nearest = 1
    for center in mu:
        nearest = min(nearest, common.LMsimilarity(router, center))
    return nearest < k
    
def get_router_colors(scan_data_binned, res_optimal):
    """Assign a colour to every router key according to its cluster.

    scan_data_binned: ordered mapping router_key -> scan samples; the leaf
    ids returned by get_cluster_elements index into its key order.
    res_optimal: list of cluster subtree roots.
    Returns an OrderedDict of router_key -> colour from output.my_colors.
    """
    clustered_idx = [get_cluster_elements(clus) for clus in res_optimal]

    # `dict.keys()[i]` was Python-2-only (keys views are not indexable on
    # Python 3); hoist an explicit list once, outside the loops.
    router_keys = list(scan_data_binned.keys())

    colour_list = OrderedDict()
    for idx, cluster in enumerate(clustered_idx):
        # Every router in cluster `idx` shares that cluster's colour.
        for router_idx in cluster:
            colour_list[router_keys[router_idx]] = output.my_colors[idx]
    return colour_list

def get_location_dates(scan_data_binned, res_optimal, start_date, end_date):
    """Return sorted presence-boundary timestamps per cluster location.

    For each cluster: its first and last scan timestamp, plus both ends of
    every silence longer than `min_gap` minutes between consecutive scans.
    start_date / end_date are unused; kept for call compatibility.
    """
    min_gap = 10  # minutes; a longer silence splits the visit in two
    location_dates = []
    clustered_idx = [get_cluster_elements(clus) for clus in res_optimal]

    # `dict.keys()[i]` was Python-2-only; hoist an indexable key list once.
    router_keys = list(scan_data_binned.keys())

    # create stop location vectors
    for cluster in clustered_idx:
        if len(cluster) > 0:
            # Gather every scan timestamp of every router in this cluster.
            all_dates = []
            for router_idx in cluster:
                all_dates += [item[0] for item in scan_data_binned[router_keys[router_idx]]]

            location_dates += [min(all_dates), max(all_dates)]
            all_dates = sorted(set(all_dates))
            # `range` replaces the Python-2-only `xrange`; the loop variable
            # is renamed so it no longer shadows the outer index as before.
            for pos in range(len(all_dates) - 1):
                curr_date = all_dates[pos]
                next_date = all_dates[pos + 1]
                if next_date - curr_date > datetime.timedelta(minutes=min_gap):
                    location_dates.append(curr_date)
                    location_dates.append(next_date)

    return sorted(location_dates)
        
def get_location_vectors(scan_data_binned, res_optimal, start_date, end_date):
    """Build per-minute presence vectors from cluster scan dates.

    NOTE(review): this function appears unfinished — `location_vectors` is
    never populated (it is always returned empty), the per-minute
    `s_vector` it computes is discarded, and `flag` is reset to False every
    iteration so `not flag` is always true.  Confirm intended behaviour
    before relying on it.  It also uses Python-2-only constructs
    (`scan_data_binned.keys()[i]`, `xrange`, the `print` statement).
    """
    min_gap = 10
    location_vectors = []
    clustered_idx = []
    loc_dates = []
    # Expand each cluster subtree into its leaf row indices.
    for clus in res_optimal:
        clustered_idx.append(get_cluster_elements(clus))
        
    # create stop location vectors
    for idx in range(len(clustered_idx)):
        cluster = clustered_idx[idx]
        if len(cluster) > 0:
            # Collect every scan timestamp of every router in this cluster.
            all_dates = []
            for router_idx in cluster:
                all_dates += [item[0] for item in scan_data_binned[scan_data_binned.keys()[router_idx]]]
            
            # Visit boundaries: first/last scan of the cluster ...
            loc_dates.append(min(all_dates))
            loc_dates.append(max(all_dates))
            
            # ... plus both sides of every gap longer than min_gap minutes.
            all_dates = sorted(list(set(all_dates)))
            for idd in xrange(len(all_dates) - 1):
                curr_idx = all_dates[idd]
                next_idx = all_dates[idd + 1]
                if next_idx - curr_idx > datetime.timedelta(minutes = min_gap):
                    loc_dates.append(curr_idx)
                    loc_dates.append(next_idx)
                
    loc_dates = sorted(loc_dates)
    # NOTE(review): overwrites the last boundary with end_date; raises
    # IndexError when loc_dates is empty (no clusters) — confirm.
    loc_dates[-1] = end_date
    print loc_dates
    # Walk minute by minute from start_date, emitting 1 while inside the
    # interval [loc_dates[idx], loc_dates[idx+1]] and 0 otherwise.
    curr_date = start_date
    s_vector = []
    idx = 0
    while curr_date <= end_date:
        flag = False
        if not flag and curr_date > loc_dates[idx] and curr_date < loc_dates[idx + 1]:
            s_vector.append(1)
        elif curr_date > loc_dates[idx + 1]:
            # Past the current interval: advance to the next boundary pair.
            idx = idx + 2
        else:
            s_vector.append(0)
        curr_date = curr_date + datetime.timedelta(minutes = 1)
#    print s_vector
    return location_vectors

def output_dendrogram(vd):
    """Plot a dendrogram of the pairwise LMsimilarity distances of `vd`.

    vd: sequence of data vectors accepted by common.LMsimilarity.
    Draws onto a new pylab figure; nothing is returned or saved here.
    """
    n = len(vd)
    dist_mat = np.zeros([n, n])
    # Full n x n matrix; `range` replaces the Python-2-only `xrange`.
    # LMsimilarity is evaluated for both (i, j) and (j, i) — presumably
    # symmetric, TODO confirm (halving the work would then be safe).
    for id_1 in range(n):
        for id_2 in range(n):
            dist_mat[id_1, id_2] = common.LMsimilarity(vd[id_1], vd[id_2])

    pylab.figure(figsize=(8, 6))
    Y = sch.linkage(dist_mat, method='centroid')
    sch.dendrogram(Y)
    # Hide x labels entirely; shrink both tick-label fonts for readability.
    pylab.xticks([])
    pylab.tick_params(axis='y', which='major', labelsize=6)
    pylab.tick_params(axis='x', which='major', labelsize=6)