# -*- coding: utf-8 -*-
"""
Created on Wed Sep 10 21:55:40 2014

@author: Magda
"""

from collections import OrderedDict
import csv
import matplotlib.pyplot as plt
import matplotlib.dates as mpld
from matplotlib.lines import Line2D
import datetime
import numpy  as np
import itertools as it
import networkx as nx
import community
import copy

from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
from sklearn.lda import LDA

###
#CONSTANT VARIABLES

#WIFI_DATA_FILENAME = "1b768f942564005168dda562defa5b_halfday.bigdata"
#WIFI_DATA_FILENAME = "1b768f942564005168dda562defa5b_oneday.bigdata"
# Input CSV of raw wifi scans (alternative capture files kept above/below).
WIFI_DATA_FILENAME = "1b768f942564005168dda562defa5b_1412706617_onemonth.bigdata"
#WIFI_DATA_FILENAME = "1b768f942564005168dda562defa5b_transition1.bigdata"
# Tab-separated ground-truth annotations: date, time, transport mode.
CLASS_DATA_FILENAME = "TransportData.txt"

delta = 1           # time-bin width, used as minutes in the binning code (and as seconds in plot_figure_1)
merging_delta = 50  # presumably the percentage threshold for merge_routers(perc) -- TODO confirm
min_time = 20       # occurrence threshold used by the commented-out frequency filter below
###
#COLOURS
#TODO: define colors!
# Tableau-20-style palette used to colour communities in the network plots.
my_colors = ['#1f77b4', #blue
             '#aec7e8', 
             '#ff7f0e', #orange
             '#ffbb78', 
             '#2ca02c', #green
             '#98df8a', 
             '#d62728', #red
             '#ff9896', 
             '#9467bd', #purple
             '#c5b0d5', 
             '#8c564b', #brown
             '#c49c94', 
             '#e377c2', #pink
             '#f7b6d2', 
             '#7f7f7f', #gray
             '#c7c7c7', 
             '#bcbd22', #yellow-green
             '#dbdb8d', 
             '#17becf',  #cyan
             '#9edae5']

# NOTE(review): a `global` statement at module scope is a no-op; kept as-is.
global colors, hatches
colors=it.cycle('bgrcmyk')# blue, green, red, ...
hatches=it.cycle('/\|-+*')

###############################################################################

def convert_dict_to_date_router_list(data):
    """Invert {router: [(date, ...), ...]} into an OrderedDict keyed by date.

    data : dict mapping a router id to a list of observation tuples whose
           first element is a datetime.
    Returns an OrderedDict {date: [router, ...]} sorted by date, with the
    router list for each date de-duplicated.
    """
    jaccard_input = {}
    for router, observations in data.items():
        for observation in observations:
            # observation[0] is the timestamp of the sighting
            jaccard_input.setdefault(observation[0], []).append(router)

    # de-duplicate routers seen more than once at the same timestamp
    for date_key in jaccard_input:
        jaccard_input[date_key] = list(set(jaccard_input[date_key]))

    return OrderedDict(sorted(jaccard_input.items()))
    
def calculate_jaccard(data, delta):
    """Jaccard index between consecutive time bins of router sightings.

    data  : dict router -> list of (datetime, ...) observations
    delta : bin width in minutes
    Returns an OrderedDict mapping each bin's start time to the Jaccard
    index between that bin's router set and the following bin's set.
    """
    jaccard_input = convert_dict_to_date_router_list(data)
    # Hoist the (already sorted) date list once: the original indexed
    # jaccard_input.keys() inside the loop, which is O(n) per access on
    # Python 2 and a TypeError on Python 3.
    dates = list(jaccard_input.keys())

    # group the per-timestamp router lists into `delta`-minute bins
    bin_data = OrderedDict()
    start_date = min(dates)
    end_date = max(dates)
    end_bin_date = start_date + datetime.timedelta(minutes=delta)
    idx = 0
    while start_date < end_date:
        while idx < len(dates) and dates[idx] < end_bin_date:
            if start_date in bin_data:
                bin_data[start_date].extend(jaccard_input[dates[idx]])
            else:
                # copy so extend() never mutates jaccard_input's own lists
                bin_data[start_date] = list(jaccard_input[dates[idx]])
            idx += 1
        start_date = end_bin_date
        end_bin_date = start_date + datetime.timedelta(minutes=delta)

    # Jaccard index between each bin and its successor
    bin_keys = list(bin_data.keys())
    jaccard_values = OrderedDict()
    for curr_key, next_key in zip(bin_keys, bin_keys[1:]):
        jaccard_values[curr_key] = compute_jaccard_index(set(bin_data[curr_key]), set(bin_data[next_key]))
    return jaccard_values
    
def calculate_jaccard_weighted(data, counted, delta):
    """Weighted Jaccard index between consecutive time bins.

    Same binning as calculate_jaccard, but each router contributes the
    weight counted[router] (default 1) via compute_jaccard_index_weighted.

    data    : dict router -> list of (datetime, ...) observations
    counted : dict router -> occurrence count used as weight
    delta   : bin width in minutes
    """
    jaccard_input = convert_dict_to_date_router_list(data)
    # Hoist the sorted date list once (was indexed via .keys() inside the
    # loop: O(n) per access on Python 2 and a TypeError on Python 3).
    dates = list(jaccard_input.keys())

    # group the per-timestamp router lists into `delta`-minute bins
    bin_data = OrderedDict()
    start_date = min(dates)
    end_date = max(dates)
    end_bin_date = start_date + datetime.timedelta(minutes=delta)
    idx = 0
    while start_date < end_date:
        while idx < len(dates) and dates[idx] < end_bin_date:
            if start_date in bin_data:
                bin_data[start_date].extend(jaccard_input[dates[idx]])
            else:
                # copy so extend() never mutates jaccard_input's own lists
                bin_data[start_date] = list(jaccard_input[dates[idx]])
            idx += 1
        start_date = end_bin_date
        end_bin_date = start_date + datetime.timedelta(minutes=delta)

    bin_keys = list(bin_data.keys())
    jaccard_values = OrderedDict()
    for curr_key, next_key in zip(bin_keys, bin_keys[1:]):
        # weighted Jaccard between bin i and bin i+1
        set_a = set(bin_data[curr_key])
        set_b = set(bin_data[next_key])
        jaccard_values[curr_key] = compute_jaccard_index_weighted(set_a, set_b, counted)
    return jaccard_values
 
def compute_jaccard_index_weighted(set_1, set_2, counted):
    """Jaccard index with per-element weights taken from `counted`.

    Elements missing from `counted` weigh 1 (see weighted()). When both
    sets are empty the union weight is 0 and the similarity is defined as 1.
    """
    common = set_1 & set_2
    common_weight = weighted(common, counted)
    total_weight = float(weighted(set_1, counted) + weighted(set_2, counted) - common_weight)
    if total_weight == 0:
        return 1
    return common_weight / total_weight

def weighted(data_set, counted):
    """Sum the occurrence counts of the items in data_set.

    Items missing from `counted` contribute a default weight of 1
    (dict.get replaces the deprecated has_key pattern).
    """
    return sum(counted.get(item, 1) for item in data_set)
    
def calculate_jaccard_difference(data, num):
    """Difference between each Jaccard value and the value `num` steps later.

    data : OrderedDict date -> Jaccard index
    num  : lag (in entries) used for the difference
    Returns an OrderedDict date -> data[date] - data[date + num steps];
    empty when data has `num` entries or fewer.

    The key list is materialised once: indexing data.keys() directly is
    Python 2-only and made the original loop O(n^2).
    """
    keys = list(data.keys())
    data_diff = OrderedDict()
    for idx in range(len(keys) - num):
        data_diff[keys[idx]] = data[keys[idx]] - data[keys[idx + num]]
    return data_diff
    
def compute_jaccard_index(set_1, set_2):
    """Plain Jaccard index |A n B| / |A u B|; 0 when both sets are empty."""
    overlap = len(set_1 & set_2)
    union_size = float(len(set_1) + len(set_2) - overlap)
    if union_size == 0:
        return 0
    return overlap / union_size

def plot_router_histogram(hist_data, bin_num):
    """Save a histogram of per-access-point occurrence counts to ac_histogram.pdf.

    hist_data : dict (bssid, ssid) -> occurrence count
    bin_num   : number of histogram bins
    """
    fig = plt.figure()
    axes = plt.gca()
    axes.hist(list(hist_data.values()), bins=bin_num)

    plt.title("Histogram of number of occurences of access points")
    plt.xlabel("Number of occurences")
    plt.ylabel("Number of access points")
    plt.savefig("ac_histogram.pdf", bbox_inches='tight')
    plt.close(fig)
  
def is_transportation_router(data, router):
    """True iff every sighting of `router` falls inside a 'walk' interval.

    Intervals come from the module-level class_data ground truth: each
    consecutive pair of annotation times forms a half-open interval
    [start, next_start) labelled by the first annotation.

    Returns True for routers with no sightings at all (vacuously).
    """
    dates = [observation[0] for observation in data[router]]
    # Materialise items() once: indexing class_data.items()[idx] is
    # Python 2-only and rebuilt the list on every inner iteration (O(n^2)).
    gt_items = list(class_data.items())
    flags = OrderedDict()
    for s_date in dates:
        flags[s_date] = False
        for (start, label), (next_start, _) in zip(gt_items, gt_items[1:]):
            if label == 'walk' and start <= s_date < next_start:
                flags[s_date] = True
    # all sightings flagged <=> count of True equals number of flags
    return len(flags.values()) == sum(flags.values())
    

def calc_transportation_routers(data):
    """Print how many routers are only ever seen while walking.

    Delegates the per-router decision to is_transportation_router.
    print() call syntax works on both Python 2 and 3 for a single argument.
    """
    trans_routers = [router for router in data.keys()
                     if is_transportation_router(data, router)]
    print(len(trans_routers))
    
###############################################################################

class_data = OrderedDict() # key: date, value: transport, e.g. walk, stand
# Load the ground-truth annotations: tab-separated rows of
# "<dd-mm-YYYY>\t<HH:MM>\t<mode>".
# NOTE(review): binary mode for csv is the Python 2 convention; Python 3's
# csv module expects text mode.
with open(CLASS_DATA_FILENAME, 'rb') as f:
    reader = csv.reader(f, delimiter='\t')
    for row in reader:
        curr_date = datetime.datetime.strptime(row[0] + ' ' + row[1], "%d-%m-%Y %H:%M")
        class_data[curr_date] = row[2]

###############################################################################

hist_data = OrderedDict()  # key: (bssid, ssid), value: number of occurrences
with open(WIFI_DATA_FILENAME, 'rb') as f:
    reader = csv.reader(f, delimiter=',')
    for row in reader:
        # rows whose 4th column is the literal 'timestamp' are header lines
        if row[3] != 'timestamp':
            router = (row[1], row[7])
            # dict.get with a default replaces the deprecated has_key pattern
            hist_data[router] = hist_data.get(router, 0) + 1
                
###############################################################################
                
scan_data_routers_count = OrderedDict() # key: (bssid, ssid), value: list of dates
scan_data_routers_count_1min_bin = OrderedDict() # key: (bssid, ssid), value: list of dates
# key: (bssid, ssid), value: list of (bin-start datetime, 1) -- filled below
scan_data_binned = OrderedDict()

# Alternative analysis windows, kept for reference:
#start_date = datetime.datetime(2014, 9, 9, 10, 0)
#end_date = start_date + datetime.timedelta(days = 5)
#start_date = start_date + datetime.timedelta(days = 1)

# Active analysis window: only scans strictly between these two datetimes
# are loaded by the reading loop below.
start_date = datetime.datetime(2014, 9, 21, 0, 0)
end_date = datetime.datetime(2014, 9, 23, 0, 0)

#start_date = datetime.datetime(2014, 9, 25, 0, 0)
#end_date = datetime.datetime(2014, 9, 27, 0, 0)

#start_date = datetime.datetime(2014, 9, 9, 16, 0)
#end_date = datetime.datetime(2014, 9, 11, 16, 0)

#start_date = datetime.datetime(2014, 10, 6, 0, 0)
#end_date = datetime.datetime(2014, 10, 8, 0, 0)

# NOTE(review): isfirst is not used by any active code in the visible file.
isfirst = True

# Read the raw wifi scans inside the analysis window, keeping both the
# full-resolution observations and a copy truncated to 1-minute resolution.
with open(WIFI_DATA_FILENAME, 'rb') as f:
    reader = csv.reader(f, delimiter=',')
    for row in reader:
        # rows whose 4th column is the literal 'timestamp' are header lines
        if row[3] != 'timestamp':
            curr_date = datetime.datetime.fromtimestamp(int(row[3]))
            router = (row[1], row[7])
            if curr_date < end_date and curr_date > start_date:
                # full resolution: (datetime, signal/count from column 2)
                if router in scan_data_routers_count:
                    scan_data_routers_count[router] = scan_data_routers_count[router] + [(curr_date, int(row[2]))]
                else:
                    scan_data_routers_count[router] = [(curr_date, int(row[2]))]

                # truncate seconds -> 1-minute resolution
                curr_date = datetime.datetime(curr_date.year, curr_date.month, curr_date.day, curr_date.hour, curr_date.minute)

                if router in scan_data_routers_count_1min_bin:
                    scan_data_routers_count_1min_bin[router] = scan_data_routers_count_1min_bin[router] + [(curr_date, int(row[2]))]
                else:
                    scan_data_routers_count_1min_bin[router] = [(curr_date, int(row[2]))]

# drop the bogus entry produced by header rows, if present
scan_data_routers_count.pop("ssid", None)
scan_data_routers_count_1min_bin.pop("ssid", None)
#
# De-duplicate the 1-minute observations per router.
for key in scan_data_routers_count_1min_bin.keys():
    scan_data_routers_count_1min_bin[key] = list(set(scan_data_routers_count_1min_bin[key]))
    
# NOTE(review): first_item is never used below -- presumably a debugging
# leftover. Indexing .items()[0] is Python 2-only.
first_item = scan_data_routers_count_1min_bin.items()[0]
# Re-bin each router's observations into `delta`-minute bins: every bin that
# contains at least one observation yields one (bin_start, 1) entry.
for key in scan_data_routers_count_1min_bin.keys():
    date_list = []
    date_list_orig = sorted(scan_data_routers_count_1min_bin[key])
    idx = 0
    # first bin starts at the top of the hour of the earliest observation
    start_bin =  min(date_list_orig)[0]
    start_bin = datetime.datetime(start_bin.year, start_bin.month, start_bin.day, start_bin.hour, 0)
    end_bin = start_bin + datetime.timedelta(minutes = delta)
    while idx < len(date_list_orig):
        if date_list_orig[idx][0] >= start_bin and date_list_orig[idx][0] < end_bin:
            # timestamp falls inside the current bin
            date_list.append((start_bin, 1))
            idx = idx + 1
        else:
            # advance to the next bin
            start_bin = end_bin
            end_bin = start_bin + datetime.timedelta(minutes = delta)
    scan_data_binned[key] = date_list
    
# De-duplicate the binned entries and drop the null-MAC pseudo-router.
# NOTE(review): popping while iterating .keys() relies on Python 2's list
# snapshot; it raises RuntimeError on Python 3.
for key in scan_data_binned.keys():
    scan_data_binned[key] = sorted(list(set(scan_data_binned[key])))
    if key[0] == '00:00:00:00:00:00':
        scan_data_binned.pop(key, None)
        
###############################################################################
#PREPROCESSING

# Take only the once that occur more than 50 times a day
#for key in hist_data.keys():
#    if(hist_data[key] < min_time):
#        hist_data.pop(key, None)    
#
## remove the keys that have less than 50 occurences
#frequent_routers = hist_data.keys()
#scan_data_binned_frequent = scan_data_binned.copy()
#for key in scan_data_binned:
#    if key not in frequent_routers:
#        scan_data_binned.pop(key, None)
        
###############################################################################
#calc_transportation_routers(scan_data_binned_frequent)

###############################################################################
 
# Collect every bin-start datetime across all routers (used as the global
# plotting range via min(dates)/max(dates)).
dates = []
for item in scan_data_binned.values():
    dates = dates + [i[0] for i in item]
      
#dates = []
#for item in scan_data_routers_count.values():
#    dates = dates + [i[0] for i in item]

# Derive hour- and minute-granularity tick lists for the plots below.
main_hours = []
main_mins = []
dates = set(dates)
for curr_date in dates:
    main_hours.append(datetime.datetime(curr_date.year, curr_date.month, curr_date.day, curr_date.hour))
    main_mins.append(datetime.datetime(curr_date.year, curr_date.month, curr_date.day, curr_date.hour, curr_date.minute))
    
main_mins = sorted(list(set(main_mins)))
main_hours = sorted(list(set(main_hours)))
# NOTE(review): drops the earliest hour tick -- reason not evident; confirm.
main_hours.pop(0)

###############################################################################
#PLOT DIAGRAMS

def plot_figure(data, from_date, end_date, figname, figsize, ground_truth=False, jaccard_data={}, node_colour={}):
    """Scatter-plot router presence over time and save it to `figname`.

    data         : dict router -> list of (datetime, ...) observations
    from_date/end_date : plotting window
    figsize      : figure size; (8,12) initially labels the y axis with router ids
    ground_truth : if True, draw vertical red lines at class_data change points
    jaccard_data : optional OrderedDict date -> Jaccard index (second y axis)
    node_colour  : optional dict router -> colour string (black when missing)

    NOTE: jaccard_data/node_colour are mutable defaults; they are only read
    here, so the shared defaults are harmless.
    """
    step = 4        # vertical spacing between router rows
    node_size = 1
    fig = plt.figure(figsize=figsize)
    ax = plt.gca()
    counter = 0
    for key in data.keys():
        sorted_data = sorted(data[key], reverse = True, key=lambda x: x[0])
        # keep only the observations inside [from_date, end_date]
        filtered_data = []
        for item in sorted_data:
            if item[0] >= from_date and item[0] <= end_date:
                filtered_data.append(item)
        # y position: one horizontal row per router
        # FIX: was numpy.ones, but the module is imported as np (NameError)
        ssid_name = np.ones(len(filtered_data)) * counter

        if len(node_colour.keys()) > 0:
            # one colour per point; routers without a community get black
            scatter_colors = []
            for router in filtered_data:
                if key not in node_colour:
                    scatter_colors.append("#000000")
                else:
                    scatter_colors.append(node_colour[key])
            ax.scatter([i[0] for i in filtered_data], ssid_name, facecolor=scatter_colors, s = node_size, edgecolor= scatter_colors, lw = 0, label='Router presence')
        else:
            ax.scatter([i[0] for i in filtered_data], ssid_name, s = node_size, lw = 0, label='Router presence')
        counter = counter + step
    plt.gca().xaxis.set_major_formatter(mpld.DateFormatter('%d/%m/%y %H:%M'))
    plt.title("Presence of routers over time")
    plt.ylim(-1, len(data.keys()) * step + 1)

    if ground_truth:
        # vertical red line at every annotated transport-mode change
        for item in class_data.keys():
            gt_handle, = ax.plot([item, item], [-1, ax.get_ylim()[0] + ax.get_ylim()[1]], 'r', label='Ground truth')

    ax.set_ylabel('Access points')
    plt.xlim(from_date, end_date)
    plt.xticks(main_hours, rotation=90)
    plt.tick_params(axis='y', which='major', labelsize=4)
    plt.tick_params(axis='x', which='major', labelsize=8)
    plt.xlabel("Time")

    if figsize == (8,12):
        # range() instead of Python 2-only xrange(); identical here
        plt.yticks(range(0, len(data.keys()) * step, step), data.keys())
    else:
        plt.tick_params(axis='y', which='both', bottom='off', top='off', labelbottom='off')
    # NOTE(review): the next two calls clear the y ticks again, overriding
    # the labelled branch above -- kept to preserve the current output
    plt.tick_params(axis='y', which='both', bottom='off', top='off', labelbottom='off')
    plt.yticks([])
    plt.tick_params(axis='y', which='major', labelsize=4)

    if len(jaccard_data.keys()) > 0:
        # second y axis carrying the Jaccard-index curve
        ax2 = ax.twinx()
        jac_handle, = ax2.plot(jaccard_data.keys(), jaccard_data.values(), 'g', label='Jaccard index')
        ax2.set_ylabel('Jaccard index')
        plt.ylim(0, 1.05)

    plt.xlim(from_date, end_date)

    plt.savefig(figname, bbox_inches='tight')
    plt.close(fig)



def plot_figure_1(data, from_date, end_date, figname, figsize, ground_truth=False, jaccard_data={}, node_colour={}):
    """Plot router presence as horizontal segments (one row per router),
    optionally with ground-truth change points and a Jaccard-index subplot.

    NOTE(review): points worth confirming --
      * when node_colour is given it must contain every key of `data`;
        node_colour[key] below raises KeyError otherwise (plot_figure
        falls back to black instead).
      * the drawing loops iterate `sorted_data`, not `filtered_data`, so
        the window filter computed here is never applied to the drawing.
      * segment width uses timedelta(seconds = delta) although delta is
        used as minutes elsewhere in this file.
      * xtick generation uses the module-level `start_date`, not the
        `from_date` parameter.
    """
    step = 1
    # with Jaccard data: two stacked subplots sharing the x axis
    if len(jaccard_data) > 0:
        fig, axarr = plt.subplots(2, sharex=True, figsize=figsize)
        ax = axarr[0] 
    else:
        fig, axarr = plt.subplots(1, figsize=figsize)
        ax = axarr
        
    counter = 0
    for key in data.keys():
        sorted_data = sorted(data[key], reverse = True, key=lambda x: x[0])
        # restrict to the plotting window (see NOTE above: not drawn)
        filtered_data = []
        for item in sorted_data:
            if item[0] >= from_date and item[0] <= end_date:
                filtered_data.append(item)
       
        # one short horizontal segment per observation on row `counter`
        if len(node_colour) > 0:
            for item in sorted_data:
                ax.plot([item[0], item[0] + datetime.timedelta(seconds = delta)], [counter, counter], node_colour[key])
        else:
            for item in sorted_data:
                ax.plot([item[0], item[0] + datetime.timedelta(seconds = delta)], [counter, counter], my_colors[0])
        counter = counter + step    
    ax.xaxis.set_major_formatter(mpld.DateFormatter('%d/%m/%y %H:%M'))
    ax.set_ylabel('Access points')
    ax.set_title("Presence of routers over time")
#    plt.setp(ax.get_yticklabels(), visible=False)
    ax.set_ylim([-1, len(data.keys()) * step + 1])
        
#    plt.yticks(xrange(0, len(data.keys()) * step, step), data.keys())
#    plt.tick_params(axis='y', which='major', labelsize=4)
    
    if ground_truth:
        # vertical red line at every annotated transport-mode change
        for item in class_data.keys():
            gt_handle, = ax.plot([item, item], [-1, ax.get_ylim()[0] + ax.get_ylim()[1] + 1], 'r', label='Ground truth', linewidth=1)
    if len(jaccard_data) > 0:
        # switch to the lower subplot: ground-truth lines plus the
        # Jaccard curve added further below
        ax = axarr[1]
        for item in class_data.keys():
            gt_handle, = ax.plot([item, item], [-1, ax.get_ylim()[0] + ax.get_ylim()[1] + 1], 'r', label='Ground truth', linewidth=1)
        ax.set_ylim([0, 1])
        
    plt.xlim(from_date, end_date)
    
    # 24 evenly spaced ticks across the window (uses global start_date)
    delta_date = (end_date - start_date)
    single_delta = divtd(delta_date, 24)
    
    xtick_list = []
    curr_date = start_date
    while curr_date < end_date:
        xtick_list.append(curr_date)
        curr_date = curr_date + single_delta
        
    plt.xticks(xtick_list, rotation=90)
    
    # NOTE(review): overrides the tick list computed just above
    plt.xticks(main_hours, rotation=90)
    plt.tick_params(axis='y', which='major', labelsize=8)
    plt.tick_params(axis='x', which='major', labelsize=8)
    plt.xlabel("Time")
    
#    if figsize == (8,12):
#    plt.yticks(xrange(0, len(data.keys()) * step, step), data.keys())
#    else:
#        plt.tick_params(axis='y', which='both', bottom='off', top='off', labelbottom='off')
    plt.tick_params(axis='y', which='both', bottom='off', top='off', labelbottom='off')
#    plt.yticks(xrange(0, len(data.keys()) * step, step), data.keys())
    plt.tick_params(axis='y', which='major', labelsize=8)
    
    if len(jaccard_data.keys()) > 0:
        jac_handle, = ax.plot(jaccard_data.keys(), jaccard_data.values(), 'g', label='Jaccard index')
        ax.set_ylabel('Jaccard index')
#        circ1 = Line2D([0], [0], linestyle="none", marker="o", markersize=5, markerfacecolor="b")
#        circ2 = Line2D([], [], color='g', label='Jaccard index')
#        circ3 = Line2D([], [], color='r', label='Ground truth')
        plt.ylim(0, 1.05)
#        plt.legend((circ1, circ2, circ3), ("Router presence", "Jaccard index", "Ground truth"), numpoints=1, loc="best",prop={'size':7})
    
    plt.xlim(from_date, end_date)
    
    plt.savefig(figname, bbox_inches='tight')       
    plt.close(fig)

# timedelta division primitive: Python 2 exposes __div__; Python 3 only has
# __floordiv__, which has the same truncating semantics for timedelta // int.
divtdi = getattr(datetime.timedelta, "__div__", datetime.timedelta.__floordiv__)
def divtd(td1, td2):
    """Floor-divide timedelta td1 by td2.

    td2 may be an integer (result: a timedelta 1/td2 as long) or another
    timedelta (result: int, how many times td2 fits into td1).
    The Python 2-only `long` check was removed; dispatching on timedelta
    instead keeps both cases working on Python 2 and 3.
    """
    if isinstance(td2, datetime.timedelta):
        # express both operands in whole microseconds, then integer-divide
        us1 = td1.microseconds + 1000000 * (td1.seconds + 86400 * td1.days)
        us2 = td2.microseconds + 1000000 * (td2.seconds + 86400 * td2.days)
        return us1 // us2
    return divtdi(td1, td2)
    
def plot_jaccard(jaccard_data, delta, from_date, end_date, figname, ground_truth=False):
    """Plot the Jaccard-index time series and save it to `figname`.

    jaccard_data : OrderedDict date -> Jaccard index
    delta        : bin width in minutes (used in the title only)
    ground_truth : draw vertical red lines at class_data change points
    """
    fig = plt.figure()
    axes = plt.gca()
    plt.gca().xaxis.set_major_formatter(mpld.DateFormatter('%d/%m/%y %H:%M'))
    plt.title("Jaccard index over time computed from " + str(delta) + " min time bins")

    axes.plot(jaccard_data.keys(), jaccard_data.values(), 'g')

    if ground_truth:
        # full-height red line at every annotated transport-mode change
        for gt_date in class_data.keys():
            axes.plot([gt_date, gt_date], [0, 1], 'r')

    plt.xlim(from_date, end_date)
    plt.xticks(main_hours, rotation=90)
    for axis_name in ('y', 'x'):
        plt.tick_params(axis=axis_name, which='major', labelsize=8)
    plt.xlabel("Time")
    plt.ylabel("Jaccard index")
    plt.savefig(figname, bbox_inches='tight')
    plt.close(fig)

def plot_jaccard_diff(jaccard_data, delta, from_date, end_date, figname, ground_truth=False):
    """Plot the lagged Jaccard-index differences and save to `figname`.

    jaccard_data : OrderedDict date -> Jaccard difference (range [-1, 1])
    delta        : bin width in minutes (used in the title only)
    ground_truth : draw vertical red lines at class_data change points
    """
    fig = plt.figure()
    axes = plt.gca()
    plt.gca().xaxis.set_major_formatter(mpld.DateFormatter('%d/%m/%y %H:%M'))
    plt.title("Jaccard index difference over time computed from " + str(delta) + " min time bins")

    axes.plot(jaccard_data.keys(), jaccard_data.values(), 'g')

    if ground_truth:
        # full-height red line at every annotated transport-mode change
        for gt_date in class_data.keys():
            axes.plot([gt_date, gt_date], [-1, 1], 'r')

    plt.xlim(from_date, end_date)
    plt.ylim(-1,1)
    plt.xticks(main_hours, rotation=90)
    for axis_name in ('y', 'x'):
        plt.tick_params(axis=axis_name, which='major', labelsize=8)
    plt.xlabel("Time")
    plt.ylabel("Jaccard index difference")
    plt.savefig(figname, bbox_inches='tight')
    plt.close(fig)
    
def plot_router_network(data, figname, k_clique, min_width=0):
    """Build a router co-occurrence graph, colour nodes by community, save it.

    data      : dict router -> list of (datetime, ...) observations
    figname   : output image path
    k_clique  : > 0 -> k-clique percolation communities; 0 -> Louvain
    min_width : only add edges whose co-occurrence count exceeds this value
    Returns a dict router -> colour string for the communities found.

    NOTE(review): dict.has_key below is Python 2-only.
    """
    wifi_data = convert_dict_to_date_router_list(data)
    G = nx.Graph()
#    for router in data.keys():
#        G.add_node(router)
    
    # count how often each unordered router pair is seen at the same time
    wifi_data_pairs = OrderedDict()
    for routers_set in wifi_data.values():
        for router_a in routers_set:
            for router_b in routers_set:
                if(router_a != router_b):
                    if(wifi_data_pairs.has_key((router_a, router_b))):
                        wifi_data_pairs[(router_a, router_b)] = wifi_data_pairs[(router_a, router_b)] + 1
                    elif wifi_data_pairs.has_key((router_b, router_a)):
                        wifi_data_pairs[(router_b, router_a)] = wifi_data_pairs[(router_b, router_a)] + 1
                    else:
                        wifi_data_pairs[(router_a, router_b)] = 1
     
    # add an edge for every pair co-occurring more than min_width times
    for single_date in wifi_data.keys():
        for router_a in wifi_data[single_date]:
            for router_b in wifi_data[single_date]:
                if(router_a != router_b):
                    if(wifi_data_pairs.has_key((router_a, router_b))):
                        w = wifi_data_pairs[(router_a, router_b)]
                    elif(wifi_data_pairs.has_key((router_b, router_a))):
                        w = wifi_data_pairs[(router_b, router_a)]
                    else:
                        w = 0
                    
                    if w > min_width:
                        G.add_edge(router_a, router_b, weight = w)
    
    # node size proportional to the router's number of observations
    node_size_list = []    
    for node in G.nodes():
        node_size_list.append(len(data[node]))
    
    fig = plt.figure(figsize=(50,50))    
    pos = nx.spring_layout(G) 
    node_colour = {}
    
    if k_clique > 0:            
        # k-clique percolation; nodes outside every community become black
        clique_list = list(nx.k_clique_communities(G, k_clique))
        for node in G.nodes():
            for idx in xrange(0, len(clique_list)):
                if node in clique_list[idx]:
                    node_colour[node] = my_colors[idx]
            if not node_colour.has_key(node):
                node_colour[node] = "#000000"
            
        node_colour_list = []
        for node in G.nodes():          
                node_colour_list.append(node_colour[node])
                
        # NOTE(review): this draws the full node set once per community --
        # idx is unused, so a single draw call would give the same picture
        for idx in clique_list:
            nx.draw_networkx_nodes(G, pos, G.nodes(), node_color=node_colour_list, node_size = node_size_list)
    else: #louvain
        # Louvain modularity partition; one palette colour per community
        partition = community.best_partition(G)
        pos = nx.spring_layout(G)
        count = 0
        for com in set(partition.values()) :

            list_nodes = [nodes for nodes in partition.keys()
                                        if partition[nodes] == com]
            nx.draw_networkx_nodes(G, pos, list_nodes, node_size = node_size_list,
                                        node_color = my_colors[count])
            for node in list_nodes:
                node_colour[node] = my_colors[count]
            count = count + 1
          
    nx.draw_networkx_edges(G, pos, alpha = 0.2)
    plt.axis('off')
    plt.savefig(figname, bbox_inches='tight')
    plt.show()
    plt.close(fig)        
    return node_colour

def most_common(lst):
    """Return the most frequent element of lst (ties broken arbitrarily).

    Counter counts in one O(n) pass; the original max(set(lst),
    key=lst.count) rescanned the list per distinct element (O(n^2)).
    """
    from collections import Counter  # local import keeps the change self-contained
    return Counter(lst).most_common(1)[0][0]
    
def plot_router_network_directed(data, figname, ground_truth):
    """Build a directed multigraph of consecutive router sightings and write
    it to graph.gexf (the drawing code is commented out below).

    data         : dict router -> list of (datetime, ...) observations
    figname      : unused while the plotting section stays commented out
    ground_truth : OrderedDict date -> transport label (e.g. 'walk', 'stand')

    NOTE(review): indexing ground_truth.keys() / wifi_data.keys() is
    Python 2-only.
    """
    wifi_data = convert_dict_to_date_router_list(data)
    # label each ground-truth interval: -1 = travel (walk -> stand), 1 = stop
    gt_classes = {}
    gt = ground_truth.keys()
    stop_location_id = 0
    for idx in xrange(0, len(gt) - 1):
        if((ground_truth[gt[idx]], ground_truth[gt[idx + 1]]) == ('walk', 'stand')): # travel
            gt_classes[(gt[idx], gt[idx + 1])] = -1
        else: # stop location
            gt_classes[(gt[idx], gt[idx + 1])] = 1
            stop_location_id = stop_location_id + 1
    
    # classify every router by the majority interval label of its sightings
    router_class = {}
    for router in data.keys():
        date_class = []
        for s_date in data[router]:
            for gt_class in gt_classes.keys():
                if s_date[0] > gt_class[0] and s_date[0] <= gt_class[1]:
                    date_class.append(gt_classes[gt_class])
        if len(date_class) == 0:
            # never seen inside an annotated interval: default to stop (1)
            router_class[router] = 1
        else:
            # NOTE(review): gt_classes only ever holds -1 or 1, so the == 4
            # and == 3 branches below look unreachable -- confirm intent
            if most_common(date_class) == 4:
                router_class[router] = 0
            elif most_common(date_class) == 3:
                router_class[router] = 1
            else:
                router_class[router] = most_common(date_class)
    G = nx.MultiDiGraph()
    
    # connect every router seen at timestamp t to every router seen at the
    # following timestamp, accumulating transition counts in edge weights
    for date_idx in xrange(0, len(wifi_data.keys()) - 1):
        curr_date = wifi_data.keys()[date_idx]
        next_date = wifi_data.keys()[date_idx + 1]
        # for curr_date add all routers
        
        for router in wifi_data[curr_date]:
            if(not G.has_node(router)):
                G.add_node(router, weight = router_class[router], size = len(data[router]))
        
        #for next_date add the loops
        for router in wifi_data[next_date]:
            if(not G.has_node(router)):
                G.add_node(router, weight = router_class[router], size = len(data[router]))
            for router_prev in wifi_data[curr_date]:
                if(not G.has_edge(router_prev, router)):
                    # add new edge with weight 1
                    G.add_edge(router_prev, router, weight = 1)
                else:
                    # bump the count by replacing the existing edge
                    weight = G.get_edge_data(router_prev, router)[0]['weight']
                    G.remove_edge(router_prev, router)
                    G.add_edge(router_prev, router, weight = weight + 1)
                
    #edge_labels=dict([((u,v,),d['weight']) for u,v,d in G.edges(data=True)])
    nx.write_gexf(G, "graph.gexf")
#    pos = nx.spring_layout(G)
#    fig = plt.figure()
#    ax = plt.gca()
#    nx.draw_networkx_nodes(G, pos, node_size = 5)
#        
#    nx.draw_networkx_edges(G, pos, alpha = 0.2, arrows = True)
#    #nx.draw_networkx_edge_labels(G,pos,edge_labels=edge_labels)
#    #nx.draw_networkx_labels(G, pos)
#    plt.axis('off')
#    plt.show()
#    plt.savefig(figname) 
#    plt.close()
    
# Plot histogram of occurences#################################################
#plot_router_histogram(hist_data, 200)
#   
## Plot raw data - scatter plot#################################################
#plot_figure(scan_data_routers_count, min(dates), max(dates), "data_raw_big.pdf", (8,12))
#plot_figure(scan_data_routers_count, min(dates), max(dates), "data_raw_small.pdf", (8,6))
#plot_figure(scan_data_routers_count, min(dates), max(dates), "data_raw_gt_big.pdf", (8,12), True)
#plot_figure(scan_data_routers_count, min(dates), max(dates), "data_raw_gt_small.pdf", (8,6), True)
#
## Plot binned data 1 min - scatter plot########################################
#plot_figure(scan_data_routers_count_1min_bin, min(dates), max(dates), "data_bin_1min_big.pdf", (8,12))
#plot_figure(scan_data_routers_count_1min_bin, min(dates), max(dates), "data_bin_1min_small.pdf", (8,6))
#plot_figure(scan_data_routers_count_1min_bin, min(dates), max(dates), "data_bin_1min_gt_big.pdf", (8,12), True)
#plot_figure(scan_data_routers_count_1min_bin, min(dates), max(dates), "data_bin_1min_gt_small.pdf", (8,6), True)
### Calculate and plot Jaccard-index for 1-min binned data
### just Jaccard
#delta = 1 # min bins
#num = 3 # how many jaccard measurement have to be taken into account of difference
#jaccard_data = calculate_jaccard(scan_data_routers_count_1min_bin, delta)
#jaccard_diff_data = calculate_jaccard_difference(jaccard_data, num)
#plot_jaccard(jaccard_data, delta, min(dates), max(dates), "jaccard_" + str(delta) + "min_bins.pdf")
#plot_jaccard_diff(jaccard_diff_data, delta, min(dates), max(dates), "jaccard_diff_" + str(delta) + "min_bins" + str(num) + ".pdf", True)
#plot_jaccard(jaccard_data, delta, min(dates), max(dates), "jaccard_" + str(delta) + "min_bins_gt.pdf", True)
## Jaccard on scatter plot with GT
#plot_figure(scan_data_routers_count_1min_bin, min(dates), max(dates), "data_bin_1min_gt_small_jaccard.pdf", (8,6), True, jaccard_data)
#
##Plot only most frequent routers 1 min binned##################################
#plot_figure(scan_data_routers_count_1min_bin_frequent, min(dates), max(dates), "data_bin_1min_frequent_big.pdf", (8,12))
#plot_figure(scan_data_routers_count_1min_bin_frequent, min(dates), max(dates), "data_bin_1min_frequent_small.pdf", (8,6))
#plot_figure(scan_data_routers_count_1min_bin_frequent, min(dates), max(dates), "data_bin_1min_frequent_gt_big.pdf", (8,12), True)
#plot_figure(scan_data_routers_count_1min_bin_frequent, min(dates), max(dates), "data_bin_1min_frequent_gt_small.pdf", (8,6), True)
##Calculate and plot Jaccard-index for most frequent routers
#delta = 5 # min bins
#jaccard_data2 = calculate_jaccard(scan_data_routers_count_1min_bin_frequent, delta)
#plot_jaccard(jaccard_data2, delta, min(dates), max(dates), "jaccard_" + str(delta) + "min_bins_frequent.pdf")
#plot_jaccard(jaccard_data2, delta, min(dates), max(dates), "jaccard_" + str(delta) + "min_bins_gt_frequent.pdf", True)
### Jaccard on scatter plot with GT
#plot_figure(scan_data_routers_count_1min_bin_frequent, min(dates), max(dates), "data_bin_1min_gt_small_jaccard_frequent.pdf", (8,6), True, jaccard_data2)

#TODO: Plot only routers occurring in two 5min bins (?)

#TODO: Imputation

#TODO: Plot multi-mode trips

#TODO: Plot single-mode trips

# Building router network
#k_clique = 3
#min_width = 5
#node_colour = plot_router_network(scan_data_routers_count_1min_bin, 'routers_network_k_' + str(k_clique) + 'min_width_' + str(min_width) + '.pdf', k_clique, min_width)
#plot_figure(scan_data_routers_count_1min_bin, min(dates), max(dates), "data_bin_1min_gt_small_k_clique_" + str(k_clique) + 'min_width_' + str(min_width) + '.pdf', (8,6), True, {}, node_colour)

# Louvain method
#node_colour = plot_router_network(scan_data_routers_count_1min_bin, 'routers_network_louvain.pdf', 0)
#plot_figure(scan_data_routers_count_1min_bin, min(dates), max(dates), 'data_bin_1min_gt_small_louvain.pdf', (8,6), True, {}, node_colour)
#node_colour = plot_router_network(scan_data_routers_count_1min_bin_frequent, 'routers_frequent_network_louvain.pdf', 0)
#plot_figure(scan_data_routers_count_1min_bin_frequent, min(dates), max(dates), 'data_bin_1min_frequent_gt_small_louvain.pdf', (8,6), True, {}, node_colour)

#Directed graph of routers
#plot_router_network_directed(scan_data_routers_count_1min_bin, "routers_directed_graph.pdf", class_data)

def merge_routers(data, perc):
    print len(data.keys())
    
#    for router in data.items():
#        print router
        
    router_set_desc = OrderedDict(sorted(data.items(), reverse = True, key=lambda x: len(x[1]))).keys()
    router_set_asc = OrderedDict(sorted(data.items(), reverse = False, key=lambda x: len(x[1]))).keys()
    for router_a in router_set_asc:
        for router_b in router_set_desc:
            if(data.has_key(router_b) and data.has_key(router_a) and router_b != router_a):
                data_router_a = set([item[0] for item in data[router_a]])
                data_router_b = set([item[0] for item in data[router_b]])
#                print data_router_a
#                print data_router_b
                
                counter = 0
                for item in data_router_b:
                    if item in data_router_a:
                        counter = counter + 1
                if(float(counter) / len(data_router_b) >= float(perc) / 100):
                    data.pop(router_b, None)
#                    print "Tak"
#                print "Nie"
#                elif counter > 0:
#                    print counter, len(data_router_b), len(data_router_a)
                    
                #                if(set([item[0] for item in data[router_b]]).issubset(set([item[0] for item in data[router_a]]))):
#                    data.pop(router_b, None)
    print len(data.keys())
    return data

def merge_routers_weighted(data, perc):
    print len(data.keys())
    
#    for router in data.items():
#        print router
    res = OrderedDict()
    
    router_set_desc = OrderedDict(sorted(data.items(), reverse = True, key=lambda x: len(x[1]))).keys()
    router_set_asc = OrderedDict(sorted(data.items(), reverse = False, key=lambda x: len(x[1]))).keys()
    for router_a in router_set_asc:
        for router_b in router_set_desc:
            if(data.has_key(router_b) and data.has_key(router_a) and router_b != router_a):
                data_router_a = set([item[0] for item in data[router_a]])
                data_router_b = set([item[0] for item in data[router_b]])
                
                counter = 0
                for item in data_router_b:
                    if item in data_router_a:
                        counter = counter + 1
                if(float(counter) / len(data_router_b) >= float(perc) / 100):
                    num = (len(data[router_a]) + len(data[router_b])) / (len(data[router_a]))
                    data.pop(router_b, None)
                    if res.has_key(router_a):
                        res[router_a] = res[router_a] + num
                    else:
                        res[router_a] = num
#                    print "Tak"
#                print "Nie"
#                elif counter > 0:
#                    print counter, len(data_router_b), len(data_router_a)
                    
                #                if(set([item[0] for item in data[router_b]]).issubset(set([item[0] for item in data[router_a]]))):
#                    data.pop(router_b, None)
    print len(data.keys())
    return [data, res]

def merge_routers_weighted_in_parts(data, perc, min_date, max_date):
    print len(data.keys())

    time_delta = datetime.timedelta(hours = merging_delta)
    start_date = datetime.datetime(min_date.year, min_date.month, min_date.day, min_date.hour, 0)
    end_date = start_date + time_delta
    curr_date = start_date
    
    merged_routers_all = OrderedDict()
    merge_counted_all = OrderedDict()
    while curr_date < max_date:
        small_data = OrderedDict()
        for router in data.keys():
            dates = []
            for s_date in data[router]:
                if s_date[0] < end_date and s_date[0] >= curr_date:
                    dates.append(s_date)
            if len(dates) > 0:
                small_data[router] = dates
        curr_date = end_date
        end_date = curr_date + time_delta
        # calc weighted jaccard
        [merged_routers, merge_counted] =  merge_routers_weighted(small_data, perc)
        # merge results
        for router in merged_routers:
            if merged_routers_all.has_key(router):
                merged_routers_all[router] = merged_routers_all[router] + merged_routers[router]
            else:
                merged_routers_all[router] = merged_routers[router]
        for router in merge_counted:
            if merge_counted_all.has_key(router):
                merge_counted_all[router] = merge_counted_all[router] + merge_counted[router]
            else:
                merge_counted_all[router] = merge_counted[router]

    return [merged_routers_all, merge_counted_all]
    
def plot_merging(data, main_router, to_remove, from_date, end_date, figname, figsize, ground_truth=False, jaccard_data={}, node_colour={}):
    """Plot router presence over time while a merge decision is being made.

    The router being kept (`main_router`) is coloured my_colors[0], routers
    about to be merged away (`to_remove`) my_colors[2], everything else
    my_colors[5].  Optionally overlays ground-truth markers (module-level
    `class_data`) and a Jaccard-index curve on a twin axis.  Saves to
    `figname` and closes the figure.
    """
    step = 4
    node_size = 1
    fig = plt.figure(figsize=figsize)
    ax = plt.gca()
    counter = 0
    for key in data.keys():
        sorted_data = sorted(data[key], reverse = True, key=lambda x: x[0])
        # Keep only scans inside the requested plotting window.
        filtered_data = [item for item in sorted_data
                         if item[0] >= from_date and item[0] <= end_date]
        # BUG FIX: was `numpy.ones(...)` -- the file imports numpy as np,
        # so `numpy` was an undefined name here.
        ssid_name = np.ones(len(filtered_data)) * counter

        # BUG FIX: the colour depends only on `key`, so compute it once;
        # the original recomputed it in a loop over filtered_data, leaving
        # curr_color unbound when filtered_data was empty.
        if key == main_router:
            curr_color = my_colors[0]
        elif key in to_remove:
            curr_color = my_colors[2]
        else:
            curr_color = my_colors[5]

        for item in filtered_data:
            plt.plot([item[0], item[0] + datetime.timedelta(minutes = delta)], [counter, counter], 'b')
        ax.scatter([i[0] for i in filtered_data], ssid_name, facecolor=curr_color, s = node_size, edgecolor= curr_color, lw = 0, label='Router presence')
        counter = counter + step
    plt.gca().xaxis.set_major_formatter(mpld.DateFormatter('%d/%m/%y %H:%M'))
    plt.title("Presence of routers over time")
    plt.ylim(-1, len(data.keys()) * step + 1)

    if ground_truth:
        # class_data: module-level ground-truth dict keyed by timestamp.
        for item in class_data.keys():
            gt_handle, = ax.plot([item, item], [-1, ax.get_ylim()[0] + ax.get_ylim()[1]], 'r', label='Ground truth')

    ax.set_ylabel('Access points')
    plt.xlim(from_date, end_date)
    # BUG FIX: was `curr_date = start_date` -- `start_date` is not defined in
    # this function; the hourly x-ticks should start at `from_date`.
    xtick_list = []
    curr_date = from_date
    while curr_date < end_date:
        xtick_list.append(curr_date)
        curr_date = curr_date + datetime.timedelta(minutes = 60)

    plt.xticks(xtick_list, rotation=90)
    plt.tick_params(axis='x', which='major', labelsize=8)
    plt.xlabel("Time")

    if figsize == (8,12):
        plt.yticks(xrange(0, len(data.keys()) * step, step), data.keys())
    else:
        plt.tick_params(axis='y', which='both', bottom='off', top='off', labelbottom='off')
    # NOTE(review): this unconditionally re-sets the y-ticks, overriding the
    # if/else above -- kept as-is, but likely only one of the two is wanted.
    plt.yticks(xrange(0, len(data.keys()) * step, step), data.keys())
    plt.tick_params(axis='y', which='major', labelsize=1)

    if len(jaccard_data.keys()) > 0:
        # Overlay the Jaccard index on a secondary y-axis.
        ax2 = ax.twinx()
        jac_handle, = ax2.plot(jaccard_data.keys(), jaccard_data.values(), 'g', label='Jaccard index')
        ax2.set_ylabel('Jaccard index')
        plt.ylim(0, 1.05)

    plt.xlim(from_date, end_date)

    plt.savefig(figname, bbox_inches='tight')
    plt.close(fig)
    
def merge_routers_two(data, perc):
    print len(data.keys())
    router_set_desc = OrderedDict(sorted(data.items(), reverse = True, key=lambda x: len(x[1]))).keys()
    counter = 0
    for router_a in router_set_desc:
        if(data.has_key(router_a)):

            to_remove = []
            for router_b in router_set_desc:
                if(data.has_key(router_b) and router_b != router_a):
                    if(set([item[0] for item in data[router_b]]).issubset(set([item[0] for item in data[router_a]]))):
                        to_remove.append(router_b)
                        
            # chage data to routers data
            if len(to_remove) > 0:
                plot_merging(data, router_a, to_remove, min(main_mins), max(main_mins), "merge_iteration_" + str(counter) + ".pdf", (8,12), True)
                for item in to_remove:
                    data.pop(item, None)
                counter = counter + 1
                print counter
        
#                data_router_a = set([item[0] for item in data[router_a]])
#                data_router_b = set([item[0] for item in data[router_b]])
#                counter = 0
#                for item in data_router_b:
#                    if item in data_router_a:
#                        counter = counter + 1
#                if(float(counter) / len(data_router_b) >= float(perc) / 100):
#                    data.pop(router_b, None)
                #elif counter > 0:
                #    print counter, len(data_router_b), len(data_router_a)
                    
                #                if(set([item[0] for item in data[router_b]]).issubset(set([item[0] for item in data[router_a]]))):
#                    data.pop(router_b, None)
    print len(data.keys())
    return data
            
#Merging of routers + JACCARD
# If router 1 always occurs when router 2 is present (= set of timebins when 1 is present is a subset of timebins when 2 is present), then you can merge router 2 into router 1.
#perc = 100
#merged_routers = merge_routers_two(scan_data_routers_count_1min_bin, perc)
##Calculate and plot Jacccard-index for most frequent routers
#delta = 5 # min bins
#jaccard_data2 = calculate_jaccard(merged_routers, delta)
#plot_jaccard(jaccard_data2, delta, min(dates), max(dates), "jaccard_" + str(delta) + "min_bins_merged_" + datetime.datetime.strftime(start_date, "%Y-%m-%d") + ".pdf")
#plot_jaccard(jaccard_data2, delta, min(dates), max(dates), "jaccard_" + str(delta) + "min_bins_gt_merged_" + datetime.datetime.strftime(start_date, "%Y-%m-%d") + ".pdf", True)
### Jaccard on scatter plot with GT
#plot_figure(merged_routers, min(dates), max(dates), "data_bin_1min_gt_small_jaccard_merged_" + datetime.datetime.strftime(start_date, "%Y-%m-%d") + ".pdf", (8,6), True, jaccard_data2)

#10-09-2014	16:23	stand
#11-09-2014	07:39	walk

#Directed graph of routers
#plot_router_network_directed(merged_routers, "routers_directed_graph.pdf", class_data)

#scan_small = OrderedDict()
#for router in scan_data_routers_count_1min_bin:
#    for s_date in scan_data_routers_count_1min_bin[router]:
#        if s_date[0] >= datetime.datetime(2014,9,10,16,23) and s_date[0] <= datetime.datetime(2014,9,11,7,39):
#            if scan_small.has_key(router):
#                scan_small[router] = scan_small[router] + [s_date[0]]
#            else:
#                scan_small[router] = [s_date[0]]
#                
#count_dict = {}
#for router in scan_small:
#    count_dict[router] = len(scan_small[router])
#counted_dict_sort = sorted(count_dict.items(), reverse = True, key=lambda x: x[1])
#plt.bar(xrange(0, len(count_dict.keys())), count_dict.values())
#plt.show()

#Merging of routers + JACCARD
# If router 1 always occurs when router 2 is present (= set of timebins when 1 is present is a subset of timebins when 2 is present), then you can merge router 2 into router 1.
#Xperc = 100
#Xnum = 3
#X[merged_routers, merge_counted] = merge_routers_weighted_in_parts(scan_data_binned, perc, min(dates), max(dates))
#Calculate and plot Jacccard-index for most frequent routers
#Xjaccard_data2 = calculate_jaccard_weighted(merged_routers, merge_counted, delta)
#plot_jaccard(jaccard_data2, delta, min(dates), max(dates), "jaccard_" + str(delta) + "min_bins_merged_" + datetime.datetime.strftime(start_date, "%Y-%m-%d") + ".pdf")
#plot_jaccard(jaccard_data2, delta, min(dates), max(dates), "jaccard_" + str(delta) + "min_bins_gt_merged_" + datetime.datetime.strftime(start_date, "%Y-%m-%d") + ".pdf", True)
## Jaccard on scatter plot with GT
#Xplot_figure_1(merged_routers, min(dates), max(dates), "data_bin_" + str(delta) + "min_gt_small_jaccard_merged_" + datetime.datetime.strftime(start_date, "%Y-%m-%d") + ".pdf", (8,6), True, jaccard_data2)
#plot_figure_1(merged_routers, min(dates), max(dates), "data_bin_" + str(delta) + "min_gt_small_jaccard_merged_" + datetime.datetime.strftime(start_date, "%Y-%m-%d") + "_2.pdf", (8,6), True)
#for i in xrange(1, 4):
#    jaccard_diff_data = calculate_jaccard_difference(jaccard_data2, i)
#    plot_jaccard_diff(jaccard_diff_data, delta, min(dates), max(dates), "jaccard_diff_" + str(delta) + "min_bins" + str(i) + ".pdf", True)

def take_part_of_data(data, start_date, end_date):
    # Return a new OrderedDict keeping, per router, only the scans whose
    # timestamp falls in [start_date, end_date); routers left with no scans
    # in the window are dropped entirely.  `data` is not modified.
    res = OrderedDict()
    for router, scans in data.items():
        kept = [s for s in scans if start_date <= s[0] < end_date]
        if kept:
            res[router] = kept
    return res
    
#### Reading samples of running / cycling etc and plotting them
#CLASS_DATA_FILENAME = "TransportDataMore.txt"
#WIFI_DATA_FILENAME = "1b768f942564005168dda562defa5b_1412706617_onemonth.bigdata"
#
#class_data = OrderedDict() # key: date, value: transport, e.g. walk, stand
#with open(CLASS_DATA_FILENAME, 'rb') as f:
#    reader = csv.reader(f, delimiter='\t')
#    for row in reader:
#        curr_date = datetime.datetime.strptime(row[0] + ' ' + row[1], "%d-%m-%Y %H:%M")
#        class_data[curr_date] = row[2]
#
#modes = ['walk', 'bike', 'train', 'bus', 'run']   
#for mode in modes:     
#    for idx in xrange(0, len(class_data.items()) - 1):
#        item = class_data.items()[idx]
#        next_item = class_data.items()[idx + 1]
#        if item[1] == mode:
#            start_date = item[0]
#            end_date = next_item[0]
#            perc = 100
#            num = 3
#            scan_data_part = take_part_of_data(scan_data_routers_count, start_date, end_date)
#            if len(scan_data_part.items()) > 0:
##                [merged_routers, merge_counted] = merge_routers_weighted_in_parts(scan_data_binned_part, perc, start_date, end_date)
#                #Calculate and plot Jacccard-index for most frequent routers
##                jaccard_data2 = calculate_jaccard_weighted(merged_routers, merge_counted, delta)
#                plot_figure_1(scan_data_part, start_date - datetime.timedelta(minutes = 1), end_date + datetime.timedelta(minutes = 1), mode + datetime.datetime.strftime(start_date, "%Y-%m-%d-%H-%M") + ".pdf", (8,6))
#            else:
#                print start_date, mode
    
#plot_figure_1(scan_data_binned_frequent, min(dates), max(dates), "data_bin_" + str(delta) + "min_least_frequent" + datetime.datetime.strftime(start_date, "%Y-%m-%d") + ".pdf", (8,6), True)

# scan_data_binned to vectors 0 0 0 1 0 1 1 0

def to_vector_data(data):
    """Convert router -> scan-list data into a binary presence matrix.

    One row per router, one column per minute between min(dates) and
    max(dates) inclusive (module-level `dates`).  An entry is 1 when the
    router was seen in that minute bin, else 0.  Returns a numpy array.
    """
    vector_data = []
    start = min(dates)
    end = max(dates)
    one_minute = datetime.timedelta(minutes = 1)
    for router in data:
        # PERF FIX: build the membership set once per router; the original
        # rebuilt the full timestamp list on every minute of the while loop,
        # making the scan quadratic.
        seen = set([item[0] for item in data[router]])
        s_vector = []
        s_date = start
        while s_date <= end:
            s_vector.append(1 if s_date in seen else 0)
            s_date = s_date + one_minute
        vector_data.append(s_vector)
    return np.array(vector_data)
    
# Binary presence matrix (routers x minute bins) for the frequent-router
# subset; scan_data_binned_frequent is built earlier in the script.
vector_data = to_vector_data(scan_data_binned_frequent)

# Number of components for the (currently commented-out) PCA experiment.
n_comp = 20
#n_comp = 3
#pca = PCA(n_components=n_comp)
#X_r = pca.fit(vector_data).transform(vector_data)
#
## Percentage of variance explained for each components
#print('explained variance ratio: \n%s'
#      % str(pca.explained_variance_ratio_))

#GMM
#from sklearn import mixture
#g = mixture.GMM(n_components=3)
#g_res = g.fit(X_r)
#Z = g.predict(X_r)

#x = [item[0] for item in X_r]
#y = [item[1] for item in X_r]
##
#kmeans = KMeans(init='k-means++', n_clusters=3, n_init=10)
#kmeans.fit(X_r)
##
#Z = kmeans.predict(X_r)
#
#plt.figure()
#
#colour_list = OrderedDict()
#for idx in xrange(0, len(Z)):
#    colour_list[scan_data_binned_frequent.keys()[idx]] = my_colors[Z[idx]]
#        
#plt.scatter(x, y, facecolor=colour_list.values(), s = 30)
#plt.show()
#plt.savefig("pca.pdf")
#plt.close
#
#plt.figure()
#plt.bar(xrange(0, len(pca.explained_variance_ratio_)), pca.explained_variance_ratio_)        
#plt.show()
#plt.close()
    
    
#for router in scan_data_binned_frequent:
#    colour_list[router] = my_colors[1]
#
#for router in scan_data_binned:
#    if not colour_list.has_key(router):
#        colour_list[router] = my_colors[0]
        
#plot_figure_1(scan_data_binned_frequent, min(dates), max(dates), "data_bin_" + str(delta) + "min_least_frequent" + datetime.datetime.strftime(start_date, "%Y-%m-%d") + "PCA.pdf", (8,6), True, {}, colour_list)
 

###############################################################################
#PCA STEP BY STEP     http://sebastianraschka.com/Articles/2014_pca_step_by_step.html 
#n_router = len(vector_data)
### vector_data - [date1, date2, date3 ...]
##
#vector_data = np.array(vector_data)
#mean_arr = []
#for col_idx in xrange(0, len(vector_data[0])):
#    mean_arr.append(np.mean(vector_data[:, col_idx]))
#mean_arr = np.array(mean_arr)
#
#scatter_matrix = np.zeros((n_router,n_router))
#for i in range(vector_data.shape[1]):
#    scatter_matrix += (vector_data[:,i].reshape(n_router,1)\
#        - mean_arr).dot((vector_data[:,i].reshape(n_router,1) - mean_arr).T)
#print('Scatter Matrix:\n', scatter_matrix)
#
#cov_mat = np.cov([vector_data[col_idx, :] for col_idx in xrange(0, len(vector_data))])
###cov_mat = np.cov([vector_data[0,:],vector_data[1,:],vector_data[2,:]])
#print('Covariance Matrix:\n', cov_mat)
#
##vector_data = np.array(vector_data)
##cov_mat = np.cov([vector_data[col_idx, :] for col_idx in xrange(0, len(vector_data))])
#
##plt.figure()
##mult_product = np.zeros((len(vector_data), len(vector_data)))
##
##for i in xrange(0, len(vector_data)):
##    for j in xrange(0, len(vector_data)):
###        print sum(vector_data[i] * vector_data[j])
##        mult_product[i,j] = sum(vector_data[i] * vector_data[j]) / float(sum(vector_data[i]))
#        
## eigenvectors and eigenvalues for the from the scatter matrix
#eig_val_sc, eig_vec_sc = np.linalg.eig(scatter_matrix)
#
## eigenvectors and eigenvalues for the from the covariance matrix
#eig_val_cov, eig_vec_cov = np.linalg.eig(cov_mat)
#
#for i in range(len(eig_vec_cov)):
#    eigvec_sc = eig_vec_sc[:,i].reshape(1,n_router).T
#    eigvec_cov = eig_vec_cov[:,i].reshape(1,n_router).T
##    assert eigvec_sc.all() == eigvec_cov.all(), 'Eigenvectors are not identical'
##
##    print('Eigenvector {}: \n{}'.format(i+1, eigvec_sc))
##    print('Eigenvalue {} from scatter matrix: {}'.format(i+1, eig_val_sc[i]))
##    print('Eigenvalue {} from covariance matrix: {}'.format(i+1, eig_val_cov[i]))
##    print('Scaling factor: ', eig_val_sc[i]/eig_val_cov[i])
##print(40 * '-')
#    
##for i in range(len(eig_val_sc)):
##    eigv = eig_vec_sc[:,i].reshape(1,n_router).T
##    np.testing.assert_array_almost_equal(scatter_matrix.dot(eigv),\
##            eig_val_sc[i] * eigv, decimal=6,\
##            err_msg='', verbose=True)
##
##for ev in eig_vec_sc:
##    np.testing.assert_array_almost_equal(1.0, np.linalg.norm(ev))
#    #instead of 'assert' because of rounding errors
###    
###    
#### Make a list of (eigenvalue, eigenvector) tuples
#eig_pairs = [(np.abs(eig_val_sc[i]), eig_vec_sc[:,i]) for i in range(len(eig_val_sc))]
###
#### Sort the (eigenvalue, eigenvector) tuples from high to low
#eig_pairs.sort()
#eig_pairs.reverse()
###
#### Visually confirm that the list is correctly sorted by decreasing eigenvalues
#for i in eig_pairs:
#    print(i[0])
###    
#matrix_w = np.hstack((eig_pairs[0][1].reshape(n_router,1), eig_pairs[1][1].reshape(n_router,1)))
#print('Matrix W:\n', matrix_w)
###
#transformed = matrix_w.T.dot(vector_data)
#
#from matplotlib import pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
#from mpl_toolkits.mplot3d import proj3d

#GMM
#from sklearn import mixture
#g = mixture.GMM(n_components=2)
#g_res = g.fit(transformed)
#Z = g.predict(transformed)

#x = [item[0] for item in transformed]
#y = [item[1] for item in transformed]
#
#kmeans = KMeans(init='k-means++', n_clusters=3, n_init=10)
#kmeans.fit(transformed)
#
#Z = kmeans.predict(transformed)
#
#plt.figure()
###
#colour_list = OrderedDict()
#for idx in xrange(0, len(Z)):
#    colour_list[scan_data_binned_frequent.keys()[idx]] = my_colors[Z[idx]]
#
#fig = plt.figure()
#ax = fig.add_subplot(111, projection='3d')
#ax.scatter(transformed[0, :], transformed[1, :],\
#    transformed[2, :], c=colour_list.values(), s=50)
#       
#     
#plt.scatter(x, y, facecolor=colour_list.values(), s = 30)
#plt.show()
#plt.savefig("pca_1.pdf")
#plt.close
#
#plt.figure()
#plt.bar(xrange(0, len(pca.explained_variance_ratio_)), pca.explained_variance_ratio_)        
#plt.show()
#plt.close()
    
#for router in scan_data_binned_frequent:
#    colour_list[router] = my_colors[1]
#
#for router in scan_data_binned:
#    if not colour_list.has_key(router):
#        colour_list[router] = my_colors[0]
        
#plot_figure_1(scan_data_binned_frequent, min(dates), max(dates), "data_bin_" + str(delta) + "min_least_frequent" + datetime.datetime.strftime(start_date, "%Y-%m-%d") + "PCA.pdf", (8,6), True, {}, colour_list)
 
 
 
#plt.plot(transformed[0,:], transformed[1,:],\
#     'o', markersize=7, color='blue', alpha=0.5)
#plt.plot(transformed[0,20:40], transformed[1,20:40],
#     '^', markersize=7, color='red', alpha=0.5, label='class2')
#plt.xlim([-4,4])
#plt.ylim([-4,4])
#plt.xlabel('x_values')
#plt.ylabel('y_values')
#plt.legend()
#plt.title('Transformed samples with class labels')
##
#plt.show()

###############################################################################
#
#from sklearn.decomposition import PCA as sklearnPCA
#
#sklearn_pca = sklearnPCA(n_components=3)
#sklearn_transf = sklearn_pca.fit_transform(vector_data)
#
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import proj3d
#
##kmeans = KMeans(init='k-means++', n_clusters=3, n_init=10)
##kmeans.fit(sklearn_transf)
##
##Z = kmeans.predict(sklearn_transf)
#
#colour_list = OrderedDict()
#for idx in xrange(0, len(Z)):
#    colour_list[scan_data_binned_frequent.keys()[idx]] = my_colors[Z[idx] * 2 + 1]
#
#fig = plt.figure(figsize=(8,8))
#ax = fig.add_subplot(111, projection='3d')
#ax.scatter(sklearn_transf[:, 0], sklearn_transf[:, 1],\
#    sklearn_transf[:, 2], c=colour_list.values(), s=50)
#
#plt.show()

#plot_figure_1(scan_data_binned_frequent, min(dates), max(dates), "data_bin_" + str(delta) + "min_GMM" + datetime.datetime.strftime(start_date, "%Y-%m-%d") + "PCA.pdf", (8,6), True, {}, colour_list)
 
 
####################################################
#clustering kmeans step by step
#def cluster_points(X, mu):
#    clusters  = {}
#    for x in X:
#        bestmukey = min([(i[0], np.linalg.norm(x-mu[i[0]])) \
#                    for i in enumerate(mu)], key=lambda t:t[1])[0]
#        try:
#            clusters[bestmukey].append(x)
#        except KeyError:
#            clusters[bestmukey] = [x]
#    return clusters
#    
#def reevaluate_centers(mu, clusters):
#    newmu = []
#    keys = sorted(clusters.keys())
#    for k in keys:
#        newmu.append(np.mean(clusters[k], axis = 0))
#    return newmu  
#
#def has_converged(mu, oldmu):
#    return (set([tuple(a) for a in mu]) == set([tuple(a) for a in oldmu]))    
#    
#def find_centers(X, K):
#    # Initialize to K random centers
#    oldmu = random.sample(X, K)
#    mu = random.sample(X, K)
#    while not has_converged(mu, oldmu):
#        oldmu = mu
#        # Assign all points in X to clusters
#        clusters = cluster_points(X, mu)
#        # Reevaluate centers
#        mu = reevaluate_centers(oldmu, clusters)
#    return(mu, clusters)
#    
#clusters =  cluster_points(sklearn_transf, xrange(0, 20))
#
#colour_list = OrderedDict()
#for idx in xrange(0, len(clusters)):
#    for cluster in clusters.keys():
##        print clusters[cluster]
#        for s_cluster in clusters[cluster]:
#            if all(item in s_cluster for item in sklearn_transf[idx]):
#                colour_list[scan_data_binned_frequent.keys()[idx]] = my_colors[cluster]
#                break;
#
#fig = plt.figure(figsize=(8,8))
#ax = fig.add_subplot(111)
#ax.scatter(sklearn_transf[:, 0], sklearn_transf[:, 1], s=50)
#
#plt.show()
 
 #####
 
from numpy import corrcoef, sum, log, arange
from numpy.random import rand
from pylab import pcolor, show, colorbar, xticks, yticks, xlim, ylim, title, savefig, tick_params

# Ensure the presence matrix is a NumPy array, then compute the covariance
# matrix treating each router's presence vector (one row) as a variable.
vector_data = np.array(vector_data)  
cov_mat = np.cov([vector_data[col_idx, :] for col_idx in xrange(0, len(vector_data))])

#plt.figure()
#R = corrcoef(vector_data)
#pcolor(R)
#pcolor(cov_mat)
#colorbar()
#yticks(arange(0 + 0.5,len(cov_mat) + 0.5),range(0,len(cov_mat)))
#xticks(arange(0 + 0.5,len(cov_mat) + 0.5),range(0,len(cov_mat)), rotation=90)
#ylim(0, len(cov_mat))
#xlim(0, len(cov_mat))
#title('Covariance matrix')
#tick_params(axis='y', which='major', labelsize=4)
#tick_params(axis='x', which='major', labelsize=4)
#savefig('covariance.pdf')
#show()

#minimum = R.min()
#min_idx = np.where(R==R.min())

#list_max_val = {}
#for row_idx in xrange(0, len(R)):
#    for col_idx in xrange(0, len(R[row_idx])):
#        if row_idx != col_idx:
#            list_max_val[(row_idx, col_idx)] = R[row_idx][col_idx]
#            
#max_val = max(list_max_val.values())
#min_val = min(list_max_val.values())
#
#max_idx = []
#min_idx = []
#for key in list_max_val.keys():
#    if list_max_val[key] == max_val:
#        max_idx.append(key)
#    elif list_max_val[key] == min_val:
#        min_idx.append(key)
#        
##[(45, 44), (44, 45)] MAX
##[(0, 86), (86, 0)] MIN
#
##print max_idx
##print min_idx
#             
#print scan_data_binned_frequent.keys()[max_idx[0][0]] # commutenet
#print scan_data_binned_frequent.keys()[max_idx[0][1]] # commutenet
#
#print scan_data_binned_frequent.keys()[min_idx[0][0]] # rabarbar
#print scan_data_binned_frequent.keys()[min_idx[0][1]] # golderguest
#
#colour_list = OrderedDict()
#for key in scan_data_binned_frequent.keys():
#    if key == scan_data_binned_frequent.keys()[max_idx[0][0]]:
#        # MAX
#        colour_list[key] = my_colors[0]
#    elif key == scan_data_binned_frequent.keys()[max_idx[0][1]]:
#        # MAX
#        colour_list[key] = my_colors[1]
#    elif key == scan_data_binned_frequent.keys()[min_idx[0][0]]:
#        # MIN
#        colour_list[key] = my_colors[2]
#    elif key == scan_data_binned_frequent.keys()[min_idx[0][1]]:
#        #MIN
#        colour_list[key] = my_colors[3]
#    else:
#        colour_list[key] = my_colors[0]
#    
#scan_data_max_min = OrderedDict()
#scan_data_max_min[scan_data_binned_frequent.keys()[max_idx[0][0]]] = scan_data_binned_frequent[scan_data_binned_frequent.keys()[max_idx[0][0]]]
#scan_data_max_min[scan_data_binned_frequent.keys()[max_idx[0][1]]] = scan_data_binned_frequent[scan_data_binned_frequent.keys()[max_idx[0][1]]]
#scan_data_max_min[scan_data_binned_frequent.keys()[min_idx[0][0]]] = scan_data_binned_frequent[scan_data_binned_frequent.keys()[min_idx[0][0]]]
#scan_data_max_min[scan_data_binned_frequent.keys()[min_idx[0][1]]] = scan_data_binned_frequent[scan_data_binned_frequent.keys()[min_idx[0][1]]]
#            
#plot_figure_1(scan_data_max_min, min(dates), max(dates), "data_bin_" + str(delta) + "min_max_correlation_" + datetime.datetime.strftime(start_date, "%Y-%m-%d") + "PCA.pdf", (8,6), True, {}, colour_list)
#      
#vect_data_max_min = []
#for router in scan_data_max_min:
#    s_vector = []
#    for s_date in dates:
#        if s_date in [item[0] for item in scan_data_max_min[router]]:
#            s_vector.append(1)
#        else:
#            s_vector.append(0)
#    vect_data_max_min.append(s_vector)
#     
#print corrcoef(vect_data_max_min)
#
#print scan_data_max_min[scan_data_max_min.keys()[2]] == scan_data_max_min[scan_data_max_min.keys()[3]]
#arr = []
#for item in scan_data_max_min[scan_data_max_min.keys()[2]]:
#    if item in scan_data_max_min[scan_data_max_min.keys()[3]]:
#        arr.append(item)
#print len(arr)


###################################
# inner product of vectors

from numpy import corrcoef, sum, log, arange
from numpy.random import rand
from pylab import pcolor, show, colorbar, xticks, yticks, xlim, ylim, title, savefig, tick_params, close
 
# Recompute the router-by-router covariance matrix (same expression as the
# earlier covariance section).
cov_mat = np.cov([vector_data[col_idx, :] for col_idx in xrange(0, len(vector_data))])

#plt.figure()
# Pairwise LM-distance matrix between router vectors; filled by the loop below.
mult_product = np.zeros((len(vector_data), len(vector_data)))

def LMdist(v1,v2):
    # Overlap of two binary vectors normalised by the size of the smaller
    # one: 1.0 when the smaller vector is fully contained in the other.
    overlap = sum(v1 * v2)
    smaller = float(min([sum(v1), sum(v2)]))
    return overlap / smaller
    
def LM(v1,v2):
    # Distance derived from the normalised overlap: 0 when the smaller
    # vector is fully contained in the other, growing towards 1 as the
    # overlap shrinks.
    normalised_overlap = sum(v1 * v2) / float(min([sum(v1), sum(v2)]))
    return abs(normalised_overlap - 1)
    
# Fill the pairwise LM-distance matrix over all router presence vectors.
for row_idx in xrange(len(vector_data)):
    for col_idx in xrange(len(vector_data)):
        mult_product[row_idx, col_idx] = LM(vector_data[row_idx], vector_data[col_idx])
        
#pcolor(mult_product)
#colorbar()
#yticks(arange(0 + 0.5,len(cov_mat) + 0.5),range(0,len(cov_mat)))
#xticks(arange(0 + 0.5,len(cov_mat) + 0.5),range(0,len(cov_mat)), rotation=90)
#ylim(0, len(cov_mat))
#xlim(0, len(cov_mat))
#title('Covariance matrix')
#tick_params(axis='y', which='major', labelsize=4)
#tick_params(axis='x', which='major', labelsize=4)
#savefig('covariance.pdf')
#show()
#close()

import random
    
#Manhattan Distance
#Manhattan Distance
def L1(v1,v2):
    # Sum of absolute coordinate differences; prints 'error' and returns -1
    # when the vectors have mismatched lengths.
    if len(v1) != len(v2):
        print 'error'
        return -1
    return sum([abs(a - b) for a, b in zip(v1, v2)])

def find_k_most_different_vect(rows, k):
    """Greedily pick indices of k vectors most different from those chosen.

    Starting from rows[0], each pass selects the row with the largest
    LMdist to the running sum of already-selected vectors, then folds that
    row into the running sum.

    NOTE(review): the return value mixes the scalar entries of rows[0]
    with the selected rows appended as nested lists; `result =
    [rows[0].tolist()]` was probably intended. Preserved as-is because the
    only call site (in kcluster) is commented out — confirm before relying
    on the shape.
    """
    idx_arr = []
    # Deep copy so the accumulation never mutates rows[0] in place.
    first_vec = copy.deepcopy(rows[0])
    for _ in range(k):
        max_idx = 0
        max_dist = 0
        for j in range(len(rows)):
            distance = LMdist(first_vec, rows[j])
            print(distance)
            if distance > max_dist:
                max_dist = distance
                max_idx = j
        idx_arr.append(max_idx)
        # Fold the chosen vector into the running sum for the next pass.
        first_vec = first_vec + rows[max_idx]

    result = rows[0].tolist()
    print(result)
    for idx in idx_arr:
        print(rows[idx].tolist())
        result.append(rows[idx].tolist())
    return result
    
# kmeans with L1 distance. 
# rows refers to the NxM feature vectors
def kcluster(rows, k=3, distance=LM):
    """K-means clustering over rows (NxM feature vectors).

    Adapted from "Programming Collective Intelligence". Centroids are
    seeded uniformly at random inside each feature's observed range, then
    iterated at most 100 times or until assignments stop changing.

    Returns a list of k lists, each holding the row indices assigned to
    that cluster.
    """
    # Per-feature (min, max) over all rows, used to seed the centroids.
    ranges = [(min([row[i] for row in rows]), max([row[i] for row in rows]))
              for i in range(len(rows[0]))]

    # Create k randomly placed centroids inside the observed ranges.
    clusters = [[random.random() * (ranges[i][1] - ranges[i][0]) + ranges[i][0]
                 for i in range(len(rows[0]))] for j in range(k)]
    # Alternative deterministic seeding:
    # clusters = find_k_most_different_vect(rows, k)

    lastmatches = None
    for t in range(100):
        print('Iteration %d' % t)
        bestmatches = [[] for i in range(k)]
        # Assign each row to its nearest centroid.
        for j in range(len(rows)):
            row = rows[j]
            bestmatch = 0
            best_d = distance(clusters[0], row)
            for i in range(1, k):
                d = distance(clusters[i], row)
                # Strict < keeps the original first-match tie-breaking
                # while computing each distance once instead of twice.
                if d < best_d:
                    best_d = d
                    bestmatch = i
            bestmatches[bestmatch].append(j)
        # Converged: identical assignment to the previous iteration.
        if bestmatches == lastmatches:
            break
        lastmatches = bestmatches

        # Move each centroid to the mean of its members; a cluster with no
        # members keeps its previous position.
        for i in range(k):
            avgs = [0.0] * len(rows[0])
            if len(bestmatches[i]) > 0:
                for rowid in bestmatches[i]:
                    for m in range(len(rows[rowid])):
                        avgs[m] += rows[rowid][m]
                for j in range(len(avgs)):
                    avgs[j] /= len(bestmatches[i])
                clusters[i] = avgs
    return bestmatches


###############################################################################
# from http://pandoricweb.tumblr.com/post/8646701677/python-implementation-of-the-k-means-clustering
import sys, math

class Point:
    """A point in n-dimensional space, tagged with its source row index."""
    def __init__(self, coords, idx=0, reference=None):
        # idx defaults to 0 so callers that only have coordinates
        # (e.g. makeRandomPoint) can still construct a Point; existing
        # positional callers are unaffected.
        self.idx = idx
        self.coords = coords
        self.n = len(coords)
        self.reference = reference
    def __repr__(self):
        return str(self.coords)
    def __len__(self):
        # Fixed: was misspelled ___len__ (three underscores) and read a
        # bare `coords`, which would have raised NameError if called.
        return len(self.coords)

class Cluster:
    """A non-empty set of Points together with their centroid."""
    def __init__(self, points):
        if len(points) == 0: raise Exception("ILLEGAL: empty cluster")
        self.points = points
        self.n = points[0].n  # dimensionality shared by every member
        for p in points:
            if p.n != self.n: raise Exception("ILLEGAL: wrong dimensions")
        self.centroid = self.calculateCentroid()
    def __repr__(self):
        return str(self.points)
    def update(self, points):
        """Replace the member points; return how far the centroid moved."""
        old_centroid = self.centroid
        self.points = points
        self.centroid = self.calculateCentroid()
        return getDistance_2(old_centroid, self.centroid)
    def calculateCentroid(self):
        """Mean of the member points' coordinates, as a new Point (idx 0)."""
        # sum over a list replaces the Python-2-only reduce(lambda...);
        # float() keeps true division under Python 2 as the 0.0 seed did.
        centroid_coords = [sum([p.coords[i] for p in self.points]) /
                           float(len(self.points)) for i in range(self.n)]
        return Point(centroid_coords, 0)

def kmeans(points, k, cutoff):
    """Lloyd's k-means over Point objects.

    Seeds the clusters with k randomly sampled points, then alternates
    assignment and centroid update until no centroid moves by more than
    `cutoff`. Returns the list of Cluster objects.
    """
    initial = random.sample(points, k)
    clusters = [Cluster([p]) for p in initial]
    while True:
        lists = [[] for c in clusters]
        for p in points:
            # Nearest centroid; the first cluster wins ties (strict <).
            smallest_distance = getDistance_2(p, clusters[0].centroid)
            index = 0
            # range(1, len(clusters)) replaces range(len(clusters[1:])),
            # which built a throwaway slice just to take its length.
            for i in range(1, len(clusters)):
                distance = getDistance_2(p, clusters[i].centroid)
                if distance < smallest_distance:
                    smallest_distance = distance
                    index = i
            lists[index].append(p)
        biggest_shift = 0.0
        for i in range(len(clusters)):
            shift = clusters[i].update(lists[i])
            biggest_shift = max(biggest_shift, shift)
        if biggest_shift < cutoff:
            break
    return clusters
    
#def getDistance(a, b):
#    if a.n != b.n: raise Exception("ILLEGAL: non comparable points")
#    ret = reduce(lambda x,y: x + pow((a.coords[y]-b.coords[y]), 2),range(a.n),0.0)
#    return math.sqrt(ret)

def getDistance_2(p1, p2):
    """LM distance between two Point objects, compared by coordinates."""
    return LM(p1.coords, p2.coords)
    
def makeRandomPoint(n, lower, upper):
    """Return a Point with n coordinates drawn uniformly from [lower, upper].

    Fixed: Point.__init__ requires an index argument that the original
    call omitted (TypeError); the synthetic point is given idx 0.
    """
    return Point([random.uniform(lower, upper) for _ in range(n)], 0)

#num_points, dim, k, cutoff, lower, upper = 10, 2, 3, 0.5, 0, 200
##points = map( lambda i: makeRandomPoint(dim, lower, upper), range(num_points) )
#point_list = []
#for idx in xrange(0, len(vector_data)):
#    point_list.append(Point(vector_data[idx], idx))
    
#clusters = kmeans(point_list, k, cutoff)
#
#colour_list = OrderedDict()
#for i,c in enumerate(clusters): 
#    for p in c.points:
#        print " Cluster: ",i,"\t Point :", p.idx
#        colour_list[scan_data_binned_frequent.keys()[p.idx]] = my_colors[i]
    
#plot_figure_1(scan_data_binned, min(dates), max(dates), "data_bin_" + str(delta) + datetime.datetime.strftime(start_date, "%Y-%m-%d") + datetime.datetime.strftime(end_date, "%Y-%m-%d") + ".pdf", (8,6), True, {})

#print scan_data_binned[('00:0e:8c:bd:87:e8', 'CommuteNet')]

def remove_transport_routers(data):
    tempData = copy.deepcopy(data)
    vd = to_vector_data(tempData)
    for idx in xrange(len(vd) - 1, 0, -1):
#        if data.keys()[idx] == ('00:0e:8c:bd:87:e8', 'CommuteNet'):
#            for idd in xrange(0, len(vd[idx])):
#                print vd[idx, idd]
#            print vd[idx]
        max_count = 0
        curr_count = 0
        for num in vd[idx]:
            if num == 1:
                curr_count = curr_count + 1
#                print num, curr_count
            else:
                if max_count < curr_count:
                    max_count = curr_count
                curr_count = 0
        if max_count < 3:
            print 'seq: ', tempData.keys()[idx]
#            print idx, data.keys()[idx]
            tempData.pop(tempData.keys()[idx], None)
    return tempData
        
# First filtering pass: keep only routers with a long enough contiguous run.
scan_data_stop_location = remove_transport_routers(scan_data_binned)

def remove_routers_by_jaccard(data, data_orig, threshold, delta):
    tempData = copy.deepcopy(data) 
    jaccard_data = calculate_jaccard(data_orig, delta)
#    print max(jaccard_data.keys())
    for router in data:
        jaccard_arr = []
        for s_date in tempData[router]:
            if jaccard_data.has_key(s_date[0]):
                jaccard_arr.append(jaccard_data[s_date[0]])
#            else:
#                print s_date[0]
        if max(jaccard_arr) < threshold:
            tempData.pop(router, None)
            print 'jac: ', router
    return tempData   
    
print len(scan_data_stop_location)

# Second pass: reject routers whose Jaccard similarity never peaks above
# the threshold.
jaccard_threshold = 0.8
jaccard_delta = 1 #minutes
# NOTE(review): the global `delta` is passed here, not the freshly defined
# `jaccard_delta` — confirm which bin size was intended.
scan_data_stop_location = remove_routers_by_jaccard(scan_data_stop_location, scan_data_binned, jaccard_threshold, delta)    
print len(scan_data_stop_location)

# Third pass: drop routers observed in fewer than min_time bins.
# NOTE(review): this rebinds the module-level `min_time` (defined as 20
# near the top of the file) to 10 for everything that runs after here.
min_time = 10
# .keys() returns a list under Python 2, so popping while iterating is safe.
for key in scan_data_stop_location.keys():
    if len(scan_data_stop_location[key]) < min_time:
        scan_data_stop_location.pop(key, None)

print len(scan_data_stop_location)        
vd = to_vector_data(scan_data_stop_location)

# Cluster the surviving routers' presence vectors with the LM-distance
# k-means implemented above; clustered_idx holds row indices per cluster.
k = 4
clustered_idx = kcluster(vd, k)
print clustered_idx
        
# Colour each router by the index of the cluster it landed in, then plot.
colour_list = OrderedDict()
for idx in range(len(clustered_idx)):
    cluster = clustered_idx[idx]
    for router_idx in cluster:
        colour_list[scan_data_stop_location.keys()[router_idx]] = my_colors[idx]
    
plot_figure_1(scan_data_stop_location, min(dates), max(dates), "k_means_" + str(k) + "_jaccard_rejection_" + datetime.datetime.strftime(start_date, "%Y-%m-%d") + datetime.datetime.strftime(end_date, "%Y-%m-%d") + ".pdf", (8,6), True, {}, colour_list)

#plot_figure_1(scan_data_binned_frequent, min(dates), max(dates), "data_bin_" + str(delta) + "min_max_correlation_" + datetime.datetime.strftime(start_date, "%Y-%m-%d") + "PCA.pdf", (8,6), True, {}, colour_list)