import main
import output
import datetime
import common
import loader
import csv
from collections import OrderedDict
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mpld

def calc_scans_per_hours(start_date, dates, mean_deltas):
    """Bucket inter-scan gaps (in seconds) into one-hour slots for a single day.

    Walks the 24 hours starting at ``start_date`` and, for each hour, collects
    the gap to the *next* scan for every scan whose timestamp falls in that
    hour. Per-hour gap lists are returned, and the hour's mean gap (0 if the
    hour had no scans) is appended to ``mean_deltas`` keyed by hour-of-day so
    that repeated calls accumulate one mean per day.

    Parameters:
        start_date  -- datetime at midnight of the day to process.
        dates       -- sorted list of scan datetimes (should lie within the day).
        mean_deltas -- dict mapping datetime.time(hour, 0) -> list of per-day
                       mean gaps; mutated in place.

    Returns [all_deltas, mean_deltas] where all_deltas is an OrderedDict
    mapping each hour's start datetime to its list of gaps.

    Fixes vs. the original: the bucket key is now computed from the hour just
    processed (the old code keyed each hour's data under the *following* hour,
    and wrapped hour 23 onto midnight); the window is half-open
    [hour, hour + 1h) so a scan exactly on the hour boundary is counted;
    ``has_key``/``xrange`` (removed in Python 3) are replaced by ``in``/``range``.
    """
    curr_date = start_date
    end_date = start_date + datetime.timedelta(days=1)
    all_deltas = OrderedDict()
    one_hour = datetime.timedelta(hours=1)
    while curr_date < end_date:
        deltas = []
        for idx in range(len(dates) - 1):
            s_date = dates[idx]
            next_date = dates[idx + 1]
            # half-open window: a scan exactly on the hour belongs to this hour
            if curr_date <= s_date < curr_date + one_hour:
                deltas.append((next_date - s_date).seconds)
        all_deltas[curr_date] = deltas
        # key by the hour we just processed, BEFORE advancing curr_date
        key = datetime.time(curr_date.hour, 0)
        mean_value = np.mean(deltas) if deltas else 0
        if key in mean_deltas:
            mean_deltas[key] = mean_deltas[key] + [mean_value]
        else:
            mean_deltas[key] = [mean_value]
        curr_date = curr_date + one_hour
    return [all_deltas, mean_deltas]
    
def loadScanData(file_name, start_date, end_date):
    """Read scan timestamps from a CSV file, keeping those inside (start_date, end_date).

    The CSV carries a Unix timestamp in column index 3; the header row (the
    literal string 'timestamp' in that column) and blank rows are skipped.
    Bounds are exclusive on both ends, matching the original filter.

    Parameters:
        file_name  -- path to the CSV file.
        start_date -- datetime lower bound (exclusive).
        end_date   -- datetime upper bound (exclusive).

    Returns a list of datetime.datetime objects in file order.

    Fixes vs. the original: ``long()`` does not exist on Python 3, and feeding
    a binary-mode file to ``csv.reader`` raises TypeError there; ``int()`` and
    text mode work on both Python 2 and 3. Blank rows no longer raise
    IndexError on ``row[3]``.
    """
    dates = []
    with open(file_name, 'r') as f:
        reader = csv.reader(f, delimiter=',')
        for row in reader:
            if not row or row[3] == 'timestamp':
                continue  # header or blank line
            # fromtimestamp() converts to local time — matches original behavior
            curr_date = datetime.datetime.fromtimestamp(int(row[3]))
            if start_date < curr_date < end_date:
                dates.append(curr_date)
    return dates
    
# Training days: five consecutive midnights, 2014-09-09 through 2014-09-13.
# A second batch (Sep 14-18) was used at some point; extend the range to
# range(9, 19) to include it again.
train_dates = [datetime.datetime(2014, 9, day, 0, 0) for day in range(9, 14)]

# Accumulate per-hour mean inter-scan gaps over every training day.
all_deltas_all = OrderedDict()

for day_start in train_dates:
    day_end = day_start + datetime.timedelta(days=1)
    scan_times = loadScanData(loader.DATA_FOLDER + "//" + loader.WIFI_DATA_FILENAME,
                              day_start, day_end)
    # de-duplicate and sort the timestamps before bucketing them by hour
    calc_scans_per_hours(day_start, sorted(set(scan_times)), all_deltas_all)

# Flatten the per-hour lists of daily means into one sequence for the histogram.
itms = [delta for hour_means in all_deltas_all.values() for delta in hour_means]

# Box plot: one box per hour-of-day, distribution of daily mean gaps (seconds).
fig, axarr = plt.subplots(1, figsize=(8, 4))
# NOTE(review): each value is already a list of per-day means; wrapping it in
# another list ([[item]]) looks unintended — confirm the boxplot renders one
# box per hour as expected, else pass list(all_deltas_all.values()) directly.
plt.boxplot([[item] for item in all_deltas_all.values()])
plt.xlabel("Time")
plt.ylabel("Delta time between scans [s]")
plt.savefig("Output" + "//" + "delta_time.pdf", bbox_inches='tight')
plt.close()

# Histogram of all mean gaps; tick labels relabel the x axis from seconds to minutes.
fig, axarr = plt.subplots(1, figsize=(8, 4))
plt.hist(itms, bins=40, color=output.my_colors[0])
plt.xlabel("Delta time between scans [min]")
plt.ylabel("Number of occurences")
# range() instead of xrange(): xrange was removed in Python 3
plt.xticks([item * 60 for item in range(12)], range(12))
plt.savefig("Output" + "//" + "hist_delta_time.pdf", bbox_inches='tight')
plt.close()