'''
Created on Feb 24, 2015

@author: dewey

TODO: migrate this module to pystrat (it still targets the old proxy_model layout).
'''

from __future__ import division

import numpy as np
import math
import datetime

from pystrat.datamodel import VerticalData, Dendrogram, METADATA_NAME, METADATA_DESCRIPTION
from pystrat.project import METADATA_LOCATION_ID
from pystrat.units import convert as units_convert

def filter_list(data_list, filter_values):
    """Apply an odd-length FIR filter to ``data_list``, renormalizing at the edges.

    Each output element is the weighted sum of the neighbouring input values
    under ``filter_values`` (centred on the target index).  Where the filter
    window hangs off either end of the data, the result is rescaled by
    ``f_sum / (sum of in-range weights)`` so edge values are not damped.

    :param data_list: sequence of numbers to filter
    :param filter_values: odd-length sequence of filter weights
    :returns: list of filtered values, same length as ``data_list``
    :raises ValueError: if ``filter_values`` has even length
    """
    f_len = len(filter_values)
    if f_len % 2 == 0:
        raise ValueError("Need odd length filter")
    f_sum = float(np.sum(filter_values))
    data_len = len(data_list)
    centre_index = f_len // 2
    out_list = []
    for target_index in range(data_len):
        weighted_sum = 0.0
        in_range_weight = 0.0
        for filter_offset, weight in enumerate(filter_values):
            cell_index = target_index - centre_index + filter_offset
            if 0 <= cell_index < data_len:
                weighted_sum += weight * data_list[cell_index]
                in_range_weight += weight
        # Rescale for truncated windows at the edges; if the in-range weights
        # sum to zero, leave the value unscaled rather than divide by zero.
        if in_range_weight != 0:
            scaling = f_sum / in_range_weight
        else:
            scaling = 1
        out_list.append(weighted_sum * scaling)
    return out_list

def gaussian_filter(stdev, sample_interval=1, filter_length=None):
    """Build a normalized Gaussian filter kernel for use with filter_list.

    :param stdev: standard deviation of the Gaussian, in data units
    :param sample_interval: spacing between samples (stdev is divided by this
        so the kernel is expressed in sample counts)
    :param filter_length: explicit kernel length; when None an odd length of
        roughly 4 standard deviations is chosen automatically.  NOTE(review):
        an explicitly passed even length is not corrected — filter_list will
        reject it.
    :returns: tuple of weights summing to 1, symmetric about the centre
    """
    stdev = stdev / float(sample_interval)
    if filter_length is None:
        filter_length = int(stdev * 4)
        if filter_length % 2 == 0:
            filter_length += 1  # keep the kernel odd so it has a true centre
    # Sample the Gaussian density at integer offsets centred on zero.
    offsets = np.arange(filter_length) - filter_length // 2
    weights = 1 / (np.sqrt(2 * np.pi) * stdev) * np.exp(-offsets**2 / (2 * stdev**2))
    # Normalize so the discrete kernel sums to exactly 1.
    weights = weights / weights.sum()
    return tuple(weights)
        
def convert_units(vdata, unit):
    """Return a copy of ``vdata`` with every value converted to ``unit``.

    The new VerticalData keeps the original name, location id, key unit and
    sample ids; only the values (and value_unit) change.

    NOTE(review): repeated conversions keep appending to the id — stripping
    any earlier "_converted_to_" suffix first would keep ids stable for the
    same underlying parameter.
    """
    source_unit = vdata.value_unit

    converted = VerticalData()
    converted._id = "%s_converted_to_%s" % (vdata._id, unit)
    converted.set_name(vdata.metadata(METADATA_NAME))
    converted.set_metadata(METADATA_LOCATION_ID, vdata.metadata(METADATA_LOCATION_ID))

    keys, values, sample_ids = vdata.data_all()
    for key, value, sample_id in zip(keys, values, sample_ids):
        converted.add_value(key, units_convert(value, source_unit, unit), sample_id)

    converted.value_unit = unit
    converted.key_unit = vdata.key_unit
    return converted

def standardized_anomalies(vdata):
    """Return a new VerticalData of standardized anomalies of ``vdata``.

    Each value becomes (value - mean) / stdev, giving zero mean and unit
    standard deviation.  The result keeps the key unit, location id and
    sample ids, with " Std" appended to the name.

    :raises ValueError: if the values have zero variance
    """
    keys, values, sample_ids = vdata.data_all()
    variance = np.var(values)
    if variance == 0:
        raise ValueError("Cannot scale vdata %s to unit stdev (variance is 0)" % vdata.name())
    mean = np.mean(values)
    inv_stdev = np.sqrt(1.0 / variance)

    # Fall back to the id when no name metadata is present.
    base_name = vdata.metadata(METADATA_NAME) or vdata._id

    standardized = VerticalData()
    standardized._id = "%s_std_anom" % vdata._id
    standardized.set_name("%s Std" % base_name)
    standardized.key_unit = vdata.key_unit
    standardized.set_metadata(METADATA_LOCATION_ID, vdata.metadata(METADATA_LOCATION_ID))

    for key, value, sample_id in zip(keys, values, sample_ids):
        standardized.add_value(key, inv_stdev * (value - mean), sample_id)

    return standardized

class Cluster(object):
    """Node in an agglomerative cluster tree over rows of a data table.

    A leaf wraps a single data row; an internal node records the two child
    clusters it was merged from in ``clusters``.  The centroid and the
    dispersion (within-cluster sum of squared deviations from the centroid)
    are computed lazily and cached.
    """

    def __init__(self, data, data_indicies, clusters=None):
        self.__data = data                  # full data table, shared by all nodes
        self.__cluster_data = None          # cached member rows
        self.data_indicies = list(data_indicies)
        self.__dispersion = None            # cached sum of squared deviations
        self.__centroid = None              # cached mean of member rows
        self.clusters = clusters            # (child1, child2), or None for a leaf
        self.n = len(data_indicies)
        self.dispersion_increase = 0        # set externally when two clusters merge

    def cluster_data(self):
        """Rows of the data table belonging to this cluster (cached)."""
        if self.__cluster_data is None:
            rows = [self.__data[row_index] for row_index in self.data_indicies]
            self.__cluster_data = np.array(rows)
        return self.__cluster_data

    def centroid(self):
        """Mean of the member rows (cached)."""
        if self.__centroid is None:
            self.__centroid = np.sum(self.cluster_data(), 0) / self.n
        return self.__centroid

    def dispersion(self):
        """Within-cluster sum of squared deviations from the centroid (cached)."""
        if self.__dispersion is None:
            mean = self.centroid()
            deviations = [(row - mean) ** 2 for row in self.cluster_data()]
            self.__dispersion = np.sum(deviations)
        return self.__dispersion

    def data(self):
        """The full data table this cluster indexes into."""
        return self.__data

    def __str__(self, level=0):
        indent = "    " * level
        if self.clusters is None:
            return "%sDatapoint %s\n" % (indent, self.cluster_data()[0])
        text = "%sCluster with dispersion %s\n" % (indent, self.dispersion())
        for child in self.clusters:
            text += child.__str__(level + 1)
        return text
    
 
def _merge_clusters(c1, c2):
    """Combine two clusters into a new parent Cluster keeping both as children."""
    merged_indicies = c1.data_indicies + c2.data_indicies
    return Cluster(c1.data(), merged_indicies, (c1, c2))

def least_squares_cluster(data, order_constrain=False):
    """Agglomerative least-squares clustering of the rows of ``data``.

    Starts with one cluster per row and repeatedly merges the pair whose
    merge causes the smallest increase in total dispersion, until a single
    root cluster remains.  With ``order_constrain`` True only adjacent
    clusters may merge (preserving stratigraphic order).

    :returns: the root Cluster of the merge tree
    """
    working = [Cluster(data, [row]) for row in range(len(data))]
    while len(working) > 1:
        best_merge = None
        best_increase = None
        best_left = None
        best_right = None

        for left_index in range(len(working)):
            # Under the order constraint only the immediate neighbour is a
            # legal partner; otherwise every later cluster is a candidate.
            if order_constrain:
                right_limit = min(left_index + 2, len(working))
            else:
                right_limit = len(working)
            for right_index in range(left_index + 1, right_limit):
                left = working[left_index]
                right = working[right_index]
                candidate = _merge_clusters(left, right)
                increase = candidate.dispersion() - left.dispersion() - right.dispersion()
                candidate.dispersion_increase = increase
                # Strict < keeps the earliest pair on ties, matching the
                # original scan order.
                if best_merge is None or increase < best_increase:
                    best_merge = candidate
                    best_increase = increase
                    best_left = left_index
                    best_right = right_index

        working[best_left] = best_merge
        working.pop(best_right)
    return working[0]

def create_dendrogram(cluster, depths):
    """Recursively convert a Cluster merge tree into a Dendrogram tree.

    ``depths`` maps row indices to depths; when it is empty/None every
    node's key is None.  A leaf's key is the depth of its single data row;
    an internal node's key is the midpoint of its children's central keys.
    """
    if cluster.clusters is None:
        leaf_key = depths[cluster.data_indicies[0]] if depths else None
        return Dendrogram(None, None, None, 0, 0, 1, leaf_key)
    left_child, right_child = cluster.clusters
    left_dend = create_dendrogram(left_child, depths)
    right_dend = create_dendrogram(right_child, depths)
    node_key = (left_dend.central_key + right_dend.central_key) / 2.0 if depths else None
    return Dendrogram(None, None, (left_dend, right_dend), cluster.dispersion(),
                      cluster.dispersion_increase, cluster.n, node_key)

def data_table_averaged(vdata_list, allow_null_values=True):
    """Build a depth-by-series table of summary values.

    Rows correspond to the sorted union of all depths across the series;
    columns correspond to the entries of ``vdata_list``.  Each cell holds
    the first element of the series' ``data_summary(depth)``, or None where
    the series has no data at that depth.

    :returns: (table, ordered_depths) — table is a numpy array of shape
        (n_depths, n_series)
    :raises ValueError: when ``allow_null_values`` is False and a cell would
        be missing or NaN
    """
    all_depths = set()
    for vdata in vdata_list:
        all_depths.update(vdata.depths())
    ordered_depths = sorted(all_depths)

    columns = [[] for _ in vdata_list]
    for depth in ordered_depths:
        for column, vdata in zip(columns, vdata_list):
            summary = vdata.data_summary(depth)
            if summary:
                value = summary[0]
                if math.isnan(value) and not allow_null_values:
                    raise ValueError("Null value found for depth %s in vdata %s" % (depth, vdata.name()))
                column.append(value)
            else:
                if not allow_null_values:
                    raise ValueError("Null value found for depth %s in vdata %s" % (depth, vdata.name()))
                column.append(None)

    # Transpose so rows are depths and columns are series.
    return np.array(columns).T, ordered_depths

def data_table_all(vdata_list, allow_null_values=True):
    """Build a per-sample data table across several vdata series.

    Unlike data_table_averaged, every individual sample is kept: each row
    corresponds to one (depth, sample_id) pair, each column to one series in
    ``vdata_list``.  Cells are None where a series has no value for that
    sample.  ``data_all(depth)`` is expected to return (values, sample_ids)
    parallel sequences for that depth, or a falsy value when absent.

    :returns: (table, depths_out, sample_ids_out) — table is a numpy array
        of shape (n_rows, n_series); depths_out/sample_ids_out are the
        per-row depth and sample id
    :raises ValueError: on duplicate sample ids at one depth, or — when
        ``allow_null_values`` is False — when a series has no values at a
        depth present in another series
    """
    # Collect the union of depths across all series, and start one column
    # (list) per series.
    unique_depths = set()
    table_by_vdata = []
    for vdata in vdata_list:
        table_by_vdata.append([])
        for depth in vdata.depths():
            unique_depths.add(depth)
    ordered_depths = sorted(unique_depths)
    depths_out = []
    sample_ids_out = []
    for depth in ordered_depths:
        # First pass over the series: fetch each one's (values, sample_ids)
        # for this depth and gather the union of sample ids seen.
        vdata_values_at_depth = []
        vdata_sample_ids = set()
        for vdata_index in range(len(vdata_list)):
            values_at_depth = vdata_list[vdata_index].data_all(depth)
            vdata_values_at_depth.append(values_at_depth)
            if values_at_depth:
                sample_ids_at_depth = set()
                for sample_id in values_at_depth[1]:
                    vdata_sample_ids.add(sample_id)
                    sample_ids_at_depth.add(sample_id)
                # A set smaller than the id list means a duplicate id within
                # this series at this depth.
                if not len(sample_ids_at_depth) == len(values_at_depth[1]):
                    raise ValueError("Duplicate sample id found at depth %s for vdata %s." % (depth, vdata_list[vdata_index].name()))
                
            elif not allow_null_values:
                raise ValueError("No values found for %s at depth %s" % (vdata_list[vdata_index].name(), depth))
        # Fix a row order for this depth's samples.  Sort the ids when
        # possible; a None id would break sorting, so leave the order as-is
        # in that case.
        ordered_sample_ids = list(vdata_sample_ids)
        if not None in ordered_sample_ids:
            ordered_sample_ids.sort()
        for sample_id in ordered_sample_ids:
            sample_ids_out.append(sample_id)
            depths_out.append(depth)
        
        # Second pass: append one cell per (series, sample id), using None
        # where a series lacks that sample (or has no data at this depth).
        for vdata_index in range(len(vdata_list)):
            vals = vdata_values_at_depth[vdata_index]
            if vals:
                for sample_id in ordered_sample_ids:
                    if sample_id in vals[1]:
                        table_by_vdata[vdata_index].append(vals[0][vals[1].index(sample_id)])
                    else:
                        table_by_vdata[vdata_index].append(None)
            else:
                for index in range(len(ordered_sample_ids)):
                    table_by_vdata[vdata_index].append(None)
    # Transpose so rows are samples and columns are series.
    return np.array(table_by_vdata).T, depths_out, sample_ids_out
        
                
            

def __timenowseconds():
    """Return the current POSIX timestamp as a string (used to build unique ids)."""
    return str(datetime.datetime.now().timestamp())

def __timenowformatted():
    """Return the current local time formatted for display names."""
    now = datetime.datetime.now()
    return now.strftime("%Y-%m-%d %H:%M")

def cluster_vdata(original_vdata_list, depth_constrain=True, scale_to_unit_variance=False):
    """Cluster a set of vdata series and return the resulting Dendrogram.

    :param original_vdata_list: series to cluster together (one column each)
    :param depth_constrain: when True, only depth-adjacent clusters may merge
        (stratigraphically constrained clustering)
    :param scale_to_unit_variance: when True, each series is first replaced
        by its standardized anomalies so all series weigh equally
    :returns: Dendrogram with id, name, description, location and key-unit
        metadata filled in
    """
    if scale_to_unit_variance:
        vdata_list = [standardized_anomalies(vdata) for vdata in original_vdata_list]
    else:
        vdata_list = original_vdata_list

    # Null values would break the dispersion computation, so forbid them.
    data, depths = data_table_averaged(vdata_list, False)
    # BUG FIX: depth_constrain was previously ignored (True was hard-coded),
    # so unconstrained clustering could never be requested.
    root_cluster = least_squares_cluster(data, depth_constrain)
    dendrogram = create_dendrogram(root_cluster, depths)
    dendrogram.set_metadata(METADATA_LOCATION_ID, vdata_list[0].metadata(METADATA_LOCATION_ID))
    dendrogram._id = "cluster_%s" % __timenowseconds()
    dendrogram.set_metadata(METADATA_NAME, "Cluster %s" % __timenowformatted())
    # NOTE(review): assumes every series shares the same key unit — may want
    # to verify they all match.
    dendrogram.key_unit = vdata_list[0].key_unit
    vdata_names = [vdata.name() for vdata in vdata_list]
    proxies_string = " ".join(vdata_names)
    dendrogram.set_metadata(METADATA_DESCRIPTION, "Cluster using proxies %s" % proxies_string)
    return dendrogram
    
    
    
    