from functools import *
from toolz.curried import *
#from languagetools import *
import numpy as np
import pandas as pd
import scipy as sc
from scipy import spatial
# https://toolz.readthedocs.io/en/latest/api.html

def merge_distance_from(x):
    """Return a distance function for sample *x*.

    The returned callable takes (context_influence, ws, c, cs) and scores
    each unit by the squared absolute difference between x and the unit
    weights ws.

    Fixes vs original: the lambda was never returned (the function returned
    None) and `sq` was undefined — replaced with np.square.
    """
    # NOTE(review): the commented-out blend below looks like the intended
    # merge-NG distance ("why sq(abs)?" asked the original author); kept
    # disabled to preserve the active behavior.
    #   (1 - context_influence) * np.square(np.abs(x - ws)) \
    #   + context_influence * np.square(np.abs(c - cs))
    return lambda context_influence, ws, c, cs: np.square(np.abs(x - ws))

def calc_context(last_winner_ix, ws, cs):
    """Context value for the next sample: previous winner's weight plus its
    context, or 0 before any winner exists.

    Fixes vs original: both branches lacked `return` (function always
    returned None); the body referenced undefined `w`/`c` instead of the
    `ws`/`cs` parameters; and `if last_winner_ix:` wrongly treated winner
    index 0 as "no winner" — use an explicit None check.
    """
    if last_winner_ix is not None:
        return ws[last_winner_ix] + cs[last_winner_ix]
    return 0

def mng(ws, cs, xs, context_influence=0.5, neighborhood_influence=1.0):
    """One merge-neural-gas training pass over the samples *xs*.

    Parameters
    ----------
    ws, cs : numpy arrays of unit weights and unit contexts.
    xs : iterable of samples.
    context_influence, neighborhood_influence : algorithm hyper-parameters
        (originally undefined free globals — now keyword args with defaults,
        so positional callers are unaffected).

    Returns (updated ws, updated cs, index of the last winner).

    Fixes vs original: `exp` and `ix_of_max` were undefined names — replaced
    with np.exp / np.argmax.
    """
    last_winner_ix = None
    for x in xs:
        c = calc_context(last_winner_ix, ws, cs)
        distances = merge_distance_from(x)(context_influence, ws, c, cs)
        ranks = get_order(distances)
        # NOTE(review): argsort yields sort-order *indices*, not per-unit
        # ranks; true per-unit ranks would need a double argsort — confirm
        # which is intended before relying on the neighborhood weighting.
        ranks = np.exp(-1 * ranks / neighborhood_influence)
        # move each unit (and its context) toward the sample, scaled by rank
        ws = ws + ranks * (x - ws)
        cs = cs + ranks * (c - cs)
        last_winner_ix = np.argmax(ranks)
    return (ws, cs, last_winner_ix)

def euclidian_distance(x, ws):
    """Euclidean distance from point *x* to each row of *ws*.

    Returns a (1, len(ws)) array, as produced by scipy's cdist.

    Fix: the metric name was misspelled 'euclidian', which makes cdist
    raise ValueError — scipy's metric is spelled 'euclidean'.
    (Function name kept as-is to preserve the public interface.)
    """
    return sc.spatial.distance.cdist([x], ws, 'euclidean')

def get_order(distances):
    """Return the indices that would sort *distances* ascending
    (closest unit first)."""
    ascending = distances.argsort()
    return ascending

def mse(x, ws):
    """Squared error of sample *x* against each unit in *ws*.

    Returns one score per unit: the squared difference for scalar units,
    or the mean squared difference over the last axis for vector units.

    Fix: the original called sklearn.metrics.mean_squared_error but sklearn
    is never imported (NameError at call time); reimplemented with numpy.
    NOTE(review): sklearn's function also returns a single scalar, which the
    caller (ng) could not rank with get_order — a per-unit score appears to
    be what was intended; confirm against the author.
    """
    sq_err = np.square(np.asarray(ws) - np.asarray(x))
    return sq_err.mean(axis=-1) if sq_err.ndim > 1 else sq_err

def ng(ws, xs, neighborhood_influence=1.0):
    """Plain neural-gas training pass over the samples *xs*.

    Returns (updated weights, index of the last winner).

    Fixes vs original: the parameter was named `x` but the loop iterated an
    undefined global `xs` (shadowing the parameter); `neighborhood_influence`
    was an undefined free variable (now a keyword parameter with a default,
    so positional callers are unaffected); `ranks.idxmax()` is a pandas
    method called on a numpy array — replaced with np.argmax.
    """
    winner_ix = None
    for x in xs:
        # rank units by distance, then weight updates exponentially by rank
        ranks = get_order(mse(x, ws))
        ranks = np.exp(-1 * ranks / neighborhood_influence)
        ws = ws + ranks * (x - ws)
        winner_ix = np.argmax(ranks)
    return (ws, winner_ix)

def initialise_ng(data, net_size):
    """Initialise a neural-gas network of *net_size* units from *data*.

    Weights are sampled (with replacement) from all values of the DataFrame;
    the second vector is uniform random, one entry per unit.

    Returns (ws, zs), both of shape (net_size,).

    Fix: `zs` was sized len(all_samples) instead of net_size, so it could
    never align element-wise with `ws`.
    """
    all_samples = data.values.flatten()
    ws = np.random.choice(all_samples, size=net_size, replace=True)
    zs = np.random.rand(net_size)
    return (ws, zs)

# dataset = DataFrame; one axis holds the samples (rows of series), the
# other the points within each sample — i.e. the columns are time steps.
def train(dataset):
    """Train a neural gas of 100 units on *dataset* (DataFrame: one axis is
    the samples, the other the points within each sample) and return the
    trained weights.

    Fix: initialise_ng returns a (weights, contexts) tuple; the original
    bound the whole tuple to `ws` and passed it to ng as the weight vector.
    """
    (ws, _) = initialise_ng(dataset, 100)
    for (_, x) in dataset.iterrows():
        (ws, _) = ng(ws, x)
    return ws

def reindex_ngram_group(year_min, year_max, group):
    """Expand *group* so it has one row for every year in
    [year_min, year_max]; new rows carry the group's ngram label and NaN
    data values.  Returns a frame with a fresh integer index and 'year'
    restored as a column."""
    years = np.arange(year_min, year_max + 1)
    label = group.ngram.iat[0]
    expanded = group.set_index('year').reindex(years)
    expanded = expanded.assign(ngram=label)
    return expanded.reset_index()

# Curry so the year range can be applied first, leaving a one-argument
# per-group callable suitable for groupby().apply() (see process_data).
reindex_ngram_group = curry(reindex_ngram_group)

def interpolate_ngram_group(group):
    """Fill NaN gaps in the group's year_fraction column by index-based
    interpolation, filling in both directions so edge NaNs are handled too.

    Idiom fix: the original assigned a lambda to a module name (PEP 8 E731);
    behavior is unchanged.
    """
    filled = group.year_fraction.interpolate(
        method='index', limit_direction='both')
    return group.assign(year_fraction=filled)

def normalize_ngram_group(group):
    """Min-max scale the group's year_fraction column to [0, 1].

    NOTE(review): a constant column yields 0/0 (NaN) exactly as in the
    original — preserved deliberately.

    Fix: locals were named `min`/`max`, shadowing the builtins; renamed.
    """
    year_fraction = group.year_fraction
    lo = year_fraction.min()
    hi = year_fraction.max()
    scaled = (year_fraction - lo) / (hi - lo)
    return group.assign(year_fraction=scaled)


def downsample_interpolated_ngram_group(year_interval, group):
    """Sum an interpolated per-year group into bins of *year_interval* years.

    Rows are assumed to be consecutive years starting at group.year.min()
    — TODO confirm (holds after reindex + interpolate in process_data).

    Fix: the original assigned the new PeriodIndex onto the caller's frame
    (`group.index = ...`), mutating the input in place; set_axis returns a
    new frame instead.
    """
    start_year = group.year.min()
    number_of_years = len(group)
    # one annual period per row, anchored at the group's first year.
    # NOTE(review): the 'A' alias is deprecated in pandas >= 2.2 in favour
    # of 'Y' — kept for compatibility with the rest of the file.
    period_index = pd.period_range(
        f'{start_year}-1-1', freq='1A', periods=number_of_years)
    indexed = group.set_axis(period_index, axis=0)
    return indexed.resample(rule=f'{year_interval}A').sum()

# Curry so the bin width can be applied first, leaving a one-argument
# per-group callable suitable for groupby().apply() (see process_data).
downsample_interpolated_ngram_group = \
        curry(downsample_interpolated_ngram_group)

def angle_from_origin(x, y):
    """Angle of point (x, y) as seen from the origin, normalized by pi/2
    so a vertical step maps to 1.0 and a horizontal one to 0.0.

    Idiom fix: the original assigned a lambda to a module name (PEP 8 E731);
    behavior is unchanged.
    """
    return np.arctan2(y, x) / (np.pi * 0.5)

def curve_to_angles(curve):
    """Convert a curve (sequence of values) to per-segment angles.

    Each output element is the normalized angle (arctan2 / (pi/2), the same
    formula as angle_from_origin) of the step to the next sample over a
    horizontal run of 1; a trailing NaN keeps the output the same length
    as the input.

    Fixes vs original: pipe() was handed the *result* of angle_from_origin
    rather than a callable, and the lambda indexed the raw `curve` argument
    instead of the converted array — the original raised TypeError.
    """
    values = np.asarray(curve)
    steps = values[1:] - values[:-1]
    angles = np.arctan2(steps, 1) / (np.pi * 0.5)
    return np.concatenate([angles, [np.nan]])


def curve_to_angles_ngram_group(group):
    """Replace the group's year_fraction values with the normalized angle of
    each step from the previous sample (via angle_from_origin over a rolling
    window of 2).  The first row of the rolling result is NaN (no
    predecessor), so dropna() removes it — note dropna() also drops any row
    with NaN in *other* columns.
    """
    # raw=True passes plain numpy arrays of length 2 to the lambda
    angles = group.year_fraction\
            .rolling(2)\
            .apply(lambda x: angle_from_origin(1, x[1] - x[0]), raw=True)
    return group.assign(year_fraction=angles).dropna()

def process_data(data):
    """Per-ngram preprocessing pipeline.

    For each ngram group (toolz.compose applies right-to-left):
      1. reindex to the full year range of the dataset,
      2. interpolate the year_fraction gaps that reindexing introduced,
      3. downsample into 3-year bins,
      4. min-max normalize to [0, 1].

    Idiom fix: the original assigned a lambda to a module name (PEP 8 E731);
    behavior is unchanged.
    """
    pipeline = compose(
        normalize_ngram_group,
        downsample_interpolated_ngram_group(3),
        interpolate_ngram_group,
        reindex_ngram_group(data.year.min(), data.year.max()),
    )
    return (
        data
        .groupby('ngram')
        .apply(pipeline)
        .reset_index(drop=True)
    )

#from bokeh.plotting import figure, output_file, show
import bokeh.plotting as plt
from bokeh.palettes import viridis
def plot(df, ngrams):
    """Plot one year_fraction line per ngram into 'plot.html' and open it.

    Assumes *df* is indexed (or groupable with .loc) by ngram so that
    df.loc[ngram] selects that ngram's rows — TODO confirm against caller.
    """
    plt.output_file('plot.html')
    p = plt.figure(plot_width=1000)
    # one colour per ngram from the viridis palette
    for ngram, color in zip(ngrams, viridis(len(ngrams))):
        data = df.loc[ngram]
        # NOTE(review): `legend=` is the legacy bokeh keyword; newer bokeh
        # expects `legend_label=` — verify the installed bokeh version.
        p.line(data.index.values, data.year_fraction, color=color, legend=ngram)
    # clicking a legend entry toggles that line's visibility
    p.legend.click_policy = "hide"
    plt.show(p)

# ngrams = df.groupby('ngram').head(1).reset_index().ngram.values
# plot(df,np.random.choice(ngrams,8))
