import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
from sklearn.svm.libsvm import predict
from matplotlib.collections import LineCollection

from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
from pandas import Series
import pandas as pd
from pandas.plotting import lag_plot
from pandas.plotting import autocorrelation_plot
from pandas import DataFrame
from pandas import concat
from pandas import Series
from statsmodels.graphics.tsaplots import plot_acf
from sklearn.metrics import mean_squared_error
from skimage.filters import threshold_otsu
from skimage.color import rgb2hsv
from skimage.morphology import disk
from skimage.filters.rank import median
import cv2
from statsmodels.tsa.ar_model import AR
from sklearn.metrics import mean_squared_error


def show_baseline(series):
    # Baseline model
    # create lagged dataset
    values = DataFrame(series.values)
    dataframe = concat([values.shift(1), values], axis=1)
    dataframe.columns = ['t-1', 't+1']

    # split into train and test sets
    X = dataframe.values
    train, test = X[1:len(X) - 7], X[len(X) - 7:]
    train_X, train_y = train[:, 0], train[:, 1]
    test_X, test_y = test[:, 0], test[:, 1]


    # persistence model
    def model_persistence(x):
        return x


    # walk-forward validation
    predictions = list()
    for x in test_X:
        yhat = model_persistence(x)
        predictions.append(yhat)
    test_score = mean_squared_error(test_y, predictions)
    print('Test MSE: %.3f' % test_score)
    # plot predictions vs expected
    plt.plot(test_y)
    plt.plot(predictions, color='red')
    plt.show()


def show_autocorrelation(series):
    autocorrelation_plot(series)
    plt.show()

    plot_acf(series, lags=25)
    plt.show()


def make_prediction(train, count=12):
    model = AR(train)
    model_fit = model.fit()
    predictions = model_fit.predict(start=len(train), end=len(train) + count - 1, dynamic=False)
    return predictions


def calculate_mistake(predictions, values):
    assert len(predictions) == len(values) == 12
    one_h = mean_squared_error(predictions[:2], values[:2])
    three_h = mean_squared_error(predictions[:6], values[:6])
    six_h = mean_squared_error(predictions, values)

    return [one_h, three_h, six_h]















# # n = 100
# y = [0.41580611169652265, 0.40919540229885054, 0.40566037735849064, 0.40377436748237244, 0.44341181330028917, 0.41882645656230566, 0.44241277847835225, 0.416269923411302, 0.4294080736247648, 0.4184930077228136, 0.46325213408286486, 0.46468479865064305, 0.47839634399667635, 0.43751295336787566, 0.4798664719382433, 0.48849372384937234, 0.49969027462316745, 0.43288590604026844, 0.45260761589403964, 0.4506108925243322, 0.44877644131065947, 0.47226855713094246, 0.45872129112352583, 0.46731485312370696, 0.4415990006246096, 0.3979144942648592, 0.4222684703433923, 0.417584701725213, 0.4198724542275252, 0.4306431273644388, 0.4182082727083766, 0.3929089778146382, 0.4265676567656766, 0.45760937827088133, 0.4249895528625157, 0.3857318070318888, 0.4728441127694859, 0.4501454092230993, 0.4281859692818597, 0.4294426919032597, 0.4539890249050232, 0.4518925777119898, 0.406494960806271, 0.45169385194479295, 0.44516670161459426, 0.4543173740330336, 0.512712754780416, 0.5098864114429955, 0.5426421404682275, 0.5666172421097225, 0.5549218419940853, 0.5810697868783954, 0.558891454965358, 0.5494320572149769, 0.5675562092876655, 0.58210197710718, 0.6035453597497393, 0.5760937827088131, 0.6079046424090339, 0.56105749055812, 0.5932523051131601, 0.6333757421543681, 0.5639949643306756, 0.52428810720268, 0.5934787211518103, 0.5756302521008404, 0.6154007114459092, 0.6518064652440312, 0.6471332209106239, 0.6136077278454432, 0.603658536585366, 0.5913572477449129, 0.5271693344566133, 0.5531250000000001, 0.5918582577515292, 0.5607082630691399, 0.5746787444701917, 0.5278481012658228, 0.5386406944738513, 0.5742929506120725, 0.5887830687830689, 0.5883966244725739, 0.6143695014662757, 0.5596733668341709, 0.5692148760330579, 0.6202294056308655, 0.5993343041397962, 0.6298850574712643, 0.619487610247795, 0.6835653438603781, 0.6502381445433837, 0.6875, 0.669445602334306, 0.6955440414507772, 0.6603929679420889, 0.6724529861541642, 0.7, 0.6748034753827058, 0.7529631940112289, 0.7430526752384901, 0.7806357172731074, 0.7827707599917167, 0.7604617604617604, 0.7498433911046147, 0.778441879637263, 0.7490713990920347, 0.7506719040727724, 0.7445043550394027, 0.7287432600580671, 0.7394468704512372, 0.7212435233160622, 0.7158243579121789, 0.6824802330420308, 0.7115544472152951, 0.6819217229240009, 0.7260045804705393, 0.6985705407085147, 0.6983309293220689, 0.6509060612372424, 0.6392760557520282, 0.6834547346514048, 0.6798837450695454, 0.6195719925202576, 0.7010631644777986, 0.7212841359182822, 0.6482686253934942, 0.7568363483382415, 0.731636060100167, 0.7707986688851912, 0.7935617860851507, 0.7889228085477541, 0.8723675813656668, 0.8788005871251835, 0.8807668000842638, 0.8235416234170645, 0.9330403850177862, 0.9102618243243243, 0.8966457023060797, 0.9660804020100503, 0.9009197324414716, 0.9706127553147145, 0.9628221377270807, 1.0085541414562904, 0.9120971117622436, 0.9529387157498432, 0.9326424870466321, 0.8952064743722765, 0.8302436992293273, 0.8727841501564129, 0.9241350562734475, 0.7993029930299302, 0.8853114478114478, 0.8408660920748371, 0.8590561224489796, 0.8508403361344539, 0.8432203389830508, 0.8577432233662535, 0.8334743969530258, 0.7551910299003323, 0.7849326599326599, 0.8291457286432161, 0.7719594594594597, 0.7988780386453356, 0.7319220289247537, 0.8396835311263794, 0.8359754808708517, 0.850387110274116, 0.8510370835952231, 0.841540404040404, 0.9036649214659686, 0.8442211055276382, 0.887083157450401, 0.8700811992504685, 0.9251615593079008, 0.9306046125077914, 0.9521541010770505, 1.0012594458438286, 0.960587002096436, 1.0842661034846885, 1.0779057591623036, 1.129575707154742, 1.0846493998736575, 1.1442650450828267, 1.2020736352094794, 1.1990846681922196, 1.1684873949579833, 1.1726648637403785, 1.1429762401000416, 1.2552160168598525, 1.1173640167364016, 1.124344726357727, 1.1488294314381269, 1.1469783112234155, 1.0918324164198057, 1.0777731442869058, 1.1747368421052633, 1.169712245326612, 1.1698429319371728, 1.1043150397989108, 1.1133263378803777, 1.075073191133417, 1.1362962962962964, 1.0985738255033557, 1.1101928374655647, 1.054805895785759, 1.1296060991105463, 1.1403029028186789, 1.2038404726735599, 1.0643759056096045, 1.1571878279118575, 1.1547241597970828, 1.1901542362138178, 1.18050918196995, 1.2884655641616076, 1.3087318087318087, 1.3776617954070982, 1.3735498839907192, 1.4300736067297581, 1.5052192066805847, 1.4870662460567823, 1.3594582593250444, 1.4328922495274101, 1.505258729490955, 1.5877551020408163, 1.5234885190646725, 1.501798180664269, 1.4998955940697432, 1.54591300693423, 1.5288522511097018, 1.5655454937473052, 1.5090252707581229, 1.4584996848077327, 1.4721119699185292, 1.4323646247125235, 1.3971281611658808, 1.3750260579528872, 1.2490044015929573, 1.2872811643113267, 1.2348979162281624, 1.180936313791649, 1.2609797297297298, 1.2807165437302424, 1.3248502994011975, 1.2778831962892683, 1.3986543313709, 1.3027754415475188, 1.340559882130078, 1.3476349937212222, 1.3515494137353434, 1.3399291814205374, 1.3370189240910058, 1.4480656013456683, 1.3780155233899727, 1.4937140421904964, 1.4546610169491525, 1.5987745615888442, 1.6971295905445336, 1.7946071202864968, 2.0101180438448565, 2.273030707610147, 2.0211794019933556, 1.937669376693767, 1.827168234064786, 1.9623078542851127, 1.8820201173512154, 1.8493555884217199, 1.8601694915254237, 1.8443555181128894, 1.8488248994283294, 1.890708058685945, 1.784330244313395, 1.8656842105263158, 1.8862518708573872, 1.84984984984985, 1.947894512973203, 2.0528846153846154, 2.0541107382550337, 2.129045996592845, 2.170235096557515, 2.320217664294684, 2.3190106895828966, 2.360884992694636, 2.568606627017842, 2.605958166068033, 2.612189522020455, 2.7130508117225385, 2.6496304118268217, 2.7263775403310286, 2.66610455311973, 2.7033933808127357, 2.662839248434238, 2.676304211187932, 2.6813376483279394, 2.549385332768122, 2.3810344827586207, 2.4624116513171983, 2.350245150287785, 2.3075127334465195, 2.210267379679144, 2.110242872228089]
# n = len(y)
# x = np.arange(n)
#
# # print(x)
#
# rs = check_random_state(0)
# # y = rs.randint(-50, 50, size=(n,)) + 50. * np.log1p(np.arange(n))
# # print(y)
#
# # #############################################################################
# # Fit IsotonicRegression and LinearRegression models
#
# ir = IsotonicRegression()
#
# y_ = ir.fit_transform(x, y)
#
# lr = LinearRegression()
# lr.fit(x[:, np.newaxis], y)  # x needs to be 2d for LinearRegression
#
# # #############################################################################
# # Plot result
#
# segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
# print(segments)
# lc = LineCollection(segments, zorder=0)
# lc.set_array(np.ones(len(y)))
# lc.set_linewidths(np.full(n, 0.5))
#
# fig = plt.figure()
# plt.plot(x, y, 'r.', markersize=12)
# plt.plot(x, y_, 'g.-', markersize=12)
# plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
# plt.gca().add_collection(lc)
# plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
# plt.title('Isotonic regression')
# plt.show()