import math
import os

import numpy as np
import numpy.random
import torch

import utils.comparator
import algorithms.algorithm
from utils.tensorbackend import get_backend, get_backend_by_name
from utils import noise
from utils import criterions
from utils.misc import compose_matrix
import matplotlib.pyplot as plt

# Workaround for the "duplicate OpenMP runtime" abort that can occur when
# NumPy and PyTorch each load their own libiomp on the same process.
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

# Experiment configuration passed through to the comparator.
# NOTE(review): presumably n = filter length and L = samples per run —
# confirm against EvenComparator's signature.
n = 10
L = 1000

# The suite of adaptive-filter algorithms to benchmark, each paired with
# the label used in the resulting plot legend.
algo_suite = (
    (algorithms.algorithm.NLMS(0.1), "NLMS"),
    (algorithms.algorithm.LMS(0.1), "LMS 0.1"),
    (algorithms.algorithm.LMS(0.15), "LMS 0.15"),
    (algorithms.algorithm.LMS(0.05), "LMS 0.05"),
    (algorithms.algorithm.MCC(0.1, 2), "MCC"),
)
cmpor = utils.comparator.EvenComparator(algos=algo_suite)

# Run each algorithm 100 times under zero-mean Laplacian noise, score with
# the L2-norm criterion, and plot the results on a linear (non-log) axis.
cmpor.compareAndPlot(noiseGenerator=noise.LaplaceNoiseGenerator(loc=0, scale=1), n=n, L=L, repeat=100,
                     criterion=criterions.L2Norm,
                     title="Plot", needLog=False)

# print(compose_matrix([1, 0, 1], [1, 0, 0], backend=get_backend_by_name("numpy")))
#
# backend = get_backend_by_name("numpy")
#
# dt = 0.01
# Len = 1000
# A_k = compose_matrix([1, dt, 0],
#                      [0, 1, dt],
#                      [0, 0, 1], backend=backend)
#
# C_k = numpy.random.randn(3, 3)
# xk = compose_matrix([0, ], [0, ], [1, ], backend=backend)
# wk = numpy.random.randn(3, Len) * 0.01
# vk = numpy.random.randn(3, Len) * 0.3
# Q = numpy.matmul(wk, wk.T) / Len
# R = numpy.matmul(vk, vk.T) / Len
#
# x_real = [xk]
# target = []
# x_value_real = []
#
# for i in range(0, Len):
#     x_real.append(numpy.matmul(A_k, x_real[-1]) + numpy.expand_dims(wk[:, i], axis=1))
#     x_value_real.append(x_real[-1][0])
#
# for x in range(0, len(x_real) - 1):
#     target.append(numpy.matmul(C_k, x_real[x]) + numpy.expand_dims(vk[:, x], axis=1))
# error = []
# filter = algorithms.algorithm.KalmanFiltering(A_k, C_k, Q, R)
# x_predic = compose_matrix([0, ], [0, ], [1, ], backend=backend)
# x_value_pred = []
# for i in range(0, Len):
#     x_predic = filter.iterate(target[i], x_predic)
#     error.append(numpy.linalg.norm(x_predic - x_real[i]))
#     x_value_pred.append(x_predic[0])
#
# x = np.linspace(0, len(error), len(error))
# plt.plot(x, error, label="error")
# plt.plot(x, x_value_real, label="x_value_real")
# plt.plot(x, x_value_pred, label="x_value_pred")
# plt.legend(loc='upper right')
# plt.show()


# Torch-backend variant of the Kalman-filtering example above (kept for reference):
# device = "cuda:0"
# backend = get_backend_by_name("torch")
#
# dt = 0.01
# Len = 1000
# A_k = compose_matrix([1, dt, 0],
#                      [0, 1, dt],
#                      [0, 0, 1], backend=backend, device=device)
#
# C_k = torch.randn(3, 3).to(device)
# xk = compose_matrix([0, ], [0, ], [1, ], backend=backend, device=device)
# wk = torch.randn(3, Len).to(device) * 0.01
# vk = torch.randn(3, Len).to(device) * 0.3
# Q = torch.mm(wk, wk.T) / Len
# R = torch.mm(vk, vk.T) / Len
#
# x_real = [xk]
# target = []
# x_value_real = []
#
# for i in range(0, Len):
#     x_real.append(torch.mm(A_k, x_real[-1]) + torch.unsqueeze(wk[:, i], dim=1))
#     x_value_real.append(x_real[-1][0].detach().cpu().numpy())
#
# for x in range(0, len(x_real) - 1):
#     target.append(torch.mm(C_k, x_real[x]) + torch.unsqueeze(vk[:, x], dim=1))
# error = []
# filter = algorithms.algorithm.KalmanFiltering(A_k, C_k, Q, R)
# x_predic = compose_matrix([0, ], [0, ], [1, ], backend=backend, device=device)
# x_value_pred = []
# for i in range(0, Len):
#     x_predic = filter.iterate(target[i], x_predic)
#     error.append(torch.linalg.norm(x_predic - x_real[i]).detach().cpu().numpy())
#     x_value_pred.append(x_predic[0].detach().cpu().numpy())
#
# x = np.linspace(0, len(error), len(error))
# plt.plot(x, error, label="error")
# plt.plot(x, x_value_real, label="x_value_real")
# plt.plot(x, x_value_pred, label="x_value_pred")
# plt.legend(loc='upper right')
# plt.show()
