import datetime
import numpy as np
from matplotlib import pyplot as plt
from numpy.core.fromnumeric import shape
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, random_split, SubsetRandomSampler
from tqdm import tqdm
from sklearn.metrics import r2_score
import os
from model import Transformer
from model import OZELoss
from dataset import OzeDataset
from utils import compute_loss, fit, Logger, kfold
from benchmark import LSTM, BiGRU, BiGRUaffine, cnnGru, FullyConv, FullyConv4layer, cnnLSTM, FFN, convGRU, convLSTM
from benchmark import FullyConvAffine, LSTMaffine, cnnLSTMaffine, convLSTMaffine, cnnGruaffine,convGRUaffine
from metrics import MSE
import matplotlib.pyplot as plt
import matplotlib as mpl

# Global plot font size for all figures below.
mpl.rcParams['font.size'] = 13
# Pin training to the second GPU; torch will see it as cuda:0.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# Run from the script's own directory so relative paths resolve consistently.
current_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(current_path)
# Dataset archive (npz) located next to this script.
DATASET_FILE_NAME = '2U6U3mFACmax_length_96.npz'
# Absolute path to the dataset file.
DATASET_PATH = os.path.join(os.getcwd(), DATASET_FILE_NAME)
# Training parameters
BATCH_SIZE = 20
NUM_WORKERS = 0
# LR values can affect the prediction value largely, 1e-4 may be good.
LR = 2e-4
EPOCHS = 200
# Model parameters
d_model = 512 # Latent dim
# d_model = 512 # Lattent dim
q = 8 # Query size
v = 8 # Value size
# h = 12 # Number of heads
h = 8 # Number of heads
N = 8 # Number of encoder and decoder to stack
attention_size = 0 # Attention window size
dropout = 0.3 # Dropout rate
pe = None # Positional encoding (None = disabled)
# pe = 'regular' # Positional encoding
chunk_mode = None
# chunk_mode = 'window'

# Number of input variables per time step.
# d_input nums
d_input  = 3
# Number of output variables per time step.
d_output = 2
# Prefer GPU when available (CUDA_VISIBLE_DEVICES above maps it to cuda:0).
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"Using device {device}")

# Load the full dataset once; the three loaders below share it via samplers.
ozeDataset = OzeDataset(DATASET_PATH)
# original split dataset in randomly way
# dataset_train, dataset_val, dataset_test = random_split(ozeDataset, (816, 100, 178))
#
# Creating data indices for training, validation and test splits (dataset size ~1094).
dataset_size = len(ozeDataset)
indices = list(range(dataset_size))
# Predict one sequential data series at a time: test split is a single sample.
# NOTE(review): train [0:989] and val [800:990] OVERLAP on indices 800-988, so the
# validation loss is partly measured on training data (leakage) — confirm intended.
train_indices, val_indices, test_indices = indices[:989], indices[800:990], indices[1086:1087]

# Creating data samplers and DataLoaders; SubsetRandomSampler shuffles within each split.
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(val_indices)
test_sampler = SubsetRandomSampler(test_indices)
# Split into train, val, test loaders over the same underlying dataset.
dataloader_train = DataLoader(ozeDataset, batch_size=BATCH_SIZE, sampler = train_sampler, drop_last=False)
dataloader_val = DataLoader(ozeDataset, batch_size=BATCH_SIZE, sampler = valid_sampler, drop_last=False)
dataloader_test = DataLoader(ozeDataset, batch_size=BATCH_SIZE, sampler = test_sampler, drop_last=False)
# Lengths here are in BATCHES, not samples.
print("length of dataloader train val and test:", len(dataloader_train), len(dataloader_val), len(dataloader_test))

# Define the Transformer network architecture and move it to the chosen device.
net = Transformer(d_input, d_model, d_output, q, v, h, N, attention_size=attention_size, dropout=dropout, chunk_mode=chunk_mode, pe=pe).to(device)
optimizer = optim.Adam(net.parameters(), lr=LR)
# OZELoss with alpha=0.3; exact weighting semantics are defined in model.OZELoss.
loss_function = OZELoss(alpha=0.3)
# Model checkpointing is currently disabled (see commented save path / torch.save below).
# model_save_path = f'model_{datetime.datetime.now().strftime("%Y_%m_%d__%H%M%S")}.pth'
# Best validation loss seen so far; used as the checkpoint criterion.
val_loss_best = np.inf

# Pre-allocate per-epoch loss history for plotting after training.
hist_loss = np.zeros(EPOCHS)
hist_loss_val = np.zeros(EPOCHS)

# Main training loop: one tqdm bar per epoch, progress measured in samples.
for idx_epoch in range(EPOCHS):
    running_loss = 0
    with tqdm(total = len(train_indices), desc = f"[Epoch {idx_epoch + 1:3d}/{EPOCHS}]") as pbar:
        for idx_batch, (x, y) in enumerate(dataloader_train):
            optimizer.zero_grad()
            # Propagate input
            netout = net(x.to(device))
            # Compute loss
            loss = loss_function(y.to(device), netout)
            # Backpropagate loss
            loss.backward()
            # Update weights
            optimizer.step()
            running_loss += loss.item()
            pbar.set_postfix({'loss': running_loss/(idx_batch + 1)})
            pbar.update(x.shape[0])
        # BUG FIX: running_loss accumulates per-batch MEAN losses, so the epoch
        # average must divide by the number of batches (matching the running
        # value shown on the progress bar), not by the number of samples.
        train_loss = running_loss / len(dataloader_train)
        # Validation loss on the val split.
        # NOTE(review): net is still in train() mode here, so dropout is active
        # during validation unless compute_loss toggles eval() itself — confirm.
        val_loss = compute_loss(net, dataloader_val, loss_function, device).item()
        pbar.set_postfix({'loss': train_loss, 'val_loss': val_loss})

        hist_loss[idx_epoch] = train_loss
        hist_loss_val[idx_epoch] = val_loss

        # Track the best validation loss; checkpointing is currently disabled.
        if val_loss < val_loss_best:
            val_loss_best = val_loss
            # Save Model
            # torch.save(net.state_dict(), model_save_path)
# print(f"model exported to {model_save_path} with loss {val_loss_best:5f}")

# Plot training vs. validation loss history.
# BUG FIX: plt.figure(0) followed by plt.figure(figsize=...) created a stray empty
# figure; pass num and figsize in a single call instead.
plt.figure(0, figsize=(8, 6))
plt.plot(hist_loss, marker='o', markersize=2, label = 'Training loss')
plt.plot(hist_loss_val, marker='o', markersize=2, label = 'Validation loss')
# BUG FIX: x-limit was hard-coded to (0, 105), hiding epochs 105..EPOCHS when
# EPOCHS > 105; derive the limit from EPOCHS instead.
plt.xlim((0, EPOCHS + 5))
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend(loc='upper right')
plt.grid(axis='y')
# plt.savefig(DATASET_FILE_NAME[:-4]+"_hist_loss", dpi=300)
print("saving loss fig0.")
# plt.show()

# Switch to eval mode (disables dropout) before inference.
_ = net.eval()

# Run inference on the test split and collect all predictions.
# IMPROVEMENT: take the sequence length from the dataset targets instead of
# hard-coding 96, so the script adapts to datasets with other lengths.
seq_len = ozeDataset._y.shape[1]
predictions = np.empty(shape = (len(test_indices), seq_len, d_output))

idx_prediction = 0
# Predict one batch at a time, mirroring the training loader setup.
with torch.no_grad():
    # BUG FIX: tqdm iterating a DataLoader advances once per BATCH, so the total
    # must be the number of batches, not the number of samples.
    for x, y in tqdm(dataloader_test, total = len(dataloader_test)):
        # Predict the testing data (move result back to CPU as a numpy array).
        netout = net(x.to(device)).cpu().numpy()
        # Fill the predicted batch into the pre-allocated predictions array.
        predictions[idx_prediction :idx_prediction + x.shape[0]] = netout
        idx_prediction += x.shape[0]

# Input-variable labels and ground-truth targets for the test split.
labels = ozeDataset.labels['X']
dataset_y = ozeDataset._y[test_indices].numpy()

# --- Figure 1: actual vs. predicted curve for output channel 0 ---
y_pred = predictions[:, :, 0]
y_true = dataset_y[:, :, 0]
# Undo the dataset normalization for channel 0.
y_pred = ozeDataset.rescale(y_pred, 0)
y_true = ozeDataset.rescale(y_true, 0)

# Choose one test series at random to draw (relevant when predicting several at once).
idx = np.random.randint(0, y_true.shape[0])
y_true = y_true[idx]
y_pred = y_pred[idx]

# Frequency axis: 96 points over 50.4-59.9 GHz.
# NOTE(review): this assumes the sequence length is 96 — confirm against the dataset.
t = np.linspace(50.4, 59.9, 96, endpoint=True)
# BUG FIX: plt.figure(1) followed by plt.figure(figsize=...) created a stray empty
# figure; pass num and figsize in a single call instead.
plt.figure(1, figsize=(8, 6))
# t = np.arange(51.1, 60, 0.1)
plt.plot(t, y_true, color ='red', marker='o', markersize=2, label="Equiphase surface hight_actual")
plt.plot(t, y_pred, color ='dimgray', marker='s',  markersize=2, label="Equiphase surface hight_predicted")
plt.xlim((50, 60.2))
plt.xlabel('Frequency(GHz)')
# plt.ylabel('Scanning Height /m')
plt.ylabel('Electric field value(V/m)')
plt.legend(loc='upper left')
# Deduplicated: plt.grid(axis='y') was previously called twice.
plt.grid(axis='y')
plt.tight_layout()
# plt.title('Bias map', size = 14)

# Export the figure in several formats plus raw CSV data.
# plt.savefig(DATASET_FILE_NAME[:-4] + "_MeamtHigt_map", dpi=300)
plt.savefig(DATASET_FILE_NAME[:-4] + "_SphereEt_map", dpi=300)
plt.savefig(DATASET_FILE_NAME[:-4] + "_SphereEt_map"+"fig.svg", dpi=300)
plt.savefig(DATASET_FILE_NAME[:-4] + "_SphereEt_map"+"fig.pdf", dpi=300)
plt.savefig(DATASET_FILE_NAME[:-4] + "_SphereEt_map"+"fig.tif", dpi=300)
np.savetxt(DATASET_FILE_NAME[:-4] + "_SphereEt.csv", y_true, delimiter=',')
np.savetxt(DATASET_FILE_NAME[:-4] + "_SphereEt_pred.csv", y_pred, delimiter=',')

# np.savetxt(DATASET_FILE_NAME[:-4] + "_MeamtHight.csv", y_true, delimiter=',')
# np.savetxt(DATASET_FILE_NAME[:-4] + "_MeamtHight_pred.csv", y_pred, delimiter=',')
# plt.show()
print("saving fig1.")

# --- Figure 2: actual vs. predicted curve for output channel 1 ---
# BUG FIX: plt.figure(2) followed by plt.figure(figsize=...) created a stray empty
# figure; pass num and figsize in a single call instead.
plt.figure(2, figsize=(6, 4))
y_pred = predictions[:, :, 1]
y_true = dataset_y[:, :, 1]
# Undo the dataset normalization for channel 1.
y_pred = ozeDataset.rescale(y_pred, 1)
y_true = ozeDataset.rescale(y_true, 1)
# Choose one test series at random to draw.
idx = np.random.randint(0, y_true.shape[0])
y_true = y_true[idx]
y_pred = y_pred[idx]
# t = np.arange(51.1, 60, 0.1)
# t = np.arange(51.6, 60, 0.1)
plt.plot(t, y_true, color ='red', marker='o', markersize=2, label="E_actual")
plt.plot(t, y_pred, color ='dimgray', marker='s',  markersize=2, label="E_predicted")
plt.xlim((50, 60))
plt.xlabel('Frequency(GHz)')
plt.ylabel('Electric field value(V/m)')
plt.legend(loc='upper left')
# Deduplicated: plt.grid(axis='y') was previously called twice.
plt.grid(axis='y')
# BUG FIX: this previously saved under the same base name as figure 1
# ("_SphereEt_map"), silently overwriting it; use a distinct file name.
plt.savefig(DATASET_FILE_NAME[:-4]+"_SphereEt_map2", dpi=300)
# np.savetxt("test02-1.csv", y_true, delimiter=',')
# np.savetxt("test02-2.csv", y_pred, delimiter=',')
# np.savetxt(DATASET_FILE_NAME[:-4] + "_SphereEt.csv", y_true, delimiter=',')
# np.savetxt(DATASET_FILE_NAME[:-4] + "_SphereEt_pred.csv", y_pred, delimiter=',')
# plt.show()
print("saving fig2.")

# --- Figure 3: prediction error (bias) for output channel 0 ---
# BUG FIX: plt.figure(3), plt.figure(figsize=...) and plt.subplots(...) created
# three figures with two left empty; a single plt.subplots call is sufficient.
y_pred = predictions[:, :, 0]
y_true = dataset_y[:, :, 0]
y_pred = ozeDataset.rescale(y_pred, 0)
y_true = ozeDataset.rescale(y_true, 0)
# NOTE(review): reuses `idx` drawn for figure 2, so this error curve may belong
# to a DIFFERENT test series than the one plotted in figure 1 — confirm intended.
y_true = y_true[idx]
y_pred = y_pred[idx]
# Signed prediction error per frequency point.
diff = y_pred - y_true
# t = np.arange(51.1, 60, 0.1)
# t = np.arange(51.6, 60, 0.1)
fig, vax = plt.subplots(1, 1, figsize=(6, 4))
vax.plot(t, diff, 'o', markersize=3)
# Stem-style vertical lines from zero to each error value.
vax.vlines(t, [0], diff)
# By using `transform=vax.get_xaxis_transform()` the y coordinates are scaled
# such that 0 maps to the bottom of the axes and 1 to the top.
# vax.vlines([1, 2], 0, 1, transform=vax.get_xaxis_transform(), colors='r')
vax.set_xlabel('Frequency(GHz)')
# vax.set_ylabel('Deviation(V/m)')
# vax.set_title('Predictions error')
plt.xlim((50, 60.2))
plt.ylabel('Deviation(V/m)')
# plt.legend(loc='upper left')
vax.grid(axis="y")
plt.tight_layout()
# plt.title('Bias map', size = 14)

# Export the bias figure in several formats plus raw CSV data.
plt.savefig(DATASET_FILE_NAME[:-4]+"_bias_map", dpi=300)
plt.savefig(DATASET_FILE_NAME[:-4] + "_bias_map"+"fig.svg", dpi=300)
plt.savefig(DATASET_FILE_NAME[:-4] + "_bias_map"+"fig.pdf", dpi=300)
plt.savefig(DATASET_FILE_NAME[:-4] + "_bias_map"+"fig.tif", dpi=300)
np.savetxt(DATASET_FILE_NAME[:-4] + "_bias_map.csv", diff, delimiter=',')

print("saving fig3.")

