import datetime
import numpy as np
from matplotlib import pyplot as plt
from numpy.core.fromnumeric import shape
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, random_split, SubsetRandomSampler
from tqdm import tqdm
from sklearn.metrics import r2_score
from trans_model_with_MoE import Transformer

from trans_model_with_MoE import OZELoss
from dataset import OzeDataset
from utils import compute_loss2, compute_loss, fit, Logger, kfold
from benchmark import LSTM, BiGRU, BiGRUaffine, cnnGru, FullyConv, FullyConv4layer, cnnLSTM, FFN, convGRU, convLSTM
from benchmark import FullyConvAffine, LSTMaffine, cnnLSTMaffine, convLSTMaffine, cnnGruaffine,convGRUaffine
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
from time import time
import matplotlib as mpl
mpl.rcParams['font.size'] = 11  # global font size for all figures

import os
# Run relative to this script's directory so the dataset/figure paths resolve.
current_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(current_path)
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # restrict visibility to the first GPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"Using device {device}")
# Wall-clock start of the whole run.
starttime = datetime.datetime.now()
# Dataset file, expected next to this script (after the chdir above).
DATASET_FILE_NAME = '6U2U3mFAC_length_128_VH_dataset.npz'
DATASET_PATH = os.path.join(os.getcwd(), DATASET_FILE_NAME)
# Training parameters
BATCH_SIZE = 64
NUM_WORKERS = 0  # NOTE(review): never passed to the DataLoaders below
# LR values less than 1e-4 may be good
LR = 2e-5
EPOCHS = 250
# Model parameters
d_model = 512 # Latent dim, 512, 768
q = 8 # Query size
v = 8 # Value size
h = 12 # Number of heads
N = 8 # Number of encoder and decoder layers to stack
attention_size = None # Attention window size (None appears to mean full attention — confirm in Transformer)
dropout = 0.2 # Dropout rate
pe = None  # Positional encoding
# pe = 'regular'
# pe = 'original'  
chunk_mode = None
# chunk_mode = 'window'
d_input  = 7
d_output = 3
time_Count = []  # NOTE(review): assigned but never used in this file
# load dataset
ozeDataset = OzeDataset(DATASET_PATH)
# original split dataset in randomly way
# dataset_train, dataset_val, dataset_test = random_split(ozeDataset, (816, 100, 178))
# Creating data indices for training, validation and test splits 1054
dataset_size = len(ozeDataset)
indices = list(range(dataset_size))
# predict one sequential data series at one time
# NOTE(review): the validation indices [870:926] are a subset of the training
# indices [:926], so validation data leaks into training — confirm intended.
train_indices, val_indices, test_indices = indices[:926], indices[870:926], indices[1053:1054]

# Creating data samplers and DataLoaders.
# NOTE(review): SubsetRandomSampler shuffles each split every epoch; for the
# single-element test split the order is irrelevant, but with more test
# indices the prediction order would not match the index order.
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(val_indices)
test_sampler = SubsetRandomSampler(test_indices)
dataloader_train = DataLoader(ozeDataset, batch_size=BATCH_SIZE, sampler = train_sampler, drop_last=False)
dataloader_val = DataLoader(ozeDataset, batch_size=BATCH_SIZE, sampler = valid_sampler, drop_last=False)
dataloader_test = DataLoader(ozeDataset, batch_size=BATCH_SIZE, sampler = test_sampler, drop_last=False)
# Lengths here are in batches, not samples.
print("length of train val and test:", len(dataloader_train), len(dataloader_val), len(dataloader_test))
# Build the MoE transformer model on the selected device.
net = Transformer(d_input, d_model, d_output, q, v, h, N, attention_size=attention_size, dropout=dropout, chunk_mode=chunk_mode, pe=pe).to(device)

# Tag used in output figure/CSV filenames (encodes heads, depth, pe, batch size, variant, timestamp).
Postfix = 'net_'+str(h)+'_'+str(N)+'_'+str(pe)+str(BATCH_SIZE)+'_8-experts-SparseMoE'+f'_{datetime.datetime.now().strftime("%Y_%m_%d_%H%M")}'

torch.cuda.empty_cache()
def runBatch():
    """Train the global `net` on the global dataloaders, checkpoint the
    best-validation model, plot the loss curves, and export predictions
    (figure + CSVs) for the test split.

    Reads module-level globals: net, dataloader_train/val/test,
    train_indices/test_indices, ozeDataset, EPOCHS, LR, d_output,
    Postfix, DATASET_FILE_NAME, device.
    """
    torch.cuda.empty_cache()
    loss_function = OZELoss(alpha=0.3)
    # Checkpoint path for the best-validation-loss weights.
    model_save_path = f'model_{datetime.datetime.now().strftime("%Y_%m_%d__%H%M%S")}.pth'
    val_loss_best = np.inf
    # Per-epoch loss history (both stored as means so the curves share a scale).
    hist_loss = np.zeros(EPOCHS)
    hist_loss_val = np.zeros(EPOCHS)
    optimizer = optim.Adam(net.parameters(), lr=LR)

    for idx_epoch in range(EPOCHS):
        # Fix: was pre-allocated as np.zeros(EPOCHS) outside the loop and then
        # shadowed by this scalar — the array was dead code.
        running_loss = 0.0
        with tqdm(total = len(train_indices), desc = f"[Epoch {idx_epoch + 1:3d}/{EPOCHS}]") as pbar:
            for idx_batch, (x, y) in enumerate(dataloader_train):
                # Propagate input
                optimizer.zero_grad()
                netout = net(x.to(device))
                # Compute loss
                loss = loss_function(y.to(device), netout)
                # Backpropagate loss
                loss.backward()
                # Update weights
                optimizer.step()
                running_loss += loss.item()
                pbar.set_postfix({'running_loss': running_loss/(idx_batch+1)})
                pbar.update(x.shape[0])

            # NOTE(review): running_loss sums per-batch mean losses, so dividing
            # by the sample count (not the batch count) mixes units — kept as in
            # the original; confirm which normalization is intended.
            train_loss = running_loss / len(train_indices)
            # Evaluate on the validation loader (t is an extra arg compute_loss expects).
            t = torch.tensor([1], device=device)
            val_loss = compute_loss(net, t, dataloader_val, loss_function, device).item()
            pbar.set_postfix({'loss': train_loss, 'val_loss': val_loss})
            # Fix: store the normalized train loss (was the raw running sum,
            # which put the two curves of the loss plot on different scales).
            hist_loss[idx_epoch] = train_loss
            hist_loss_val[idx_epoch] = val_loss

            if val_loss < val_loss_best:
                val_loss_best = val_loss
                # Fix: actually checkpoint the best model — the save was
                # commented out, so the "best loss" reported at the end never
                # corresponded to any saved weights.
                torch.save(net.state_dict(), model_save_path)
                print(f"saved best model to {model_save_path} (val_loss {val_loss:.6f})\n")

    # Also export the final-epoch weights (distinct from the best checkpoint).
    final_model_path = 'final_saved_model_' + model_save_path
    torch.save(net.state_dict(), final_model_path)
    print(f"final model exported to {final_model_path}; "
          f"best val loss {val_loss_best:5f} checkpointed at {model_save_path}")

    # Loss curves. Fix: a single figure (plt.figure(0) followed by
    # plt.figure(figsize=...) used to create an extra empty figure 0).
    plt.figure(figsize=(6, 4.5))
    plt.plot(hist_loss, marker='o', markersize=2, label = 'Training loss')
    plt.plot(hist_loss_val, marker='o', markersize=2, label = 'Validation loss')
    plt.xlim((0, EPOCHS+5))
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend(loc='upper right')
    plt.grid(axis='y')
    plt.savefig(DATASET_FILE_NAME[:9] + Postfix, dpi=300)
    print("saving loss figure.")
    plt.close('all')

    _ = net.eval()

    # Predictions buffer: (num test series, sequence length 128, outputs).
    predictions = np.empty(shape = (len(test_indices), 128, d_output))  # seq len: 96 / 128 / 192

    idx_prediction = 0
    with torch.no_grad():
        for x, y in tqdm(dataloader_test, total = len(test_indices)):
            netout = net(x.to(device)).cpu().numpy()
            predictions[idx_prediction:idx_prediction + x.shape[0]] = netout
            idx_prediction += x.shape[0]

    # Ground truth for the test indices (normalized space).
    # NOTE(review): dataloader_test uses SubsetRandomSampler, so with more
    # than one test index the prediction order would not match this slice.
    dataset_y = ozeDataset._y[test_indices].numpy()

    # Frequency axis matching the 128-point sequence.
    # xx = np.linspace(50.4, 59.9, 96, endpoint=True)
    xx = np.linspace(47.2, 59.9, 128, endpoint=True)  # 96  128  192
    # Channel 2 is the alpha (angle) output; rescale back to physical units.
    y_pred = ozeDataset.rescale(predictions[:, :, 2], 2)
    y_true = ozeDataset.rescale(dataset_y[:, :, 2], 2)

    # Pick one series at random when several are predicted at once.
    idx = np.random.randint(0, y_true.shape[0])
    y_true = y_true[idx]
    y_pred = y_pred[idx]
    plt.figure(figsize=(6, 4.5))
    plt.plot(xx, y_true, color ='red', marker='o', markersize=2, label='Actual '+'E$_T$'+" on equiphase surface")
    plt.plot(xx, y_pred, color ='dimgray', marker='s',  markersize=2, label='Predicted '+'E$_y$'+" on scanning plane")
    plt.grid(axis='y')
    # plt.xlim((50, 60.2))
    plt.xlim((47.0, 60.2))
    plt.xlabel('Frequency(GHz)')
    plt.ylabel('Angle α(°)')
    plt.legend(loc='upper left')
    plt.grid(axis='y')
    plt.tight_layout()
    # Save figure and raw curves.
    plt.savefig(DATASET_FILE_NAME[:9] + "_Equiphase_alpha_map_"+Postfix+"_fig.png", dpi=300)
    np.savetxt(DATASET_FILE_NAME[:9] + "_Equiphase_alpha_actual_"+Postfix+".csv", y_true, delimiter=',')
    np.savetxt(DATASET_FILE_NAME[:9] + "_Equiphase_alpha_pred_"+Postfix+".csv", y_pred, delimiter=',')
    print("saving alpha fig.1.")
    plt.close('all')


### Postfix = 'FullyConv_128_200_01'
### net = FullyConv(d_input, 128, d_output, dropout=0.2).to(device)
# runBatch()
# NOTE(review): reassigning BATCH_SIZE here cannot affect the DataLoaders,
# which were already constructed above with the original value; it only
# feeds the Postfix filename strings built below.
BATCH_SIZE = 64

# # BATCH_SIZE = 32
# h = 8 # Number of heads
# N = 6 # Number of encoder and decoder to stack
# Postfix = 'net_'+str(h)+'_'+str(N)+'_'+str(pe)+str(BATCH_SIZE)+'_8-experts-SparseMoE'+f'_{datetime.datetime.now().strftime("%Y_%m_%d_%H%M")}'
# net = Transformer(d_input, d_model, d_output, q, v, h, N, attention_size=attention_size, dropout=dropout, chunk_mode=chunk_mode, pe=pe).to(device)
# runBatch()


# Active experiment: rebuild the model with these parameters and train it.
# (h and N are re-set to the same values as above; d_model likewise.)
d_model = 512 # Latent dim, 512, 768
h = 12 # Number of heads
N = 8 # Number of encoder and decoder layers to stack
Postfix = 'net_'+str(h)+'_'+str(N)+'_'+str(pe)+str(BATCH_SIZE)+'_CNN-value'+f'_{datetime.datetime.now().strftime("%Y_%m_%d_%H%M")}'
net = Transformer(d_input, d_model, d_output, q, v, h, N, attention_size=attention_size, dropout=dropout, chunk_mode=chunk_mode, pe=pe).to(device)
runBatch()


# BATCH_SIZE = 64
# h = 10 # Number of heads
# N =8 # Number of encoder and decoder to stack
# Postfix = 'net_'+str(h)+'_'+str(N)+'_'+str(pe)+str(BATCH_SIZE)+'_8-experts-SparseMoE'+f'_{datetime.datetime.now().strftime("%Y_%m_%d_%H%M")}'
# net = Transformer(d_input, d_model, d_output, q, v, h, N, attention_size=attention_size, dropout=dropout, chunk_mode=chunk_mode, pe=pe).to(device)
# runBatch()

# EPOCHS = 300
# Postfix = Postfix + 'cnnLSTM_512_3_01'
# net = cnnLSTM(d_input, 512, d_output, 3, dropout=dropout, bidirectional=False).to(device)
# runBatch()

# Postfix = Postfix + 'cnnGru_512_3_01'
# net = cnnGru(d_input, 512, d_output, 3, dropout=dropout, bidirectional=False).to(device)
# runBatch()