# -*- coding: utf-8 -*-
import os
import sys

import numpy as np
import pandas as pd
import math
import pymongo
import torch
from torch import nn

import ParallelNet_1d  # type: ignore

sys.path.append("/gpfs/scratch/chgwang/XI/Scripts/Refactoring_1/getData")
import FedSeq  # type: ignore
import matplotlib
# Select the non-interactive Agg backend BEFORE pyplot is imported so the
# script is safe on headless cluster nodes.
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from tqdm import tqdm
import time

# Restrict the job to the four GPUs used by DataParallel below.
# NOTE(review): torch is imported above this line; this still takes effect
# because CUDA is initialized lazily on first use, not at import time.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
def map_conti_discr(labels, theta):
    """Binarize continuous scores against a threshold.

    Args:
        labels: tensor of continuous scores (any shape).
        theta: decision threshold.

    Returns:
        An int32 tensor of the same shape and device: 1 where
        ``labels >= theta``, 0 elsewhere.
    """
    # The original zero-fill / negative-mask steps were redundant:
    # the comparison already yields exactly the 0/1 pattern we need.
    return (labels >= theta).int()

# Evaluation pass over one data iterator.
def eval_model(model, data_iter, loss, device):
    """Evaluate *model* on *data_iter* without tracking gradients.

    Returns:
        A ``(mean batch loss, mean batch accuracy)`` pair; predictions are
        binarized at 0.5 before the accuracy is computed.
    """
    model.eval()
    batch_losses = []
    batch_accs = []
    with torch.no_grad():
        for features, target in data_iter:
            features = features.to(device)
            target = target.to(device)
            prediction = model(features)
            batch_losses.append(loss(prediction, target).cpu().item())
            hard_pred = map_conti_discr(prediction, theta=0.5)
            # accuracy_score returns a plain float, not a tensor.
            batch_accs.append(accuracy_score(target.int().cpu(), hard_pred.cpu()))
    return np.mean(batch_losses), np.mean(batch_accs)
# Weight initialization hook, applied via ``model.apply(...)``.
def xavier_init_weights(m):
    """Xavier-uniform initialization for Linear, GRU and Conv2d modules.

    Any other module type is left untouched. Intended to be passed to
    ``nn.Module.apply``, which calls it on every submodule.
    """
    # isinstance (not ``type(m) ==``) so subclasses are initialized too;
    # elif is safe because a module matches at most one branch.
    if isinstance(m, nn.Linear):
        nn.init.xavier_uniform_(m.weight)
    elif isinstance(m, nn.GRU):
        # GRU keeps its weight matrices in flat parameter lists; initialize
        # each weight tensor, leaving the biases at their defaults.
        for name in m._flat_weights_names:
            if "weight" in name:
                nn.init.xavier_uniform_(m._parameters[name])
    elif isinstance(m, nn.Conv2d):
        nn.init.xavier_uniform_(m.weight)

def train_model(ids, lr, l2_penalty, batch_size, num_worker, epochs):
    """Train ParallelNet_1d on the samples identified by ``ids``.

    Each epoch iterates the data once per window offset (0, 5, ..., 395),
    evaluates the model on both splits, writes a checkpoint to
    <repo>/DataBase/Model_1d, and records per-epoch metrics; after the last
    epoch the metrics are dumped to evaluation.csv in the same folder.

    Args:
        ids: ordered sample ids; split positionally into train/valid below.
        lr: initial Adam learning rate.
        l2_penalty: Adam ``weight_decay`` (L2 regularization strength).
        batch_size: mini-batch size forwarded to FedSeq.gen_seq.
        num_worker: loader worker count forwarded to FedSeq.gen_seq.
        epochs: number of training epochs.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # One data pass per offset; presumably each offset shifts the sequence
    # windows inside FedSeq.gen_seq — TODO confirm.
    offsetList = range(0, 400, 5)
    delimit_0 = int(len(ids) * 0.9)
    delimit_1 = int(len(ids) * 0.8)
    # NOTE(review): train_ids = ids[:90%] while valid_ids = ids[80%:], so the
    # slice ids[80%:90%] belongs to BOTH sets — confirm the overlap is intended.
    train_ids = ids[:delimit_0]
    # Expected batch count per epoch; used only to size the progress bar.
    train_times = math.ceil(len(train_ids) / batch_size) * len(offsetList)
    valid_ids = ids[delimit_1:]
    valid_times = math.ceil(len(valid_ids) / batch_size) * len(offsetList)  # currently unused
    # inception Class
    model = ParallelNet_1d.ParallelNet_1d(output_size=6)
    # initialize the weights (Xavier, see xavier_init_weights above)
    model.apply(xavier_init_weights)
    if torch.cuda.is_available():
        model = model.cuda()
        model = nn.DataParallel(model, device_ids=[0,1,2,3])
    # BCELoss expects model outputs already in (0, 1), i.e. the network ends
    # in a sigmoid — TODO confirm in ParallelNet_1d.
    loss = nn.BCELoss()
    # Step decay: multiply the base lr by 0.9 every 2 epochs.
    lambdaLR = lambda epoch: 0.90 **(epoch // 2)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_penalty)
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, 
                    lr_lambda=lambdaLR)

    # set data storage location: the three dirname() calls walk up from this
    # script to the repository root, then into DataBase/Model_1d.
    scriptPath = os.path.realpath(__file__)
    basedir = os.path.dirname(os.path.dirname(scriptPath))
    basedir = os.path.dirname(basedir)
    basedir = os.path.dirname(basedir)
    datadir = os.path.join(basedir, "DataBase")
    modeldir = os.path.join(datadir, "Model_1d")
    # Per-epoch metric history, dumped to evaluation.csv after the last epoch.
    val_loss_epochs = []
    tra_loss_epochs = []
    val_acc_epochs = []
    tra_acc_epochs = []
    for epoch in range(epochs):
        start = time.time()
        print(f"Epoch: {epoch+1}")
        train_pbar = tqdm(total=train_times)
        model.train()
        for offset in offsetList:
            train_iter = FedSeq.gen_seq(train_ids, batch_size, num_worker, offset=offset)
            for out_seq, label in train_iter:
                train_pbar.update(1)
                out_seq = out_seq.to(device)
                label = label.to(device)
                # out_seq = torch.unsqueeze(out_seq, 1)
                optimizer.zero_grad()
                modeled_label = model(out_seq)
                l = loss(modeled_label, label)
                l.backward()
                optimizer.step()
        train_pbar.close()

        # evaluate the model on both splits, once per offset.
        tra_loss_list = []
        val_loss_list = []
        tra_accuracy_list = []
        val_acccuracy_list = []
        for offset in offsetList:
            train_iter = FedSeq.gen_seq(train_ids,
                                        batch_size,
                                        num_worker,
                                        offset=offset)
            valid_iter = FedSeq.gen_seq(valid_ids, 
                                        batch_size,
                                        num_worker,
                                        offset=offset)
            tra_loss, tra_accuracy = eval_model(model, 
                                                train_iter,
                                                loss,
                                                device)
            val_loss, val_accuracy = eval_model(model, 
                                                valid_iter, 
                                                loss,
                                                device)
            tra_loss_list.append(tra_loss)
            val_loss_list.append(val_loss)
            tra_accuracy_list.append(tra_accuracy)
            val_acccuracy_list.append(val_accuracy)
        # Decay the learning rate once per epoch (see lambdaLR above).
        scheduler.step()
        mean_tra_loss = np.mean(tra_loss_list) 
        mean_val_loss = np.mean(val_loss_list)
        mean_tra_acc = np.mean(tra_accuracy_list)
        mean_val_acc = np.mean(val_acccuracy_list)
        # Checkpoint name encodes epoch and mean train/valid accuracy.
        modelPath = os.path.join(modeldir, "PN-%d-%.3f-%.3f.pt"%(epoch+1, mean_tra_acc, mean_val_acc))
        # torch.save(model.module.state_dict(), modelPath)
        # for single-gpu
        # NOTE(review): on GPU the model is wrapped in DataParallel, so this
        # checkpoint's keys carry a "module." prefix; the commented line above
        # would save the unwrapped weights — confirm which form loaders expect.
        torch.save(model.state_dict(), modelPath)
        print(f"Epoch:{epoch+1} training loss: {mean_tra_loss:.3f} validation loss: {mean_val_loss:.3f}")
        tra_acc_epochs.append(mean_tra_acc)
        val_acc_epochs.append(mean_val_acc)
        tra_loss_epochs.append(mean_tra_loss)
        val_loss_epochs.append(mean_val_loss)
        end = time.time()
        print(f"Duration time {end - start}")
    df_path = os.path.join(modeldir, "evaluation.csv")  
    # The 1..epochs index is discarded again by index=False in to_csv below.
    df = pd.DataFrame(index=range(1, epochs+1), 
        columns=["tra_acc", "val_acc", "tra_loss", "val_loss"])
    df.loc[:,"tra_acc"] = tra_acc_epochs
    df.loc[:,"val_acc"] = val_acc_epochs
    df.loc[:,"tra_loss"] = tra_loss_epochs
    df.loc[:,"val_loss"] = val_loss_epochs
    df.to_csv(df_path, index=False)

if __name__ == "__main__":
    # Hyper-parameters.
    lr = 1e-3
    l2_penalty = 0  # Adam weight_decay; 0 disables L2 regularization
    batch_size = 64
    num_worker = 4
    num_epochs = 100

    # All sample ids live in MongoDB; the data is split by id downstream.
    # (The previous unused ParallelNet_1d instantiation here was removed:
    # train_model builds its own model.)
    client = pymongo.MongoClient("mongodb://127.0.0.1:27017/")
    db = client["Power_Fault"]
    col_sour = db["data_sour"]
    ids = col_sour.distinct("_id")
    # ids = ids[:64]  # uncomment for a quick smoke test
    # Fix the seed so the shuffled train/valid split is reproducible.
    np.random.seed(42)
    np.random.shuffle(ids)
    train_model(ids, lr, l2_penalty, batch_size, num_worker, num_epochs)
    