import collections
import datetime
import math
import os
import warnings
import time
import numpy as np
import sklearn.exceptions
import torch
import torch.nn.functional as F

from dataloader import get_class_weight
from models.CBLstm import CBLstm
from models.CLAttention import create_model
from models.CNN_LSTM_Attention import CNN_LSTM_Attention
from models.LightX3ECG import LightX3ECG
from models.ResNet1D import resnet34_1d
# import matplotlib.pyplot as plt

from models.lwCETModel import lwCET
from models.models import ecgTransForm
from myutils.dataset import SignalDataset

warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
warnings.simplefilter(action='ignore', category=FutureWarning)
import pandas as pd
from configs.data_configs import get_dataset_class
from configs.hparams import get_hparams_class
from utils import AverageMeter, to_device, _save_metrics, copy_Files, save_data, printPlt
from utils import fix_randomness, starting_logs, save_checkpoint, _calc_metrics
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader


# Inter-patient (cross-patient) training setting
class trainer2(object):
    """Inter-patient ECG-beat classification trainer.

    Builds train/validation/test DataLoaders from CSV beat data, trains the
    selected model with class-weighted cross-entropy, checkpoints the model
    with the best validation F1 each epoch, then reloads that checkpoint and
    reports metrics on the held-out test split.
    """

    def __init__(self, args):
        # dataset parameters
        self.dataset = args.dataset
        self.dataType = args.dataType
        self.dataPath = args.dataPath
        self.seed_id = args.seed_id

        self.device = torch.device(args.device)

        # Exp Description — timestamped so repeated runs get distinct log dirs.
        self.run_description = f"{args.run_description}_{datetime.datetime.now().strftime('%Y_%m_%d_%H_%M')}"
        self.experiment_description = args.experiment_description

        # paths
        self.home_path = os.getcwd()
        self.save_dir = os.path.join(os.getcwd(), args.save_dir)
        self.exp_log_dir = os.path.join(self.save_dir, self.experiment_description, self.run_description)
        os.makedirs(self.exp_log_dir, exist_ok=True)

        self.data_path = args.data_path

        # Specify runs
        self.num_runs = args.num_runs

        # get dataset and base model configs
        self.dataset_configs, self.hparams_class = self.get_configs()

        # Specify hparams
        self.hparams = self.hparams_class.train_params

    def get_configs(self):
        """Return instantiated (dataset config, supervised hparams config)."""
        dataset_class = get_dataset_class(self.dataset)
        hparams_class = get_hparams_class("supervised")
        return dataset_class(), hparams_class()

    def load_data(self, data_set, data_type, data_path):
        """Create ``self.train_dl`` / ``self.val_dl`` / ``self.test_dl`` and ``self.cw_dict``.

        For ``data_type == 'brn'`` a single CSV (label assumed in column 0) is
        split, stratified by label, into ~70% train / ~10% valid / ~20% test.
        Otherwise a fixed train CSV is used whole and a fixed test CSV is split
        50/50 into validation and test.
        """
        if data_type == 'brn':
            train_data = pd.read_csv(data_path, header=None).values
            # First 70/30 split, then the 30% is split 34/66 so overall
            # proportions are ~70% train, ~10% valid, ~20% test.
            raw_train, raw_valid, _, _ = train_test_split(train_data, list(train_data[:, 0]), test_size=0.3,
                                                          random_state=self.seed_id, stratify=list(train_data[:, 0]))
            raw_valid, raw_test, _, _ = train_test_split(raw_valid, list(raw_valid[:, 0]), test_size=0.66,
                                                         random_state=self.seed_id, stratify=list(raw_valid[:, 0]))
            print(train_data.shape)
            print(raw_train.shape)
            print(raw_valid.shape)
            print(raw_test.shape)
        else:
            train_file = 'data/mit/brn52_268/data_4-train-251020.csv'
            train_data = pd.read_csv(train_file, header=None).values
            # Held-out data: the test file is split 50/50 into valid and test.
            test_file = 'data/mit/brn52_268/data_4-test-251020.csv'
            test_data = pd.read_csv(test_file, header=None).values
            # TODO:random_state
            raw_train = train_data
            raw_valid, raw_test, _, _ = train_test_split(test_data, list(test_data[:, 0]), test_size=0.5,
                                                         random_state=self.seed_id, stratify=list(test_data[:, 0]))
            print(train_data.shape)
            print(test_data.shape)

        train_data = SignalDataset(raw_train)
        valid_data = SignalDataset(raw_valid)
        test_data = SignalDataset(raw_test)
        # NOTE(review): batch size is hard-coded here rather than taken from
        # self.hparams["batch_size"] — confirm whether they should agree.
        batch_size = 128
        self.train_dl = DataLoader(dataset=train_data,
                                   batch_size=batch_size,
                                   num_workers=2,
                                   shuffle=True)
        self.val_dl = DataLoader(dataset=valid_data,
                                 batch_size=batch_size,
                                 num_workers=2,
                                 shuffle=True)
        self.test_dl = DataLoader(dataset=test_data,
                                  batch_size=batch_size,
                                  num_workers=2,
                                  shuffle=True)
        # Class weights computed from the training labels (used for the loss).
        self.cw_dict = get_class_weight(train_data._label)

    def calc_results_per_run(self):
        """Compute (accuracy, f1) from the last accumulated pred/true labels."""
        acc, f1 = _calc_metrics(self.pred_labels, self.true_labels, self.dataset_configs.class_names)
        return acc, f1

    def train(self):
        """Run the full train / validate / test pipeline and log artifacts."""
        self.metrics = {'accuracy': [], 'f1_score': []}

        # fixing random seed
        fix_randomness(int(self.seed_id))

        # Logging
        self.logger, self.scenario_log_dir = starting_logs(self.dataset, self.exp_log_dir, self.seed_id)
        self.logger.debug(self.hparams)

        # Load data
        self.load_data(self.dataset, self.dataType, self.dataPath)

        # Alternative architectures kept for experimentation:
        # model = ecgTransForm(configs=self.dataset_configs, hparams=self.hparams)
        # model = lwCET(configs=self.dataset_configs, hparams=self.hparams, add_fea=True)
        # model = CBLstm(configs=self.dataset_configs, hparams=self.hparams)
        # model = CNN_LSTM_Attention(configs=self.dataset_configs, hparams=self.hparams)
        # model = resnet34_1d(num_classes=4, input_channels=1)
        # model = create_model(
        #     model_type="cnn_lstm_attention",
        #     input_channels=1,
        #     seq_length=268,
        #     hidden_size=128,
        #     num_layers=2,
        #     num_classes=4,
        #     dropout=0.3
        # )
        model = LightX3ECG(268, 4)

        model.to(self.device)

        self.optimizer = torch.optim.Adam(
            model.parameters(),
            lr=self.hparams["learning_rate"],
            weight_decay=self.hparams["weight_decay"],
            betas=(0.9, 0.99)
        )

        # Class-weighted cross-entropy to counter label imbalance.
        weights = [float(value) for value in self.cw_dict.values()]
        weights_array = np.array(weights).astype(np.float32)  # Ensuring the correct dtype
        weights_tensor = torch.tensor(weights_array).to(self.device)
        self.cross_entropy = torch.nn.CrossEntropyLoss(weight=weights_tensor)

        best_acc = 0
        best_f1 = 0

        train_accs = []
        valid_accs = []
        train_f1s = []
        train_losses = []
        valid_f1s = []
        train_time = []
        valid_time = []
        # training..
        for epoch in range(1, self.hparams["num_epochs"] + 1):
            model.train()
            start = time.time()
            self.pred_labels = np.array([])
            self.true_labels = np.array([])
            # Average meters
            loss_avg_meters = collections.defaultdict(lambda: AverageMeter())

            for step, batches in enumerate(self.train_dl):
                batches = to_device(batches, self.device)

                data = batches[0].float()
                feature1 = batches[1].float()
                labels = batches[2].long()

                # ====== Source =====================
                self.optimizer.zero_grad()
                data = data.unsqueeze(1)  # add channel dim: (B, L) -> (B, 1, L)
                # Src original features
                logits = model(data)
                # logits = model(data, feature1)

                # Cross-Entropy loss
                x_ent_loss = self.cross_entropy(logits, labels)

                x_ent_loss.backward()
                self.optimizer.step()

                losses = {'Total_loss': x_ent_loss.item()}
                for key, val in losses.items():
                    # BUGFIX: weight the running average by the actual batch
                    # size (last batch may be partial, and the loader's batch
                    # size is hard-coded) instead of hparams["batch_size"].
                    loss_avg_meters[key].update(val, labels.size(0))
                self.pred_labels = np.append(self.pred_labels, logits.max(1)[1].cpu().numpy())
                self.true_labels = np.append(self.true_labels, labels.data.cpu().numpy())

            cost_time = time.time() - start
            train_time.append(cost_time)
            tr_acc, tr_f1 = self.calc_results_per_run()
            train_accs.append(tr_acc)
            train_f1s.append(tr_f1)
            train_losses.append(loss_avg_meters['Total_loss'].avg)
            # logging
            self.logger.debug(f'[Epoch : {epoch}/{self.hparams["num_epochs"]}]')
            for key, val in loss_avg_meters.items():
                self.logger.debug(f'{key}\t: {val.avg:2.4f}')
            self.logger.debug(f'TRAIN: Acc:{tr_acc:2.4f} \t F1:{tr_f1:2.4f}, cost_time: {cost_time}]')

            # VALIDATION part
            start = time.time()
            self.evaluate(model, self.val_dl)
            cost_time = time.time() - start
            valid_time.append(cost_time)
            ts_acc, ts_f1 = self.calc_results_per_run()
            valid_accs.append(ts_acc)
            valid_f1s.append(ts_f1)
            if ts_f1 > best_f1:  # save best model based on best f1.
                best_f1 = ts_f1
                best_acc = ts_acc
                save_checkpoint(self.exp_log_dir, model, self.dataset, self.dataset_configs, self.hparams, "best")
                _save_metrics(self.pred_labels, self.true_labels, self.exp_log_dir, "validation_best", cost_time)

            # logging
            self.logger.debug(
                f'VAL  : Acc:{ts_acc:2.4f} \t F1:{ts_f1:2.4f} (best: {best_f1:2.4f}), cost_time: {cost_time}]')
            self.logger.debug(f'-------------------------------------')

        self.logger.debug(":::::::::::::")
        # BEST EPOCH
        self.logger.debug("BEST EPOCH PERFORMANCE on validation set ...")
        self.logger.debug(f'Acc:{best_acc:2.4f} \t F1:{best_f1:2.4f}')
        save_checkpoint(self.exp_log_dir, model, self.dataset, self.dataset_configs, self.hparams, "last")

        # BUGFIX: build paths with os.path.join instead of "\\" concatenation
        # so the curves are also saved correctly on non-Windows systems
        # (identical result on Windows).
        save_data(os.path.join(self.exp_log_dir, "train_losses.txt"), train_losses)
        save_data(os.path.join(self.exp_log_dir, "train_accs.txt"), train_accs)
        save_data(os.path.join(self.exp_log_dir, "train_f1.txt"), train_f1s)
        save_data(os.path.join(self.exp_log_dir, "valid_accs.txt"), valid_accs)
        save_data(os.path.join(self.exp_log_dir, "valid_f1.txt"), valid_f1s)
        save_data(os.path.join(self.exp_log_dir, "train_time.txt"), train_time, True)
        save_data(os.path.join(self.exp_log_dir, "valid_time.txt"), valid_time, True)

        printPlt(1, train_losses, os.path.join(self.exp_log_dir, 'train_losses.png'), 'epoch', 'train loss', 'loss')
        printPlt(2, train_accs, os.path.join(self.exp_log_dir, 'train_acc.png'), 'epoch', 'train accuracy', 'accuracy')
        printPlt(3, train_f1s, os.path.join(self.exp_log_dir, 'train_f1.png'), 'epoch', 'train f1', 'f1')
        printPlt(4, valid_accs, os.path.join(self.exp_log_dir, 'valid_acc.png'), 'epoch', 'valid accuracy', 'valid accuracy')
        # BUGFIX: was figure id 2, which collided with the train-accuracy
        # figure and mixed the two plots; use a fresh figure id.
        printPlt(7, valid_f1s, os.path.join(self.exp_log_dir, 'valid_f1.png'), 'epoch', 'valid f1', 'f1')
        printPlt(5, train_time, os.path.join(self.exp_log_dir, 'train_time.png'), 'epoch', 'train time', 'train time')
        printPlt(6, valid_time, os.path.join(self.exp_log_dir, 'valid_time.png'), 'epoch', 'valid time', 'valid time')

        # TESTING: rebuild the architecture, load the best checkpoint, and
        # evaluate once on the test split.
        print(" === Evaluating on TEST set ===")
        test_model_name = os.path.join(self.exp_log_dir, "checkpoint_best.pt")
        # Alternative architectures kept for experimentation:
        # model1 = ecgTransForm(configs=self.dataset_configs, hparams=self.hparams)
        # model1 =  lwCET(configs=self.dataset_configs, hparams=self.hparams, add_fea=True)
        # model1 = CBLstm(configs=self.dataset_configs, hparams=self.hparams)
        # model1 = CNN_LSTM_Attention(configs=self.dataset_configs, hparams=self.hparams)
        # model1 = resnet34_1d(num_classes=4, input_channels=1)
        # model1 = create_model(
        #     model_type="cnn_lstm_attention",
        #     input_channels=1,
        #     seq_length=268,
        #     hidden_size=128,
        #     num_layers=2,
        #     num_classes=4,
        #     dropout=0.3
        # )
        model1 = LightX3ECG(268, 4)
        # BUGFIX: map onto the user-selected device instead of unconditionally
        # preferring 'cuda:0' whenever CUDA is available.
        chkpoint = torch.load(test_model_name, map_location=self.device)
        model1.load_state_dict(chkpoint['model'])
        model1 = model1.to(self.device)

        start = time.time()
        self.evaluate(model1, self.test_dl)
        cost_time = time.time() - start
        test_acc, test_f1 = self.calc_results_per_run()
        _save_metrics(self.pred_labels, self.true_labels, self.exp_log_dir, "test_best", cost_time)
        self.logger.debug(f'Acc:{test_acc:2.4f} \t F1:{test_f1:2.4f}, cost_time: {cost_time}]')
        # Copy the training source files into the experiment log directory
        # so each run records the exact code that produced it.
        copy_Files(self.exp_log_dir)  # save a copy of training files

    def evaluate(self, model, dataset):
        """Evaluate ``model`` over ``dataset`` (a DataLoader).

        Fills ``self.pred_labels`` / ``self.true_labels`` (consumed by
        calc_results_per_run) and sets ``self.trg_loss`` to the mean
        per-batch cross-entropy.
        """
        model.to(self.device).eval()

        total_loss_ = []

        self.pred_labels = np.array([])
        self.true_labels = np.array([])

        with torch.no_grad():
            for batches in dataset:
                batches = to_device(batches, self.device)
                data = batches[0].float()
                feature1 = batches[1].float()
                labels = batches[2].long()

                # forward pass
                data = data.unsqueeze(1)  # add channel dim: (B, L) -> (B, 1, L)
                predictions = model(data)
                # predictions = model(data, feature1)

                # compute loss (unweighted here, unlike training)
                loss = F.cross_entropy(predictions, labels)
                total_loss_.append(loss.item())
                pred = predictions.detach().argmax(dim=1)  # get the index of the max log-probability

                self.pred_labels = np.append(self.pred_labels, pred.cpu().numpy())
                self.true_labels = np.append(self.true_labels, labels.data.cpu().numpy())

        self.trg_loss = torch.tensor(total_loss_).mean()  # average loss
