import collections
import datetime
import math
import os
import warnings
import time
import numpy as np
import sklearn.exceptions
import torch
import torch.nn.functional as F

from models.lwCETModel import lwCET
from models.models import ecgTransForm
from myutils.dataset import SignalDataset

# Suppress sklearn's UndefinedMetricWarning (raised when a class has no
# predicted samples) and noisy FutureWarnings from dependencies.
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
warnings.simplefilter(action='ignore', category=FutureWarning)
import pandas as pd
from configs.data_configs import get_dataset_class
from configs.hparams import get_hparams_class
from utils import AverageMeter, to_device, _save_metrics, copy_Files, save_data, printPlt
from utils import fix_randomness, starting_logs, save_checkpoint, _calc_metrics
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader


# Inter-patient split; used for testing only
class test2(object):
    """Inter-patient evaluation harness.

    Loads a previously trained checkpoint and evaluates it on a held-out
    test set. Despite the method name, :meth:`train` performs no training.
    """

    def __init__(self, args):
        """Store run configuration and resolve dataset / hparam configs.

        Parameters
        ----------
        args : argparse.Namespace-like object
            Must provide: dataset, seed_id, device, run_description,
            experiment_description, save_dir, data_path, num_runs.
        """
        # dataset parameters
        self.dataset = args.dataset
        self.seed_id = args.seed_id

        self.device = torch.device(args.device)

        # Experiment description; the timestamp keeps repeated runs in
        # separate log directories.
        self.run_description = f"{args.run_description}_{datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')}"
        self.experiment_description = args.experiment_description

        # paths
        self.home_path = os.getcwd()
        self.save_dir = os.path.join(os.getcwd(), args.save_dir)
        self.exp_log_dir = os.path.join(self.save_dir, self.experiment_description, self.run_description)
        os.makedirs(self.exp_log_dir, exist_ok=True)

        self.data_path = args.data_path

        # Specify runs
        self.num_runs = args.num_runs

        # get dataset and base model configs
        self.dataset_configs, self.hparams_class = self.get_configs()

        # Specify hparams
        self.hparams = self.hparams_class.train_params

    def get_configs(self):
        """Instantiate the dataset config and the supervised hparams config."""
        dataset_class = get_dataset_class(self.dataset)
        hparams_class = get_hparams_class("supervised")
        return dataset_class(), hparams_class()

    def load_data(self, data_type):
        """Build ``self.test_dl`` from the evaluation CSV.

        Note: ``data_type`` is currently unused — the evaluation file is
        fixed below.
        """
        # TODO(review): hard-coded evaluation file; consider deriving it from
        # self.data_path / data_type instead.
        test_file = 'data/mit/bak/train-250923.csv'
        test_data = pd.read_csv(test_file, header=None).values
        print(test_data.shape)

        # TODO:random_state
        raw_test = test_data
        test_data = SignalDataset(raw_test)
        batch_size = 128
        # shuffle=False: sample order does not affect aggregate metrics, and
        # a deterministic order makes evaluation runs reproducible.
        self.test_dl = DataLoader(dataset=test_data,
                                  batch_size=batch_size,
                                  num_workers=2,
                                  shuffle=False)

    def calc_results_per_run(self):
        """Return (accuracy, f1) computed from the last ``evaluate`` call."""
        acc, f1 = _calc_metrics(self.pred_labels, self.true_labels, self.dataset_configs.class_names)
        return acc, f1

    def train(self):
        """Evaluate a saved checkpoint on the test set (no training occurs)."""
        # Save a copy of the training files under
        # experiments_logs/${experiment_description}.
        copy_Files(self.exp_log_dir)

        self.metrics = {'accuracy': [], 'f1_score': []}

        # fixing random seed
        fix_randomness(int(self.seed_id))

        # Logging
        self.logger, self.scenario_log_dir = starting_logs(self.dataset, self.exp_log_dir, self.seed_id)
        self.logger.debug(self.hparams)

        # Load data
        self.load_data(self.dataset)

        # TESTING
        print(" === Evaluating on TEST set ===")
        # TODO(review): absolute Windows path — parameterize the checkpoint
        # location instead of hard-coding it.
        test_model_name = os.path.join("E:\\3_code_self\\ecg-detection\\experiments_logs\\Exp1\\lwCET_2025_09_23_22_53_29", "checkpoint_best.pt")
        # model1 = ecgTransForm(configs=self.dataset_configs, hparams=self.hparams)
        model1 = lwCET(configs=self.dataset_configs, hparams=self.hparams)
        # Map tensors straight onto the target device; the previous
        # 'cuda:0 if available' choice ignored self.device and could load
        # onto the GPU even for a CPU-only evaluation.
        # NOTE: torch.load unpickles arbitrary objects — only load trusted
        # checkpoints.
        checkpoint = torch.load(test_model_name, map_location=self.device)
        model1.load_state_dict(checkpoint['model'])
        model1 = model1.to(self.device)

        start = time.time()
        self.evaluate(model1, self.test_dl)
        cost_time = time.time() - start
        test_acc, test_f1 = self.calc_results_per_run()
        _save_metrics(self.pred_labels, self.true_labels, self.exp_log_dir, "test_best", cost_time)
        self.logger.debug(f'Acc:{test_acc:2.4f} \t F1:{test_f1:2.4f}, cost_time: {cost_time}')

    def evaluate(self, model, dataset):
        """Run ``model`` over ``dataset``.

        Populates ``self.pred_labels`` / ``self.true_labels`` (1-D numpy
        arrays) and ``self.trg_loss`` (mean cross-entropy over batches).
        """
        model.to(self.device).eval()

        batch_losses = []
        pred_chunks = []
        true_chunks = []

        with torch.no_grad():
            for batch in dataset:
                batch = to_device(batch, self.device)
                data = batch[0].float()
                labels = batch[2].long()

                # forward pass
                # assumes data is (N, L); adds a channel dim -> (N, 1, L)
                # — TODO confirm against SignalDataset.
                data = data.unsqueeze(1)
                predictions = model(data)

                # compute loss
                loss = F.cross_entropy(predictions, labels)
                batch_losses.append(loss.item())
                # index of the max log-probability per sample
                pred = predictions.detach().argmax(dim=1)

                pred_chunks.append(pred.cpu().numpy())
                true_chunks.append(labels.cpu().numpy())

        # Concatenate once instead of np.append per batch (avoids O(n^2)
        # re-allocation as the arrays grow).
        self.pred_labels = np.concatenate(pred_chunks) if pred_chunks else np.array([])
        self.true_labels = np.concatenate(true_chunks) if true_chunks else np.array([])

        self.trg_loss = torch.tensor(batch_losses).mean()  # average loss


def get_class_weight(labels_dict):
    """Compute log-scaled inverse-frequency class weights from a label tensor.

    Each weight is ``log(max_class_count / class_count)`` floored at 1.0, so
    rare classes receive weights > 1 and common classes receive 1.0.

    Parameters
    ----------
    labels_dict : torch.Tensor
        1-D tensor of integer class labels. NOTE(review): class index ``i``
        is weighted by the count of label *value* ``i + 1`` — labels appear
        to be 1-based; confirm against the dataset's label encoding.

    Returns
    -------
    dict
        Mapping of 0-based class index -> float weight (>= 1.0).
    """
    # Move to CPU once. The original called labels_dict.numpy() a second
    # time without .cpu(), which raises for CUDA tensors.
    labels = labels_dict.cpu().numpy().tolist()
    counts = {}
    for i in range(len(np.unique(labels))):
        # Count occurrences of the 1-based label value for class index i.
        counts[i] = labels.count(i + 1)
    total = sum(counts.values())
    max_num = max(counts.values())
    # mu * total == max_num, so each score reduces to log(max_num / count).
    mu = 1.0 / (total / max_num)
    class_weight = dict()
    for key, value in counts.items():
        # NOTE(review): raises ZeroDivisionError if some class index has no
        # samples (e.g. if labels are actually 0-based) — preserved as-is.
        score = math.log(mu * total / float(value))
        class_weight[key] = score if score > 1.0 else 1.0
    return class_weight
