import logging
import os
import random
import sys
from datetime import datetime
from shutil import copy

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix
import seaborn as sns


def count_parameters(model):
    """Return the total number of trainable (requires_grad) parameters in *model*."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total


class AverageMeter(object):
    """Tracks the latest value, running sum, count, and mean of a series."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear every statistic back to zero."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running mean."""
        self.val = val
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.count


def fix_randomness(SEED):
    """Seed every RNG used in training (stdlib, NumPy, PyTorch CPU/CUDA)
    and force deterministic cuDNN behavior for reproducible runs.

    Bug fix: the original seeded only the *current* CUDA device;
    ``torch.cuda.manual_seed_all`` now covers every device (no-op when
    CUDA is unavailable).
    """
    random.seed(SEED)
    np.random.seed(SEED)
    torch.manual_seed(SEED)
    torch.cuda.manual_seed(SEED)
    torch.cuda.manual_seed_all(SEED)  # seed all GPUs, not just the current one
    # Trade speed for determinism in cuDNN kernel selection.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


def _logger(logger_name, level=logging.DEBUG):
    """
    Method to return a custom logger with the given name and level
    """
    logger = logging.getLogger(logger_name)
    logger.setLevel(level)
    format_string = "%(message)s"
    log_format = logging.Formatter(format_string)
    # Creating and adding the console handler
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setFormatter(log_format)
    logger.addHandler(console_handler)
    # Creating and adding the file handler
    file_handler = logging.FileHandler(logger_name, mode='a')
    file_handler.setFormatter(log_format)
    logger.addHandler(file_handler)
    return logger


def starting_logs(data_type, exp_log_dir, seed_id):
    """Create a per-seed log directory under *exp_log_dir* and return a
    logger (plus the directory path) that has announced the dataset/seed.
    """
    log_dir = os.path.join(exp_log_dir, f"_seed_{seed_id}")
    os.makedirs(log_dir, exist_ok=True)
    stamp = datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
    log_file_name = os.path.join(log_dir, f"logs_{stamp}.log")
    logger = _logger(log_file_name)
    separator = "=" * 45
    # Banner: dataset and seed, framed by separator rules.
    for line in (separator,
                 f'Dataset: {data_type}',
                 separator,
                 f'Seed: {seed_id}',
                 separator):
        logger.debug(line)
    return logger, log_dir


def save_checkpoint(exp_log_dir, model, dataset, dataset_configs, hparams, status):
    """Serialize model weights plus run metadata to checkpoint_<status>.pt.

    The saved dict holds the dataset name, the config object's attributes,
    the hyperparameters, and the model state_dict.
    """
    checkpoint = {
        "dataset": dataset,
        "configs": dataset_configs.__dict__,
        "hparams": dict(hparams),
        "model": model.state_dict(),
    }
    target = os.path.join(exp_log_dir, f"checkpoint_{status}.pt")
    torch.save(checkpoint, target)


def _calc_metrics(pred_labels, true_labels, classes_names):
    """Return (accuracy %, macro-averaged F1 %) for the given label arrays."""
    y_pred = np.asarray(pred_labels, dtype=int)
    y_true = np.asarray(true_labels, dtype=int)

    report = classification_report(y_true, y_pred, target_names=classes_names,
                                   digits=6, output_dict=True)
    acc = accuracy_score(y_true, y_pred)
    macro_f1 = report["macro avg"]["f1-score"]

    return acc * 100, macro_f1 * 100


def _save_metrics(pred_labels, true_labels, log_dir, status, costTime):
    """Persist evaluation artifacts into *log_dir* for the given *status*:
    a classification report (xlsx), confusion-matrix statistics (txt via
    cal_and_save_statistic), the raw matrix (csv) and a heatmap (png).
    """
    y_pred = np.asarray(pred_labels, dtype=int)
    y_true = np.asarray(true_labels, dtype=int)

    # 1. Build and save the per-class classification report.
    report = classification_report(y_true, y_pred, digits=4, output_dict=True,
                                   target_names=['N', 'S', 'V', 'F'])
    report_df = pd.DataFrame(report)
    report_df["accuracy"] = accuracy_score(y_true, y_pred)
    # report_df = report_df * 100
    report_path = os.path.join(log_dir, f"classification_report_{status}.xlsx")
    report_df.to_excel(report_path)

    # 2. Compute the confusion matrix and derived statistics.
    cm = confusion_matrix(y_true, y_pred)
    cal_and_save_statistic(cm, log_dir, status, costTime)

    # 3. Also dump the raw matrix as CSV (handy for later programmatic reads).
    cm_path = os.path.join(log_dir, f"confusion_matrix_{status}.csv")
    pd.DataFrame(cm).to_csv(cm_path, index=False)  # no index column

    # 4. Render and save the confusion-matrix heatmap.
    plt.figure(figsize=(10, 8))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', cbar=False)
    plt.xlabel('Predicted Labels')
    plt.ylabel('True Labels')
    plt.title(f'Confusion Matrix - {status}')
    img_path = os.path.join(log_dir, f"confusion_matrix_{status}.png")
    plt.savefig(img_path, bbox_inches='tight', dpi=300)
    plt.close()  # close the figure to avoid leaking memory


import collections


def calNums(acc_i):
    """Round each element of *acc_i* to 5 decimals and return them as a
    comma-separated string (e.g. "0.12346, 1.00000")."""
    values = np.round(acc_i, decimals=5)
    return ', '.join('{:.5f}'.format(v) for v in values)


def cal_and_save_statistic(cm, log_dir, status, costTime):
    """Compute per-class statistics from the confusion matrix *cm* and write
    them, together with the matrix itself, to confusion_matrix_<status>.txt.

    Args:
        cm: square confusion matrix (rows = true labels, cols = predictions).
        log_dir: directory the text report is written into.
        status: tag used in the output file name.
        costTime: elapsed test time in seconds, recorded at the end of the file.

    Returns:
        (overall accuracy, macro F1 from averaged precision/recall,
         specificity list, precision list, recall list, per-class F1 list).

    Bug fixes vs. the original:
      * the output path used a hard-coded Windows "\\\\" separator, breaking
        on POSIX systems — now built with os.path.join;
      * average_f1_score could divide by zero on an all-zero matrix.
    """
    total_pred = cm.sum(0)  # column sums: TP + FP per class
    total_true = cm.sum(1)  # row sums:    TP + FN per class
    class_num = len(total_pred)
    grand_total = cm.sum()
    acc_SP = cm.diagonal().sum() / grand_total

    # Per-class precision and recall (0 when a class never occurs).
    pre_i = [cm[i, i] / total_pred[i] if total_pred[i] != 0 else 0
             for i in range(class_num)]
    rec_i = [cm[i, i] / total_true[i] if total_true[i] != 0 else 0
             for i in range(class_num)]
    # Specificity: TN / (TN + FP) per class.
    spe_i = [
        (grand_total - cm[i].sum() - cm[:, i].sum() + cm[i, i]) / (grand_total - total_true[i])
        if grand_total - total_true[i] != 0 else 0
        for i in range(class_num)
    ]
    # Per-class F1; the 1e-6 floor guards against precision+recall underflow.
    F1_i = [
        2 * pre_i[i] * rec_i[i] / max(1e-6, pre_i[i] + rec_i[i])
        if (pre_i[i] + rec_i[i]) != 0 else 0
        for i in range(class_num)
    ]

    # Macro precision/recall and the F1 derived from them.
    total_precision = np.mean(pre_i)
    total_recall = np.mean(rec_i)
    denom = total_precision + total_recall
    average_f1_score = 2 * total_precision * total_recall / denom if denom != 0 else 0.0

    # Replace any NaN (degenerate classes) with 0 before reporting.
    spe_i = np.array(spe_i)
    pre_i = np.array(pre_i)
    rec_i = np.array(rec_i)
    F1_i = np.array(F1_i)
    spe_i[np.isnan(spe_i)] = 0
    pre_i[np.isnan(pre_i)] = 0
    rec_i[np.isnan(rec_i)] = 0
    F1_i[np.isnan(F1_i)] = 0

    file_path5 = os.path.join(log_dir, f"confusion_matrix_{status}.txt")
    with open(file_path5, "w") as file:
        file.write('acc_SP is : {acc_SP:.4f}'.format(acc_SP=acc_SP) + "\n")
        file.write('f1_score is : {f1_score:.4f}'.format(f1_score=average_f1_score) + "\n")
        file.write('spe_i is : {spe_i}'.format(spe_i=calNums(spe_i)) + "\n")
        file.write('pre_i is : {pre_i}'.format(pre_i=calNums(pre_i)) + "\n")
        file.write('rec_i is : {rec_i}'.format(rec_i=calNums(rec_i)) + "\n")
        file.write('F1_i is : {F1_i}'.format(F1_i=calNums(F1_i)) + "\n")
        file.write('confusion matrix' + "\n")
        for row in cm:
            line = '\t\t'.join(str(item) for item in row)
            file.write(line + '\n')
        file.write('test_time is : {test_time} sec.'.format(test_time=costTime) + "\n")

    return acc_SP, average_f1_score, list(spe_i), list(pre_i), list(rec_i), list(F1_i)


def to_device(input, device):
    """Recursively move all tensors in *input* to *device*.

    Supports a bare tensor, a Mapping (returned as a new dict), or a
    Sequence (returned as a new list). Strings pass through unchanged —
    they are Sequences, so they must be checked before the generic branch.

    Raises:
        TypeError: if *input* is none of the supported types.
    """
    if torch.is_tensor(input):
        return input.to(device=device)
    elif isinstance(input, str):
        return input
    elif isinstance(input, collections.abc.Mapping):
        return {k: to_device(sample, device=device) for k, sample in input.items()}
    elif isinstance(input, collections.abc.Sequence):
        return [to_device(sample, device=device) for sample in input]
    else:
        # Bug fix: the original literal was missing the f-string prefix, so
        # the message printed "{type(input)}" verbatim instead of the type.
        raise TypeError(f"Input must contain tensor, dict or list, found {type(input)}")


def copy_Files(destination):
    """Back up the experiment's key source files into
    <destination>/MODEL_BACKUP_FILES so each run is reproducible."""
    destination_dir = os.path.join(destination, "MODEL_BACKUP_FILES")
    os.makedirs(destination_dir, exist_ok=True)
    # (source path, name inside the backup folder)
    backups = [
        ("main.py", "main.py"),
        ("dataloader.py", "dataloader.py"),
        ("models/lwCETModel.py", "lwCETModel.py"),
        ("models/models.py", "models.py"),
        ("models/MLLA.py", "MLLA.py"),
        ("trainer.py", "trainer.py"),
        ("utils.py", "myutils.py"),  # renamed on copy, as before
        ("configs/data_configs.py", "data_configs.py"),
        ("configs/hparams.py", "hparams.py"),
    ]
    for src, dst_name in backups:
        copy(src, os.path.join(destination_dir, dst_name))


def _plot_umap(model, data_loader, device, save_dir):
    """Project the model's features to 2-D with UMAP and save a class-colored
    scatter plot to <save_dir>/umap_plots/umap_.png.

    NOTE(review): *model* is indexed as model[0] then model[1] — presumably a
    (feature extractor, projection head) pair; confirm against the code that
    builds it. Labels are assumed to take exactly the values 0..4 (classes
    N/S/V/F/Q) — the /4.0 normalization below depends on that; verify for
    other datasets.
    """
    import umap
    import umap.plot
    from matplotlib.colors import ListedColormap
    classes_names = ['N', 'S', 'V', 'F', 'Q']

    font = {'family': 'Times New Roman',
            'weight': 'bold',
            'size': 17}
    plt.rc('font', **font)

    # Forward the entire dataset in one batch without building a grad graph.
    with torch.no_grad():
        # Source flow
        data = data_loader.dataset.x_data.float().to(device)
        labels = data_loader.dataset.y_data.view((-1)).long()
        out = model[0](data)
        features = model[1](out)

    if not os.path.exists(os.path.join(save_dir, "umap_plots")):
        os.mkdir(os.path.join(save_dir, "umap_plots"))

    # cmaps = plt.get_cmap('jet')
    model_reducer = umap.UMAP()  # n_neighbors=3, min_dist=0.3, metric='correlation', random_state=42)
    embedding = model_reducer.fit_transform(features.detach().cpu().numpy())

    # Normalize the labels to [0, 1] for colormap
    norm_labels = labels / 4.0

    # Create a new colormap by extracting the first 5 colors from "Paired"
    paired = plt.cm.get_cmap('Paired', 12)  # 12 distinct colors
    new_colors = [paired(0), paired(1), paired(2), paired(4),
                  paired(6)]  # Skip every second color, but take both from the first pair
    new_cmap = ListedColormap(new_colors)

    print("Plotting UMAP ...")
    plt.figure(figsize=(16, 10))
    # scatter = plt.scatter(embedding[:, 0], embedding[:, 1], c=labels,  s=10, cmap='Spectral')
    scatter = plt.scatter(embedding[:, 0], embedding[:, 1], c=norm_labels, cmap=new_cmap, s=15)

    handles, _ = scatter.legend_elements(prop='colors')
    plt.legend(handles, classes_names, title="Classes")
    file_name = "umap_.png"
    fig_save_name = os.path.join(save_dir, "umap_plots", file_name)
    # Hide tick labels: only the cluster geometry matters in the plot.
    plt.xticks([])
    plt.yticks([])
    plt.savefig(fig_save_name, bbox_inches='tight')
    plt.close()


def save_data(file_path, datas, avg=False):
    """Write each entry of *datas* on its own line in *file_path*.

    When *avg* is True, also append average/max/min summary lines —
    used for per-epoch timing logs.
    """
    with open(file_path, "w") as out:
        out.writelines(str(item) + "\n" for item in datas)
        if avg:
            valid_time_array = np.array(datas)
            out.write('avg valid_time is : {avg_time} sec.'.format(avg_time=np.mean(valid_time_array)) + "\n")
            out.write('max valid_time is : {max_time} sec.'.format(max_time=np.max(valid_time_array)) + "\n")
            out.write('min valid_time is : {min_time} sec.'.format(min_time=np.min(valid_time_array)) + "\n")


def printPlt(num, data, path, xLabel, yLabel, title):
    """Render *data* as a line chart in figure *num*, label it, and save
    the result as a PNG at *path* (used e.g. for the training-loss curve)."""
    plt.figure(num)
    plt.plot(data)
    # Apply the axis labels and title in one pass.
    for setter, text in ((plt.xlabel, xLabel), (plt.ylabel, yLabel), (plt.title, title)):
        setter(text)
    # Save as a high-resolution, tightly-cropped PNG.
    plt.savefig(path, dpi=300, bbox_inches='tight')
