from torch.utils.data import DataLoader, TensorDataset

import math
import os
import platform
import sys
import numpy as np

import torch.nn as nn
import torch.nn.functional as F
import torch
import matplotlib.pyplot as plt
from tools.data_generator import generate_logic_data
from tools.remove_temp_file import remove_path
from bdtime import Time
import evaluate
from tools.set_hf_proxies import set_hf_proxies

from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, mean_squared_error, \
    mean_absolute_percentage_error, mean_absolute_error


tt = Time()  # module-wide timer used for progress/log timestamps


set_hf_proxies()  # configure HuggingFace proxy settings (project helper); runs at import time


# Directory under which train() writes best/newest model checkpoints.
MODEL_SAVE_DIR = 'tempdir/models'
# Default local path of the HuggingFace `evaluate` metric scripts (Windows dev box).
LOCAL_METRICS_PATH = r'E:\projects\pycharmProjects\evaluate-main\metrics'

# On macOS, switch to the macOS dev machine's metrics path instead.
if platform.platform().lower().startswith('macos'):
    LOCAL_METRICS_PATH = '/Users/mac/PycharmProjects/evaluate-main/metrics'


def _get_pure_metric_name(metric_name):
    _metric_name = metric_name
    if '\\' in metric_name or '/' in metric_name:
        _metric_name = os.path.basename(metric_name)
    return _metric_name


def _get_filtered_y(y_true, y_pred, metric_name):
    if metric_name == 'mape':
        y_true_filtered = np.array([y for y in y_true if y != 0])
        y_pred_filtered = np.array([p for y, p in zip(y_true, y_pred) if y != 0])

        # y_pred_filtered = np.array([y for y, p in zip(y_pred, y_true) if not(y == 0 and p != 0)])
        # y_true_filtered = np.array([p for y, p in zip(y_pred, y_true) if not(y == 0 and p != 0)])

        # print(f'--- y_pred_filtered: {y_pred_filtered}')
        # print(f'--- y_true_filtered: {y_true_filtered}')

        # y_pred_filtered = np.array([y for y in y_pred if y != 0])
        # y_true_filtered = np.array([p for y, p in zip(y_pred, y_true) if y != 0])
    else:
        y_true_filtered, y_pred_filtered = y_true, y_pred
    return y_true_filtered, y_pred_filtered


@torch.no_grad()
def test_by_evaluate_metrics(y_pred, y_true, is_classification=False, score_name=None, debug=False,
                             local_metrics_path=LOCAL_METRICS_PATH, use_evaluate=False):
    """Compute evaluation metrics and derive a single scalar score.

    Parameters:
        y_pred, y_true: torch tensors or array-likes; flattened to 1-D numpy
            arrays internally.
        is_classification: if True, predictions are thresholded at 0.5 and
            classification metrics (accuracy/f1/precision/recall/mse) are
            used; otherwise regression metrics (mape/mae/mse/rmse).
        score_name: metric used as the summary score; defaults to the first
            metric of the active set.
        debug: print the full results dict and the derived score.
        local_metrics_path: local directory of HF `evaluate` metric scripts
            (only consulted when use_evaluate=True).
        use_evaluate: use HuggingFace `evaluate` instead of sklearn. New
            keyword — the previous code hard-coded this flag to False, which
            remains the default, so existing callers are unaffected.

    Returns:
        (score, results): `score` is results[score_name] for classification,
        otherwise 1 - results[score_name], clamped to [0, 1] and rounded to
        4 digits; `results` maps metric name -> rounded value.
    """
    import warnings
    # Silence sklearn's `mean_squared_error(squared=...)` deprecation noise.
    warnings.filterwarnings('ignore', category=FutureWarning, message=".*squared.*")

    if is_classification:
        # Threshold probabilities to hard 0/1 labels. Support numpy/list
        # input too — the previous `.long()` call only worked on tensors.
        if isinstance(y_pred, torch.Tensor):
            y_pred = (y_pred >= 0.5).long()
        else:
            y_pred = (np.asarray(y_pred) >= 0.5).astype(np.int64)

    metric_names = ['accuracy', 'f1', 'precision', 'recall', 'mse'] if is_classification else ['mape', 'mae', 'mse', 'rmse']
    if use_evaluate:
        if local_metrics_path:
            assert os.path.exists(local_metrics_path), f'*** local_metrics_path[{local_metrics_path}]不存在?'
            metric_names = [os.path.join(local_metrics_path, metric_name) for metric_name in metric_names]

        metric_objects = {_get_pure_metric_name(metric): evaluate.load(metric) for metric in metric_names}
    else:
        def rmse(y_true, y_pred) -> float:
            # sklearn exposes no direct RMSE; derive it from MSE.
            return np.sqrt(mean_squared_error(y_true, y_pred))

        metric_dc = {
            'accuracy': accuracy_score,
            'f1': f1_score,
            'precision': precision_score,
            'recall': recall_score,
            'mse': mean_squared_error,
            'mape': mean_absolute_percentage_error,
            'mae': mean_absolute_error,
            'rmse': rmse,
        }
        metric_objects = {}
        for metric in metric_names:
            metric_fn = metric_dc.get(metric)
            # Fail fast with a clear message instead of calling None later.
            assert metric_fn is not None, f'*** unknown metric: {metric}'
            metric_objects[_get_pure_metric_name(metric)] = metric_fn

    def _flat(arr):
        # Flatten to a 1-D numpy array, moving tensors off-device first.
        if isinstance(arr, torch.Tensor):
            return arr.view(-1).cpu().detach().numpy()
        return np.array(arr).reshape(-1)

    y_true = _flat(y_true)
    y_pred = _flat(y_pred)

    results = {}
    for metric_name, metric in metric_objects.items():
        if hasattr(metric, 'compute'):
            # HuggingFace `evaluate` metric object.
            result = metric.compute(predictions=y_pred.tolist(), references=y_true.tolist())
            results[metric_name] = round(result[metric_name], 4)
        else:
            # Plain sklearn-style callable: metric(y_true, y_pred), with
            # zero-target filtering applied for MAPE.
            result = metric(*_get_filtered_y(y_true, y_pred, metric_name))
            results[metric_name] = round(result, 4)

    if score_name is None:
        score_name = _get_pure_metric_name(metric_names[0])

    # Higher-is-better summary: error metrics are converted via 1 - value.
    score = results[score_name] if is_classification else 1 - results[score_name]
    score = max(0, min(1, score))
    score = round(score, 4)

    if debug:
        from bdtime import show_json
        show_json(results)
        print(f'--- is_classification: {is_classification} --- score[{f"1 - {score_name}" if not is_classification else score_name}]: {score}')

    return score, results


def test(data, model, evaluate_func=None, is_classification=False, score_name=None, debug=False):
    evaluate_func = evaluate_func if evaluate_func else test_by_evaluate_metrics

    x_test, y_test = data
    model.eval()
    with torch.no_grad():
        y_pred = model(x_test)
        y_pred = y_pred[0] if isinstance(y_pred, tuple) else y_pred
        y_pred.resize_as_(y_test)

        score, results = evaluate_func(y_pred, y_test, is_classification=is_classification, score_name=score_name)

        # print('------ test result:', results)

        # if evaluate_func:
        # else:
        #     if is_classification:
        #         y_pred = y_pred.squeeze(-1) > 0.5
        #         score = round(torch.sum(y_pred == y_test).item() / len(y_test), 3)
        #     else:

    return score, results


def train(data: tuple, model, optimizer, criterion, total_epoch=10000, log_interval=0.1, experiment_name=None,
          save=False, device=None, scheduler=None, scheduler_step_size=0.1,
          evaluate_func=None, is_classification=False, score_name=None,
          batch_size=32):
    """Train `model` and periodically evaluate it on the held-out split.

    Parameters:
        data: tuple of (x_train, x_test, y_train, y_test) tensors.
        model: torch module; moved to `device` and trained in place.
        optimizer / criterion: torch optimizer and loss function.
        total_epoch: number of training epochs.
        log_interval: evaluation/logging period in epochs; values < 1 are
            interpreted as a fraction of total_epoch.
        experiment_name: checkpoint subdirectory name (required when save=True).
        save: if True, write best/newest checkpoints under
            MODEL_SAVE_DIR/experiment_name.
        device: explicit device string; defaults to 'mps' on macOS, else
            'cuda' when available, else 'cpu'.
        scheduler / scheduler_step_size: optional LR scheduler; a fractional
            step size is likewise a fraction of total_epoch.
        evaluate_func / is_classification / score_name: forwarded to test().
        batch_size: mini-batch size; 0 selects full-batch training.

    Side effects: prints progress and may write checkpoint files.
    Returns None.
    """
    if device is None:
        # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        import platform
        # Prefer Apple's `mps` backend on macOS; otherwise CUDA if available.
        if platform.system() == 'Darwin':
            device = 'mps'
        else:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print('--- device:', device)

    # x_train, y_train, x_test, y_test = data
    assert len(data) == 4, f'data should be a tuple of (x_train, x_test, y_train, y_test)'
    x_train, x_test, y_train, y_test = data
    # NOTE(review): mean_train is computed but never used below (a mean-penalty
    # term that referenced it is commented out in the full-batch branch).
    mean_train = x_train.cpu().detach().mean().item()
    # x_train.shape, y_train.shape, x_test.shape, y_test.shape

    model.to(device)
    model.train()

    # Track the best evaluation score seen so far (higher is better).
    best_score = -float('inf')
    best_idx = -1
    best_results = None

    if save:
        assert experiment_name, '*** save为True时, 必须指定实验名称experiment_name'

    best_model_save_name = f'best.pth'
    newest_model_save_name = f'newest.pth'

    if experiment_name and save:
        model_save_dir = os.path.join(MODEL_SAVE_DIR, experiment_name)
        # Start from a clean experiment directory.
        remove_path(model_save_dir)
        os.makedirs(model_save_dir, exist_ok=True)
        model_save_path = os.path.join(model_save_dir, best_model_save_name)
        newest_model_save_path = os.path.join(model_save_dir, newest_model_save_name)

    # A fractional log_interval means "log every that fraction of total_epoch".
    # NOTE(review): if total_epoch * log_interval < 1 this becomes 0 and
    # `epoch % log_interval` below raises ZeroDivisionError — confirm intent.
    if log_interval < 1:
        log_interval = int(total_epoch * log_interval)

    x_test = x_test.to(device)
    y_test = y_test.to(device)

    # Likewise, a fractional scheduler_step_size is a fraction of total_epoch.
    if scheduler_step_size < 1:
        scheduler_step_size = int(total_epoch * scheduler_step_size)

    if batch_size != 0:
        # ---- mini-batch training path ----
        train_dataset = TensorDataset(x_train, y_train)
        train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

        for epoch in range(total_epoch):  # iterate over epochs
            model.train()  # back to train mode (test() switches the model to eval)

            for batch_idx, (data, target) in enumerate(train_loader):
                # Move the batch and its labels onto the training device.
                batch_x, batch_y = data.to(device), target.to(device)

                # Forward pass.
                output = model(batch_x)

                if hasattr(model, 'get_loss'):
                    from tools.lstm import LstmModel
                    model: LstmModel
                    # Let the model compute its own loss (model-specific outputs).
                    loss_val = model.get_loss(output, batch_y, criterion=criterion, is_output=True)
                elif not isinstance(output, tuple):
                    loss_val = criterion(output.squeeze(-1), batch_y)
                else:
                    raise TypeError(f'*** type output error: [{output}], output: {output}')

                    # from tools.moe import MyMoEClassifier
                    # model: MyMoEClassifier

                # Compute the loss.
                # loss = loss_function(output, target)

                optimizer.zero_grad()
                loss_val.backward()
                optimizer.step()

            flag__scheduler_step = False
            if scheduler and scheduler_step_size > 0 and epoch % scheduler_step_size == 0:
                scheduler.step()
                current_lr = scheduler.get_last_lr()[0]
                flag__scheduler_step = True

            if epoch % log_interval == 0 or epoch == total_epoch - 1:
                score, test_results = test((x_test, y_test), model,
                                           evaluate_func=evaluate_func, is_classification=is_classification,
                                           score_name=score_name)
                msg = f'epoch: {epoch} --- tt: {tt.now()} --- loss: {loss_val.item(): .3e}, test_results: {test_results}, score: {score}'
                if flag__scheduler_step and scheduler:
                    msg += f' --- lr: {current_lr: .3e}'
                print(msg)

                # if loss_val.item() < best_loss:
                if score > best_score:
                    best_score = score
                    best_idx = epoch
                    best_results = test_results
                    print(f'------ best_score: {best_score}, best_idx: {best_idx}')
                    # Save the best model so far.
                    if experiment_name and save:
                        torch.save(model.state_dict(), model_save_path)

                # Always keep the newest checkpoint as well.
                if experiment_name and save:
                    torch.save(model.state_dict(), newest_model_save_path)

    else:
        # ---- full-batch training path ----
        x_train = x_train.to(device)
        y_train = y_train.to(device)

        for epoch in range(total_epoch):
            model.train()

            output = model(x_train)

            if not isinstance(output, tuple):
                y_pred = output
                loss_val = criterion(y_pred.squeeze(-1), y_train)
            else:
                # from tools.moe import MyMoEClassifier
                # model: MyMoEClassifier
                # from tools.lstm import LstmModel
                # model: LstmModel
                loss_val = model.get_loss(output, y_train, criterion=criterion, is_output=True)

            # loss_val += torch.mean(torch.abs((mean_train - output))) * 0.1  # mean penalty
            # loss_val += torch.mean(torch.abs(output - 0.5)) * 0.1  # non-zero penalty
            optimizer.zero_grad()
            loss_val.backward()
            optimizer.step()

            flag__scheduler_step = False
            if scheduler and scheduler_step_size > 0 and epoch % scheduler_step_size == 0:
                scheduler.step()
                current_lr = scheduler.get_last_lr()[0]
                flag__scheduler_step = True

            if epoch % log_interval == 0 or epoch == total_epoch - 1:
                score, test_results = test((x_test, y_test), model,
                             evaluate_func=evaluate_func, is_classification=is_classification, score_name=score_name)
                msg = f'epoch: {epoch} --- tt: {tt.now()} --- loss: {loss_val.item(): .3e}, test_results: {test_results}, score: {score}'
                if flag__scheduler_step and scheduler:
                    msg += f' --- lr: {current_lr: .3e}'
                print(msg)

                # if loss_val.item() < best_loss:
                if score > best_score:
                    best_score = score
                    best_idx = epoch
                    best_results = test_results
                    print(f'------ best_score: {best_score}, best_idx: {best_idx}')
                    # Save the best model so far.
                    if experiment_name and save:
                        torch.save(model.state_dict(), model_save_path)

                # Always keep the newest checkpoint as well.
                if experiment_name and save:
                    torch.save(model.state_dict(), newest_model_save_path)

    print(f'\n========= best_score: {best_score}, best_idx: {best_idx}, best_results: {best_results}')
    if experiment_name and save:
        print(f'    ------ total_cost_time: {tt.now()} --- 保存最佳模型[{model_save_path}] --- best_score: {best_score}, best_idx: {best_idx}')


if __name__ == '__main__':
    # Smoke test: run the metric computation on a tiny regression-style sample.
    is_classification = 0

    y_pred = torch.FloatTensor([1, 0, 0, 1, 1])  # predicted values
    y_true = torch.FloatTensor([1, 0, 1, 1, 0])  # ground-truth values

    # Exercise the numpy input path of test_by_evaluate_metrics.
    y_pred, y_true = np.array(y_pred), np.array(y_true)

    print(f'y_true: {y_true}')
    print(f'y_pred: {y_pred}')

    local_metrics_path = r'E:\projects\pycharmProjects\evaluate-main\metrics'
    res = test_by_evaluate_metrics(y_pred, y_true, is_classification=is_classification, local_metrics_path=local_metrics_path)
    print(res, f'--- now: {tt.now()}')
