import os
import torch
import colorama
from torch.utils.tensorboard import SummaryWriter
from config_regress import config
from loader.loader import get_loader
import wandb
import time
import numpy as np
import math
import warnings
from scipy.special import lambertw
from sklearn.metrics import r2_score  # Import r2_score
from scipy.stats import pearsonr  # 相关性系数

# Suppress third-party warnings and auto-reset terminal colors after each print.
warnings.filterwarnings("ignore")
colorama.init(autoreset=True)

# Training hyper-parameters pulled from the shared config dict (config_regress).
epoch_num = config['epoch_num']  # total number of training epochs
weight_decay = config['weight_decay']  # AdamW weight-decay coefficient
initial_lr = config['initial_lr']  # base learning rate given to the optimizer
max_lr = config['max_lr']  # peak learning rate reached at the end of warm-up
warmup_epochs = config['warmup_epochs']  # number of epochs of linear LR warm-up
output_folder = "output5"  # root directory for saved model checkpoints


def solve_up_for_natural_exponential_function_inverse(x, scale=50):
    """Invert the "up" exponential encoding via the Lambert W function.

    Solves y * exp(y / scale - 1) = x for y, i.e. returns the inverse of
    f(y) = y * exp(y / scale - 1) evaluated at x.

    Args:
        x: encoded value(s); scalar or numpy array.
        scale: exponent denominator; defaults to 50, the original hard-coded
            constant, so existing single-argument callers are unchanged.

    Returns:
        The real-valued solution y, same shape as x.
    """
    # Rearranged: (y/scale) * e^(y/scale) = (x/scale) * e, which is solved by
    # the principal branch of the Lambert W function.
    C = (x / scale) * np.exp(1)
    s = lambertw(C)
    # lambertw returns a complex value; the principal branch is real for C >= 0.
    y = scale * s.real
    return y


def solve_down_for__natural_exponential_function_inverse(x, scale=40):
    """Invert the "down" exponential encoding via the Lambert W function.

    Solves y * exp(y / scale - 1) = x for y, i.e. returns the inverse of
    f(y) = y * exp(y / scale - 1) evaluated at x.

    Args:
        x: encoded value(s); scalar or numpy array.
        scale: exponent denominator; defaults to 40, the original hard-coded
            constant, so existing single-argument callers are unchanged.

    Returns:
        The real-valued solution y, same shape as x.
    """
    # Rearranged: (y/scale) * e^(y/scale) = (x/scale) * e, which is solved by
    # the principal branch of the Lambert W function.
    C = (x / scale) * np.exp(1)
    s = lambertw(C)
    # lambertw returns a complex value; the principal branch is real for C >= 0.
    y = scale * s.real
    return y


def lr_lambda(epoch):
    """LR multiplier for LambdaLR: linear warm-up, then half-cosine decay.

    During the first `warmup_epochs` epochs the multiplier ramps linearly up
    to max_lr / initial_lr; afterwards it follows a cosine curve stretched
    over 2 * epoch_num - warmup_epochs epochs (so it never reaches zero
    within the training run).
    """
    peak_ratio = max_lr / initial_lr
    if epoch < warmup_epochs:
        # Linear ramp: reaches peak_ratio exactly at the last warm-up epoch.
        return peak_ratio * (epoch + 1) / warmup_epochs
    progress = (epoch - warmup_epochs) / (2 * epoch_num - warmup_epochs)
    return peak_ratio * 0.5 * (1 + math.cos(math.pi * progress))


def evaluate_statistical_coefficient(predictions, targets):
    """Decode predictions/targets back to physical scale and compute metrics.

    The first two output channels are decoded with the "up" (scale-50)
    Lambert-W inverse and the remaining channels with the "down" (scale-40)
    inverse; RMSE, MAE, R^2 and the Pearson correlation are then computed on
    the decoded values.

    Args:
        predictions: model outputs, shape (n_samples, n_outputs) or (n_outputs,).
        targets: ground-truth values, same shape as predictions.

    Returns:
        (rmse, mae, r2, pearson_r, pearson_p) — pearson_p is the two-sided
        p-value reported by scipy.stats.pearsonr, not a second correlation.
    """
    # Work on float copies so the caller's arrays are not mutated in place
    # (the original wrote the decoded values back into the inputs).
    predictions = np.array(predictions, dtype=float)
    targets = np.array(targets, dtype=float)

    # Split on the LAST axis with `...`: for a (n_samples, n_outputs) batch
    # this decodes the first two output COLUMNS with the "up" inverse and the
    # rest with the "down" inverse. The original `[:2]` sliced the first two
    # SAMPLES instead, applying the wrong transform to whole rows. For 1-D
    # input `[..., :2]` is identical to `[:2]`, so that case is unchanged.
    predictions[..., :2] = solve_up_for_natural_exponential_function_inverse(predictions[..., :2])
    predictions[..., 2:] = solve_down_for__natural_exponential_function_inverse(predictions[..., 2:])
    targets[..., :2] = solve_up_for_natural_exponential_function_inverse(targets[..., :2])
    targets[..., 2:] = solve_down_for__natural_exponential_function_inverse(targets[..., 2:])

    rmse = np.sqrt(np.mean((predictions - targets) ** 2))
    mae = np.mean(np.abs(predictions - targets))
    r2 = r2_score(targets, predictions)  # multi-output R^2 (uniform average)
    # pearsonr returns (correlation, p-value) over the flattened arrays.
    correlation_coefficient, p_value = pearsonr(predictions.ravel(), targets.ravel())
    return rmse, mae, r2, correlation_coefficient, p_value


def train(model, log=None):
    """Train `model` on the regression task, evaluating after every epoch.

    Args:
        model: torch.nn.Module mapping a batch of images to regression outputs.
        log: cross-validation fold index; selects the data split via
            get_loader(fold_index=log) and names the log/checkpoint folders.
            When None, a timestamp is used as the run name instead.

    Side effects: logs metrics to wandb and TensorBoard, prints progress to
    stdout, and (when config['save'] is set) writes checkpoints under
    f'{output_folder}/{log_name}' (best.pth, periodic epoch_*.pth, last.pth).
    """
    wandb.init(project='OCTA_regress',
               name=f"{time.strftime('%m%d%H%M%S')}_wmp-ep:{warmup_epochs}_lr:{initial_lr}_{max_lr}")
    if log is None:
        log_name = str(time.strftime('%m%d%H%M%S'))
    else:
        # log_name = "fold" + '_' + str(log) + "_regression" + '_' + str(time.strftime('%m%d%H%M%S'))
        log_name = "fold" + '_' + str(log) + "_regression"
    writer = SummaryWriter(f'logs/{log_name}')
    print(colorama.Fore.BLACK + colorama.Back.RED + colorama.Style.BRIGHT + 'tensorboard --logdir=logs')

    train_loader, val_loader, test_loader = get_loader(regression=True, fold_index=log)  # assumes a get_loader() helper exists
    # Sizes are approximate: len(loader) * batch_size overcounts a partial last batch.
    print(colorama.Fore.CYAN + f'Train size ~ {len(train_loader) * config["train_batch_size"]}', end='\t')
    print(colorama.Fore.CYAN + f'Valid size ~ {len(val_loader) * config["val_batch_size"]}', end='\t')
    print(colorama.Fore.CYAN + f'Test size ~ {len(test_loader) * config["val_batch_size"]}')

    device = torch.device(config['device'])
    print(colorama.Fore.RED + f'Device: {device}')
    model = model.to(device)

    # Mean-squared-error loss, suited to regression.
    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.AdamW(model.parameters(), lr=initial_lr, weight_decay=weight_decay)
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)

    def evaluate_testset():
        """Evaluate on the validation loader and return a dict of metrics.

        NOTE(review): despite its name and the 'test_*' metric keys, this
        iterates `val_loader`, not `test_loader` — confirm that is intended.
        """
        loss_list = []
        predictions_list = []
        targets_list = []

        with torch.no_grad():
            for images, targets in val_loader:
                images = images.to(device)
                targets = targets.to(device).float()  # ensure targets are float
                outputs = model(images)

                loss = criterion(outputs, targets)
                loss_list.append(loss.item())

                predictions_list.extend(outputs.cpu().numpy())
                targets_list.extend(targets.cpu().numpy())

        predictions = np.array(predictions_list)
        targets = np.array(targets_list)
        rmse, mae, r2, correlation_coefficient, pearson = evaluate_statistical_coefficient(predictions, targets)

        # # Print sample predictions and targets here
        # print("Sample predictions and targets (Test Set):")
        # print(len(predictions), len(targets))
        # for i in range(4):  # print the first four samples
        #     print(f"Prediction: {predictions[i]}, Target: {targets[i]}")

        log_test = {
            'test_loss': np.mean(loss_list),
            'test_RMSE': rmse,
            'test_MAE': mae,
            'test_R2': r2,  # Log R² Score
            'test_Pearson': correlation_coefficient  # Log Pearson Score
        }
        return log_test

    batch_idx = 0  # global step counter across all epochs (logged as 'batch')
    best_rmse = float('inf')  # best validation RMSE so far; checkpoint criterion

    for epoch in range(epoch_num):
        print(colorama.Fore.GREEN + f'--- Epoch {epoch} ---', end='\t')
        model.train()
        total_loss = 0
        predictions_list = []
        targets_list = []

        for images, labels in train_loader:
            batch_idx += 1
            images = images.to(device)
            labels = labels.to(device).float()  # ensure labels are float
            optimizer.zero_grad()
            outputs = model(images)

            # If the output and label shapes differ, reshape the labels
            if outputs.shape != labels.shape:
                labels = labels.view_as(outputs)  # reshape labels to match the outputs

            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
            predictions_list.extend(outputs.detach().cpu().numpy())
            targets_list.extend(labels.detach().cpu().numpy())

        # Advance the LR schedule once per epoch (matches lr_lambda's epoch argument).
        scheduler.step()
        avg_train_loss = total_loss / len(train_loader)
        print(colorama.Fore.BLUE + f'loss: {avg_train_loss:.5f}', end='\t')
        print(colorama.Fore.MAGENTA + f'lr: {scheduler.get_last_lr()[0]:.6f}', end='\t')
        writer.add_scalar('loss', avg_train_loss, epoch)
        predictions = np.array(predictions_list)
        targets = np.array(targets_list)
        rmse, mae, r2, correlation_coefficient, pearson = evaluate_statistical_coefficient(predictions, targets)

        log_train = {
            'epoch': epoch,
            'batch': batch_idx,
            'train_loss': avg_train_loss,
            'train_RMSE': rmse,
            'train_MAE': mae,
            'train_R2': r2,  # Log R² Score
            'train_Pearson': correlation_coefficient  # Log Pearson Score
        }
        wandb.log(log_train)

        # Switch to eval mode before computing validation metrics.
        model.eval()
        log_test = evaluate_testset()
        wandb.log(log_test)

        print(colorama.Fore.YELLOW + f'RMSE: {log_test["test_RMSE"]:.4f}', end='\t')
        print(colorama.Fore.YELLOW + f'MAE: {log_test["test_MAE"]:.4f}', end='\t')
        print(colorama.Fore.LIGHTGREEN_EX + f'R2: {log_test["test_R2"]:.4f}', end='\t')  # Print R² Score
        print(colorama.Fore.CYAN + f'Pearson: {log_test["test_Pearson"]:.4f}', end='\t')  # Print Pearson Score

        # Save the best checkpoint (judged by validation RMSE)
        if config['save'] and log_test['test_RMSE'] < best_rmse:
            if not os.path.exists(f'{output_folder}/{log_name}'):
                os.makedirs(f'{output_folder}/{log_name}')
            torch.save(model.state_dict(), f'{output_folder}/{log_name}/best.pth')
            best_rmse = log_test['test_RMSE']
        # Also keep a periodic checkpoint every 50 epochs (includes epoch 0).
        if config['save'] and epoch % 50 == 0:
            if not os.path.exists(f'{output_folder}/{log_name}'):
                os.makedirs(f'{output_folder}/{log_name}')
            torch.save(model.state_dict(), f'{output_folder}/{log_name}/epoch_{epoch}.pth')

        print(colorama.Fore.RED + f'best RMSE: {best_rmse:.4f}', end='\t')
        print()

        # # After each epoch, print train-set predictions and targets
        # with torch.no_grad():
        #     for images, labels in train_loader:
        #         images = images.to(device)
        #         labels = labels.to(device).float()
        #         outputs = model(images)
        #
        #         # If the output and label shapes differ, reshape the labels
        #         if outputs.shape != labels.shape:
        #             labels = labels.view_as(outputs)
        #
        #         predictions = outputs.cpu().numpy()
        #         targets = labels.cpu().numpy()
        #         predictions[:2] = solve_up_for_natural_exponential_function_inverse(predictions[:2])
        #         predictions[2:] = solve_down_for__natural_exponential_function_inverse(predictions[2:])
        #         targets[:2] = solve_up_for_natural_exponential_function_inverse(targets[:2])
        #         targets[2:] = solve_down_for__natural_exponential_function_inverse(targets[2:])
        #
        #         print("Sample predictions and targets (Train Set):")
        #         print(len(predictions), len(targets))
        #         for i in range(4):  # print the first four samples
        #             print(f"Prediction: {predictions[i]}, Target: {targets[i]}")
        #
        #             # only print once, then break out of the loop
        #         break

    # Always save the final weights at the end of training.
    if config['save']:
        if not os.path.exists(f'{output_folder}/{log_name}'):
            os.makedirs(f'{output_folder}/{log_name}')
        torch.save(model.state_dict(), f'{output_folder}/{log_name}/last.pth')
    writer.close()
