import os
import torch
import colorama
from sklearn.metrics import roc_auc_score
from torch.utils.tensorboard import SummaryWriter
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from config import config
from loader.loader import get_loader, get_class_weights
import wandb
import time
import numpy as np
import math
# 忽略烦人的红色提示
import warnings

# Suppress noisy warnings; colorama auto-resets terminal styles after each print.
warnings.filterwarnings("ignore")
colorama.init(autoreset=True)

# Training hyperparameters pulled from the shared config dict.
epoch_num = config['epoch_num']
weight_decay = config['weight_decay']
initial_lr = config['initial_lr']
max_lr = config['max_lr']
warmup_epochs = config['warmup_epochs']
output_folder = "output5"


# Custom learning-rate schedule factor (used with LambdaLR).
def lr_lambda(epoch):
    """Return the multiplier applied to ``initial_lr`` at the given epoch.

    Linear warm-up for the first ``warmup_epochs`` epochs, then cosine
    annealing from ``max_lr`` toward zero over the remaining epochs.
    """
    peak_ratio = max_lr / initial_lr
    if epoch >= warmup_epochs:
        # Cosine annealing phase, stretched until the end of training.
        progress = (epoch - warmup_epochs) / (epoch_num - warmup_epochs)
        return peak_ratio * 0.5 * (1 + math.cos(math.pi * progress))
    # Warm-up phase: learning rate rises linearly up to max_lr.
    return peak_ratio * (epoch + 1) / warmup_epochs


def train(model, log=None):
    """Train ``model`` with class-weighted cross-entropy and a warm-up +
    cosine-annealing learning-rate schedule.

    Parameters
    ----------
    model : torch.nn.Module
        Classifier to train; it is moved onto ``config['device']``.
    log : optional
        Cross-validation fold index. When given, it selects the data fold and
        names the run; when ``None``, a timestamped run name is used.

    Side effects: logs metrics to wandb and TensorBoard, and (when
    ``config['save']`` is truthy) writes ``best.pth`` / ``epoch_N.pth`` /
    ``last.pth`` checkpoints under ``output5/<run-name>/``.
    """
    wandb.init(project='OCTA', name=f"{time.strftime('%m%d%H%M%S')}_wmp-ep:{warmup_epochs}_lr:{initial_lr}_{max_lr}")
    # Run name: timestamped, or keyed by the CV fold index when one is given.
    if log is None:
        log_name = str(time.strftime('%m%d%H%M%S'))
    else:
        log_name = "fold" + '_' + str(log) + "_classification"
    writer = SummaryWriter(f'logs/{log_name}')
    # Remind the user how to watch training live in TensorBoard.
    print(colorama.Fore.BLACK + colorama.Back.RED + colorama.Style.BRIGHT + 'tensorboard --logdir=logs')

    train_loader, val_loader, test_loader = get_loader(fold_index=log)
    print(colorama.Fore.CYAN + f'Train size ~ {len(train_loader) * config["train_batch_size"]}', end='\t')
    print(colorama.Fore.CYAN + f'Valid size ~ {len(val_loader) * config["val_batch_size"]}', end='\t')
    print(colorama.Fore.CYAN + f'Test size ~ {len(test_loader) * config["val_batch_size"]}')
    log_train = {'epoch': 0, 'batch': 0}

    device = torch.device(config['device'])
    print(colorama.Fore.RED + f'Device: {device}')
    model = model.to(device)
    # Class-weighted cross-entropy to counter label imbalance.
    weights = get_class_weights()
    criterion = torch.nn.CrossEntropyLoss(weight=weights)
    criterion.to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=initial_lr,
                                  weight_decay=weight_decay)
    # lr_lambda implements linear warm-up followed by cosine annealing.
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
    ckpt_dir = f'{output_folder}/{log_name}'

    def _evaluate(epoch):
        """Run one pass over the validation loader and return a metrics dict.

        NOTE(review): metric keys are prefixed 'test_' but the data comes from
        ``val_loader`` — key names kept for compatibility with existing logs.
        """
        loss_list, labels_list, preds_list, probs_list = [], [], [], []
        with torch.no_grad():
            for images, labels in val_loader:
                images = images.to(device)
                labels = labels.to(device)
                outputs = model(images)
                probs = torch.nn.functional.softmax(outputs, dim=1)
                loss = criterion(outputs, labels)
                loss_list.append(loss.item())
                preds_list.extend(probs.argmax(dim=1).cpu().numpy())
                # Positive-class probability; assumes binary classification
                # (consistent with the original probs[:, 1] usage).
                probs_list.extend(probs[:, 1].cpu().numpy())
                labels_list.extend(labels.cpu().numpy())
        return {
            'epoch': epoch,
            'test_loss': np.mean(loss_list),
            'test_accuracy': accuracy_score(labels_list, preds_list),
            'test_precision': precision_score(labels_list, preds_list, average='macro'),
            'test_recall': recall_score(labels_list, preds_list, average='macro'),
            'test_f1-score': f1_score(labels_list, preds_list, average='macro'),
            # BUGFIX: ROC-AUC must be computed from scores/probabilities,
            # not hard class predictions.
            'test_AUC': roc_auc_score(labels_list, probs_list),
        }

    batch_idx = 0
    best = 0.
    for epoch in range(epoch_num):
        print(colorama.Fore.GREEN + f'--- Epoch {epoch} ---', end='\t')
        model.train()
        total_loss = 0
        for images, labels in train_loader:
            batch_idx += 1
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()

            # Per-batch training metrics for wandb.
            _, preds = torch.max(outputs, 1)
            log_train['epoch'] = epoch
            log_train['batch'] = batch_idx
            log_train['train_loss'] = loss.item()
            log_train['train_accuracy'] = accuracy_score(labels.cpu().numpy(),
                                                         preds.cpu().numpy())
            wandb.log(log_train)
        scheduler.step()
        print(colorama.Fore.BLUE + f'loss: {total_loss / len(train_loader):.5f}', end='\t')
        print(colorama.Fore.MAGENTA + f'lr: {scheduler.get_last_lr()[0]:.6f}', end='\t')
        writer.add_scalar('loss', total_loss, epoch)

        # Single validation pass per epoch (the original iterated val_loader
        # twice, computing the same quantities both times).
        model.eval()
        log_test = _evaluate(epoch)
        wandb.log(log_test)

        acc = log_test['test_accuracy'] * 100
        print(colorama.Fore.YELLOW + f'acc: {acc:.2f}%', end='\t')
        writer.add_scalar('acc', acc, epoch)

        roc_auc = log_test['test_AUC']
        print(colorama.Fore.YELLOW + f'AUC: {roc_auc:.4f}', end='\t')
        writer.add_scalar('AUC', roc_auc, epoch)

        # Checkpoint whenever validation AUC improves, plus every 20 epochs.
        if config['save'] and roc_auc > best:
            os.makedirs(ckpt_dir, exist_ok=True)
            torch.save(model.state_dict(), f'{ckpt_dir}/best.pth')
        if config['save'] and epoch % 20 == 0:
            os.makedirs(ckpt_dir, exist_ok=True)
            torch.save(model.state_dict(), f'{ckpt_dir}/epoch_{epoch}.pth')

        best = max(roc_auc, best)
        print(colorama.Fore.RED + f'best AUC: {best:.4f}', end='\t')
        print()

    if config['save']:
        os.makedirs(ckpt_dir, exist_ok=True)
        torch.save(model.state_dict(), f'{ckpt_dir}/last.pth')
    writer.close()
    # BUGFIX: close the wandb run so successive folds get separate runs.
    wandb.finish()
