import os
from tqdm import tqdm
# from data.neu_seg_competition_nation.dataset import Neu_Seg_Competition_Dataset_nation
# from data.neu_seg_competition.dataset import Neu_Seg_Competition_Dataset
from data.neu_seg_competition_nation_self.dataset import Neu_Seg_Competition_Dataset
from torch.utils.data import DataLoader
# from model.UNet import *
# from model.model import *
# E_net_EMA_ZDown_concbam
from model.model_1 import *
from data.neu_seg_competition.pinjie2 import *
from data.neu_seg_competition.pinjie4 import *
from data.neu_seg_competition_nation_self.singleChannel import *
from data.neu_seg_competition.pinjie1_4 import *
import shutil

import logging
import wandb
from utils import *
import torch.nn.functional as F

#忽略警告
import warnings
warnings.filterwarnings("ignore")

# Gradient scaler for mixed-precision (AMP) training.
# NOTE(review): constructed at import time, before device availability is checked.
scaler = torch.cuda.amp.GradScaler()
if os.path.exists('./save_model/'):
    # Delete the folder and its contents so every run starts from a clean
    # checkpoint directory (it is recreated during training).
    shutil.rmtree('./save_model/')

if __name__ == '__main__':
    # ---- Training hyperparameters ----
    epochs = 20000
    batch_size = 256  # mini-batch size on a single GPU
    # lr = 1e-2
    max_lr = 1e-2  # peak learning rate for the OneCycleLR schedule created below
    weight_decay = 1e-4
    num_classes = 4  # class 0 is excluded from the mIoU computed below
    pin_memory = True
    num_workers = 4
    # model = UNet(3, 4)
    model = self_net()
    # model.load_state_dict(torch.load('./test1_oneline_enhance_dim1_nation_ENet_enhance_b256_random_miou_0.8035534818967184_model_best_epoch_124.pth'))
    model_name = "final_model1_online_enhance_b256_random"
    # Offline data preparation: regenerate the stitched training images
    # before the run starts ("pinjie" modules = stitching scripts).
    main2()  # 2-image stitching
    main4()  # 4-image stitching
    main321()  # convert to single-channel format
    main14()  # 1-and-4 stitching
    # Seed every RNG for a degree of reproducibility; CUDA kernels can still
    # be nondeterministic unless cuDNN is forced into deterministic mode.
    def set_seed(seed):
        """Seed torch, numpy and (when present) every CUDA device RNG."""
        torch.manual_seed(seed)
        np.random.seed(seed)
        if not torch.cuda.is_available():
            return
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)  # covers multi-GPU runs
        # For fully deterministic CUDA behaviour, additionally set:
        #   torch.backends.cudnn.deterministic = True
        #   torch.backends.cudnn.benchmark = False
    # set_seed(42)
    # ---- Logging setup ----
    # Create a named logger so file and console output share one format.
    logger = logging.getLogger(model_name)
    logger.setLevel(logging.INFO)

    # Handler that writes the log to a file.
    file_handler = logging.FileHandler(f'{model_name}_train.log')
    file_handler.setLevel(logging.INFO)

    # Handler that echoes the log to the console.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)

    # Shared output format for both handlers.
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler.setFormatter(formatter)
    console_handler.setFormatter(formatter)

    # Attach both handlers to the logger.
    logger.addHandler(file_handler)
    logger.addHandler(console_handler)


    # Enable TF32 to speed up matmul / cuDNN kernels on Ampere+ GPUs.
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.allow_tf32 = True


    # Select the compute device (GPU when available).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logger.info(f"Device: {device}")

    # Alternative model initializations (kept for reference):
    # model = UNet(in_channels=3, num_classes=num_classes)
    # model = self_net(in_channels=3, num_classes=num_classes)
    # Move the model to the selected device.
    model.to(device)
    import albumentations as A


    # Online training-time augmentation pipeline (albumentations).
    t_train = A.Compose([
        # A.Resize(200, 200, interpolation=cv2.INTER_NEAREST),
        A.HorizontalFlip(),
        A.VerticalFlip(),
        A.Rotate(limit=30, p=0.3), # random rotation up to +/-30 degrees
        A.GridDistortion(p=0.2),# grid distortion
        A.RandomBrightnessContrast(0.2,0.2,p=0.5),# brightness / contrast jitter
        A.RandomGamma(p=0.2),      # random gamma adjustment
        A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, p=0.5), # shift, scale and rotate
        A.CLAHE(clip_limit=2.0, p=0.3), # contrast-limited adaptive histogram equalization
        A.Lambda(image=sp_noiseImg, p=0.2)  # custom noise helper (presumably salt-and-pepper) -- confirm in utils
        ])

    # Training data: the stitched dataset generated above, with online augmentation.
    train_dataset = Neu_Seg_Competition_Dataset(data_path='./data/neu_seg_competition_nation_self', data_type='training', transform=t_train)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers,
                              pin_memory=pin_memory,drop_last=True)
    # Validation data: no augmentation.
    test_dataset = Neu_Seg_Competition_Dataset(data_path='./data/neu_seg_competition_nation_self', data_type='test')
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers,
                             pin_memory=pin_memory)


    # NOTE(review): criterion is only referenced by commented-out code; the
    # active losses are lovasz_softmax / dice_loss in the loop below.
    criterion = torch.nn.CrossEntropyLoss().to(device)
    # optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    optimizer = torch.optim.AdamW(model.parameters(), lr=max_lr, weight_decay=weight_decay)
    # One-cycle LR schedule, stepped once per batch in the training loop.
    sched = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr, epochs=epochs, steps_per_epoch=len(train_loader))
    best_miou = 0
    decrease=1  # NOTE(review): never read afterwards
    not_improve = 0  # counter for the (commented-out) early-stopping logic below
    # Experiment tracking.
    wandb.init(
        # set the wandb project where this run will be logged
        project="nation_algorithm_elite_competition_steel_segmentation",
        # track hyperparameters and run metadata
        config={
            "learning_rate": max_lr,
            "architecture": model_name,
            "dataset": "neu_seg_competition_nation_self",
            "epochs": epochs,
        }
    )
    from datetime import datetime
    now = datetime.now()
    formatted_date_time = now.strftime("%Y-%m-%d-%H-%M-%S")
    wandb.run.name = f'{model_name}__{formatted_date_time}'
    dice = False  # False: lovasz_softmax loss; True: dice loss (switched after epoch 600)
    lr_adjusted = True  # flag: rebuild the LR schedule exactly once when the loss switches

    for epoch in range(epochs):
        # Regenerate the stitched training data every 5 epochs once past
        # epoch 50, so the model keeps seeing freshly randomized stitchings.
        # BUG FIX: the original tested `epochs % 5 == 0 and epochs >= 50`,
        # comparing the constant total epoch count (20000) instead of the
        # loop variable, so the condition was always true and the dataset
        # was rebuilt on every single epoch.
        if epoch % 5 == 0 and epoch >= 50:
            main2()  # 2-image stitching
            main4()  # 4-image stitching
            main321()  # convert to single-channel format
            main14()  # 1-and-4 stitching
            train_dataset = Neu_Seg_Competition_Dataset(data_path='./data/neu_seg_competition_nation_self', data_type='training',transform=t_train)
            train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers,
                                      pin_memory=pin_memory,drop_last=True)
        # ---- Training phase ----
        model.train()
        total_loss = torch.zeros(1, device=device)  # running epoch loss
        total_n = torch.zeros(num_classes, device=device)  # per-class intersection counts
        total_u = torch.zeros(num_classes, device=device)  # per-class union counts
        for data in tqdm(train_loader):
            optimizer.zero_grad()
            image, mask,image_name = data
            image = image.to(device)
            mask = mask.to(device)
            # gt = gt.to(device)
            # Forward pass under automatic mixed precision.
            with torch.amp.autocast(device_type="cuda"):
                pred = model(image)
            # Per-class intersection / union counts for the training mIoU.
            ni,ui = get_scores(pred, mask ,device)
            ni = ni.to(device)
            ui = ui.to(device)
            total_n += ni
            total_u += ui
            pred = F.softmax(pred, dim=1,dtype=torch.float)  # turn logits into a probability distribution
            true_mask2 = F.one_hot(mask, num_classes=4).permute(0, 3, 1, 2).float()
            # loss = criterion(pred, mask)
            # loss = dice_loss(pred, true_mask2, multiclass=True)

            # Loss selection: lovasz_softmax early on, dice loss later.
            if not dice:
                loss = lovasz_softmax(pred, mask)
            else:
                loss = dice_loss(pred, true_mask2, multiclass=True)
            # Switch to dice loss (and shrink the LR schedule) after epoch 600.
            # if loss.item() < 5:
            if epoch >600:
                dice = True
                if lr_adjusted:
                    # Rebuild OneCycleLR once with a smaller peak LR for the
                    # dice-loss phase (the lr_adjusted flag guarantees this
                    # runs a single time even though we are inside the batch loop).
                    sched = torch.optim.lr_scheduler.OneCycleLR(
                        optimizer,
                        max_lr=max_lr * 0.5,  # smaller peak learning rate
                        epochs=epochs,
                        steps_per_epoch=len(train_loader),
                        final_div_factor=10  # smaller final learning rate
                    )
                    lr_adjusted = False

            scaler.scale(loss).backward()  # AMP backward pass with loss scaling
            scaler.step(optimizer)  # optimizer step through the grad scaler
            scaler.update()  # update the scaler's loss-scale factor
            sched.step()  # per-batch learning-rate step
            # loss.backward()
            # optimizer.step()
            # BUG FIX: accumulate a detached copy -- `total_loss += loss`
            # attached every batch's autograd node to total_loss, keeping
            # graph objects alive for the whole epoch and growing memory.
            total_loss += loss.detach()
        # Clamp to avoid division by zero for classes absent this epoch.
        total_u = torch.where(total_u < 1, 1, total_u)
        total_n = torch.where(total_n < 1, 1, total_n)
        class_iou = total_n / total_u
        tavg_class1_iou = class_iou[1].item()
        tavg_class2_iou = class_iou[2].item()
        tavg_class3_iou = class_iou[3].item()
        # mIoU over the three defect classes (class 0 excluded).
        tavg_miou = (tavg_class1_iou + tavg_class2_iou + tavg_class3_iou)/3
        logger.info('[train]epoch:{}, loss:{},miou:{}'.format(epoch, total_loss.item(), tavg_miou))
        if not os.path.exists('./save_model'):
            os.mkdir('./save_model')
        # Periodic checkpoint every 10 epochs once past epoch 30.
        if (epoch + 1) % 10 == 0 and epoch > 30:
            torch.save(model.state_dict(), './save_model/{}_model_epoch_{}_loss_{}.pth'.format(model_name,epoch, total_loss.item()))
        # optimizer.zero_grad()
        # ---- Validation phase ----
        model.eval()
        # Initialize per-epoch statistics.
        min_loss=1000  # NOTE(review): reset every epoch and only read by the commented-out early stopping below
        train_loss = 0  # accumulated validation loss (name kept from original)
        total_n = torch.zeros(num_classes, device=device)
        total_u = torch.zeros(num_classes, device=device)
        with torch.no_grad():
            for data in tqdm(test_loader):
                image, mask,image_name = data
                # Move the batch to the compute device (e.g. the GPU).
                image = image.to(device)
                mask = mask.to(device)
                # Forward pass.
                outputs = model(image)
                ni, ui = get_scores(outputs, mask,device)
                ni = ni.to(device)
                ui = ui.to(device)
                total_n += ni
                total_u += ui

                # Validation loss, using the same loss selection as training.
                pred = F.softmax(outputs, dim=1)
                true_mask2 = F.one_hot(mask, num_classes=num_classes).permute(0, 3, 1, 2).float()
                # loss = dice_loss(pred, true_mask2, multiclass=True)
                if not dice:
                    loss = lovasz_softmax(pred, mask)
                else:
                    loss = dice_loss(pred, true_mask2, multiclass=True)
                train_loss += loss

        # Compute the average mIoU over the defect classes (class 0 excluded).
        total_u = torch.where(total_u < 1, 1, total_u)
        class_iou = total_n / total_u
        avg_class1_iou = class_iou[1].item()
        avg_class2_iou = class_iou[2].item()
        avg_class3_iou = class_iou[3].item()
        avg_miou = (avg_class1_iou + avg_class2_iou + avg_class3_iou) / 3
        # Keep a checkpoint whenever the validation mIoU improves (and exceeds 0.5).
        if avg_miou > best_miou:
            best_miou = avg_miou
            if not os.path.exists('./save_model_best'):
                os.mkdir('./save_model_best')
            if avg_miou>0.5:
                torch.save(model.state_dict(), './save_model_best/{}_miou_{}_model_best_epoch_{}.pth'.format(model_name,avg_miou,epoch))

        # mIoU-based early stopping (disabled):
        # if avg_miou > best_miou:
        #     best_miou = avg_miou
        #     not_improve = 0
        #     print(f'New best mIoU: {best_miou}')
        #     # Save model logic...
        # else:
        #     not_improve += 1
        #     print(f'No improvement in mIoU for {not_improve} epochs')
        #     if not_improve == 10:
        #         print('early stopping 10')
        #         break

        # Loss-based early stopping (disabled): increment the not_improve
        # counter whenever the validation loss exceeds the running minimum.
        # if (train_loss/len(test_loader)) > min_loss:
        #     not_improve += 1
        #     print(f'Loss Not Decrease for {not_improve} time')
        #     if not_improve == 30000:
        #         print('Loss not decrease for 30000 times, Stop Training')
        #         break
        # else:
        #     not_improve = 0
        #     min_loss = (train_loss/len(test_loader))

        # Per-epoch metrics: to the log file/console and to wandb.
        logger.info('[valid]epoch:{}, avg_miou:{},avg_class1_iou{},avg_class2_iou{},avg_class3_iou{}'.format(epoch, avg_miou, avg_class1_iou, avg_class2_iou, avg_class3_iou))
        wandb.log({"miou": avg_miou,"class1_iou": avg_class1_iou,"class2_iou": avg_class2_iou,"class3_iou": avg_class3_iou,"best_miou": best_miou,"loss": total_loss,"train_miou": tavg_miou, "train_class1_iou": tavg_class1_iou, "train_class2_iou": tavg_class2_iou,"train_class3_iou": tavg_class3_iou})

    # End of training: close the wandb run.
    wandb.finish()