import os
import time

import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
from pathlib import Path
from work.net import Unet
from work.loss_function import CE_Loss, Dice_loss, LossHistory
from work.dataset import DeeplabDataset, deeplab_dataset_collate
from work.metrics import f_score


def get_lr(optimizer):
    """Return the learning rate of the optimizer's first parameter group.

    Returns None when the optimizer has no parameter groups (matching the
    original fall-through behavior).
    """
    groups = optimizer.param_groups
    return groups[0]['lr'] if groups else None


def fit_one_epoch(net, epoch, epoch_size, epoch_size_val, gen, genval, Epoch, cuda, lr_scheduler):
    """Run one epoch of training followed by one pass of validation.

    Relies on module-level globals defined in the ``__main__`` section:
    ``optimizer``, ``NUM_CLASSES``, ``dice_loss`` (bool flag), ``loss_history``
    and ``model`` (the unwrapped network whose weights are saved to disk).

    Args:
        net: network to run (raw model or a DataParallel wrapper).
        epoch: zero-based index of the current epoch.
        epoch_size: number of training batches consumed per epoch.
        epoch_size_val: number of validation batches consumed per epoch.
        gen: training iterator yielding (imgs, pngs, labels) numpy batches.
        genval: validation iterator with the same batch layout.
        Epoch: total number of epochs (progress display only).
        cuda: if True, move tensors to the GPU.
        lr_scheduler: LR scheduler, stepped once per training iteration.
    """
    net = net.train()  # ensure training mode (validation below switches to eval)
    total_loss = 0
    total_f_score = 0

    val_total_loss = 0
    val_total_f_score = 0
    start_time = time.time()
    with tqdm(total=epoch_size, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict, mininterval=0.3) as pbar:
        for iteration, batch in enumerate(gen):
            if iteration >= epoch_size:  # one epoch finished
                break
            imgs, pngs, labels = batch  # split out images, index masks and one-hot labels
            # Convert numpy batches to tensors; no gradient tracking needed here.
            with torch.no_grad():
                imgs = torch.from_numpy(imgs).type(torch.FloatTensor)
                pngs = torch.from_numpy(pngs).type(torch.FloatTensor).long()
                labels = torch.from_numpy(labels).type(torch.FloatTensor)
                if cuda:
                    imgs = imgs.cuda()
                    pngs = pngs.cuda()
                    labels = labels.cuda()

            optimizer.zero_grad()  # clear accumulated gradients
            outputs = net(imgs)
            loss = CE_Loss(outputs, pngs, num_classes=NUM_CLASSES)
            if dice_loss:  # optional auxiliary Dice loss
                main_dice = Dice_loss(outputs, labels)
                loss = loss + main_dice
            # f_score is a metric only: no gradients required.
            with torch.no_grad():
                _f_score = f_score(outputs, labels)
            # NOTE: the original code called torch.cuda.empty_cache() here on
            # every iteration; that forces a costly device synchronization each
            # step without lowering peak memory usage, so it was removed.
            loss.backward()  # backpropagate
            optimizer.step()  # update parameters
            lr_scheduler.step()  # deliberately stepped per iteration (author's experiment)

            # Accumulate per-epoch statistics.
            total_loss += loss.item()
            total_f_score += _f_score.item()

            waste_time = time.time() - start_time  # wall time of this step
            pbar.set_postfix(**{'total_loss': total_loss / (iteration + 1),
                                'f_score': total_f_score / (iteration + 1),
                                's/step': waste_time,
                                'lr': get_lr(optimizer)})
            pbar.update(1)

            start_time = time.time()

    # Validation: same pipeline without parameter updates. eval() matters
    # because the network contains BatchNorm layers.
    net.eval()
    print('Start Validation')
    with tqdm(total=epoch_size_val, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict, mininterval=0.3) as pbar:
        for iteration, batch in enumerate(genval):
            if iteration >= epoch_size_val:
                break
            imgs, pngs, labels = batch
            with torch.no_grad():
                imgs = torch.from_numpy(imgs).type(torch.FloatTensor)
                pngs = torch.from_numpy(pngs).type(torch.FloatTensor).long()
                labels = torch.from_numpy(labels).type(torch.FloatTensor)
                if cuda:
                    imgs = imgs.cuda()
                    pngs = pngs.cuda()
                    labels = labels.cuda()

                outputs = net(imgs)
                val_loss = CE_Loss(outputs, pngs, num_classes=NUM_CLASSES)
                if dice_loss:
                    main_dice = Dice_loss(outputs, labels)
                    val_loss = val_loss + main_dice
                _f_score = f_score(outputs, labels)

                val_total_loss += val_loss.item()
                val_total_f_score += _f_score.item()

            pbar.set_postfix(**{'total_loss': val_total_loss / (iteration + 1),
                                'f_score': val_total_f_score / (iteration + 1),
                                'lr': get_lr(optimizer)})
            pbar.update(1)

    # Exactly epoch_size / epoch_size_val batches were consumed above, so
    # average by those counts. (The original divided by size + 1, an
    # off-by-one that skewed the logged averages low.)
    mean_loss = total_loss / epoch_size
    mean_val_loss = val_total_loss / epoch_size_val
    loss_history.append_loss(mean_loss, mean_val_loss)  # persist losses to file
    print('Finish Validation')
    print('Epoch:' + str(epoch + 1) + '/' + str(Epoch))
    print('Total Loss: %.4f || Val Loss: %.4f ' % (mean_loss, mean_val_loss))

    print('Saving state, iter:', str(epoch + 1))
    # Save the unwrapped global `model`'s weights so the checkpoint carries
    # no DataParallel "module." prefix.
    torch.save(model.state_dict(), 'logs/Epoch%d-Total_Loss%.4f-Val_Loss%.4f.pth' % (
        (epoch + 1), mean_loss, mean_val_loss))


if __name__ == "__main__":
    # Entry point: builds the U-Net, gathers train/val file lists from CSV
    # manifests under dataset_path, then runs a two-stage schedule:
    # stage 1 trains with the VGG encoder frozen, stage 2 fine-tunes everything.
    log_dir = "logs/"  # where results are recorded (NOTE(review): unused below; "logs/" is hard-coded instead)
    # ------------------------------#
    #   Input image size [H, W, C]
    # ------------------------------#
    inputs_size = [512, 512, 3]
    # ---------------------#
    #   number of classes + 1
    #   2+1
    # ---------------------#
    NUM_CLASSES = 4  # changed to 4 classes for this experiment
    # --------------------------------------------------------------------#
    #   Suggested settings for the auxiliary Dice loss:
    #   few classes: True
    #   many classes (dozens) and large batch_size (>10): True
    #   many classes (dozens) and small batch_size (<10): False
    # ---------------------------------------------------------------------#
    dice_loss = True  # TODO: compare against a class-weighted variant later
    # -------------------------------#
    #   Pretrained backbone weights
    # -------------------------------#
    pretrained = True  # whether to use transfer learning
    # -------------------------------#
    #   CUDA usage
    # -------------------------------#
    # Cuda = torch.cuda.is_available()  # disabled for now; the GPU could not cope
    Cuda=True
    # ------------------------------#
    #   Dataset path
    # ------------------------------#
    #dataset_path = "VOCdevkit/VOC2007/"
    GPU_large=True  # whether GPU memory is large enough for stage 2
    dataset_path="./data/"
    # NOTE(review): assert is stripped under `python -O`; an explicit check +
    # raise would be more robust for input validation.
    assert os.path.exists(dataset_path),"数据集目录不存在"
    model = Unet(num_classes=NUM_CLASSES, in_channels=inputs_size[-1], pretrained=pretrained).train()  # start in training mode

    loss_history = LossHistory("logs/")  # loss logger
    # -------------------------------------------#
    #   See the README for weight-file downloads.
    #   Weights must match the backbone network.
    # -------------------------------------------#
    # if pretrained==True:  # load pretrained weights when using transfer learning
    #     model_path = r"model_data/vgg16-397923af.pth"  # path to the pretrained weights
    #     print('Loading weights into state dict...')
    #     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    #     model_dict = model.state_dict()  # current model parameters
    #     pretrained_dict = torch.load(model_path, map_location=device)  # pretrained parameters
    #     pretrained_dict = {k: v for k, v in pretrained_dict.items() if np.shape(model_dict[k]) == np.shape(v)}  # keep only shape-matching entries
    #     model_dict.update(pretrained_dict)
    #     model.load_state_dict(model_dict)
    #     print('Finished!')

    if Cuda:
        net = torch.nn.DataParallel(model)
        cudnn.benchmark = True  # benchmark mode speeds up cuDNN but adds slight run-to-run nondeterminism
        net = net.cuda()  # moves the wrapped model's parameters to the GPU
    # Build the training file list from each sub-dataset's files_train.csv.
    # NOTE(review): `net` is never passed to fit_one_epoch below — training runs
    # on `model` directly, so the DataParallel wrapper is effectively unused.
    train_lines=[]
    train_dataset_path_list=os.listdir(dataset_path)  # every entry under the dataset root
    for path in train_dataset_path_list:
        if os.path.exists(dataset_path+path) and Path(dataset_path+path).is_dir():  # only descend into directories
            with open(os.path.join(dataset_path,path,"files_train.csv"),"r") as p:  # per-dataset training manifest
                # Keep rows whose 6th column equals "TRUE" — presumably a
                # "use for structural segmentation" flag; TODO confirm schema.
                tmp=[i.strip().split(",") for i in p.readlines() if i.strip().split(",")[5]=="TRUE"]
                # tmp=np.array(tmp)
                # NOTE(review): IndexError here when a manifest has no TRUE rows.
                print(tmp[0][5])
                for i in range(len(tmp)):  # prefix the path columns with the dataset directory
                    # NOTE(review): the validation loop below rewrites only 3
                    # columns (range(3)); confirm whether 4 vs 3 is intentional.
                    for j in range(4):
                        # str.strip(".") drops leading/trailing dots, e.g.
                        # "./img/x.png" -> "/img/x.png", before prefixing.
                        tmp[i][j]=dataset_path+path+tmp[i][j].strip(".")
                train_lines+=tmp



    # (Old VOC-style txt manifest, kept for reference:)
    # with open(os.path.join(dataset_path, "ImageSets/Segmentation/train.txt"), "r") as f:
    #     train_lines = f.readlines()
    val_lines = []
    val_dataset_path_list = os.listdir(dataset_path)  # every entry under the dataset root
    # NOTE(review): this loop iterates train_dataset_path_list, so
    # val_dataset_path_list above is computed but never used (harmless here,
    # since both come from the same listdir call).
    for path in train_dataset_path_list:
        if os.path.exists(dataset_path + path) and Path(dataset_path + path).is_dir():  # only descend into directories
            with open(os.path.join(dataset_path,path,"files_test.csv"),"r") as p:
                # Same filter as above; mind the exact "TRUE" vs "True" spelling.
                tmp=[i.strip().split(",") for i in p.readlines() if i.strip().split(",")[5]=="TRUE"]
                # tmp=np.array(tmp)
                for i in range(len(tmp)):  # prefix the path columns with the dataset directory
                    for j in range(3):
                        tmp[i][j] =dataset_path+path+tmp[i][j].strip(".")
                val_lines += tmp
    # (Old VOC-style txt manifest, kept for reference:)
    # with open(os.path.join(dataset_path, "ImageSets/Segmentation/val.txt"), "r") as f:
    #     val_lines = f.readlines()

    # ------------------------------------------------------#
    #   Backbone features are generic; freezing them speeds up early training
    #   and protects the pretrained weights from being destroyed.
    #   Init_Epoch:      starting epoch
    #   Interval_Epoch:  last epoch of the frozen stage
    #   Epoch:           total training epochs
    #   Reduce Batch_size if you hit OOM / run out of GPU memory.
    # ------------------------------------------------------#
    # Overall plan: first train the decoder with the (pretrained) encoder
    # frozen, then unfreeze and update the whole network.
    nw = 0
    if pretrained==True:
        lr = 1e-3  # was 0.0001, changed
        Init_Epoch = 0
        Interval_Epoch = 50
        Batch_size = 1  # NOTE(review): comment history said 16, code says 1 — confirm
        # nw = min([os.cpu_count(), Batch_size if Batch_size > 1 else 0, 8])

        # Optimizer and LR scheduler.
        # NOTE(review): weight_decay=0.5 is extremely strong L2 regularization
        # for Adam (typical values: 1e-4..1e-2) — confirm this is intended.
        optimizer = optim.Adam(model.parameters(), lr,weight_decay=0.5,)
        lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=8, gamma=0.5)  # halve the LR every 8 scheduler steps
        # Wrap the raw file lists in datasets for loading/augmentation.
        train_dataset = DeeplabDataset(train_lines, inputs_size, NUM_CLASSES, True,
                                       dataset_path)
        val_dataset = DeeplabDataset(val_lines, inputs_size, NUM_CLASSES, False, dataset_path)
        # NOTE(review): shuffle=False despite the earlier comment about
        # enabling it — presumably the dataset samples randomly itself; verify.
        gen = DataLoader(train_dataset, batch_size=Batch_size, num_workers=nw, pin_memory=True,
                         drop_last=True, collate_fn=deeplab_dataset_collate,shuffle=False)
        gen_val = DataLoader(val_dataset, batch_size=Batch_size, num_workers=nw, pin_memory=True,
                             drop_last=True, collate_fn=deeplab_dataset_collate,shuffle=False)

        epoch_size = len(train_lines) // Batch_size  # parameter updates per epoch
        epoch_size_val = len(val_lines) // Batch_size

        if epoch_size == 0 or epoch_size_val == 0:
            raise ValueError("数据集过小，无法进行训练，请扩充数据集。")

        for param in model.vgg.parameters():  # freeze the VGG encoder for stage 1
            param.requires_grad = False

        for epoch in range(Init_Epoch, Interval_Epoch):  # stage-1 training loop
            fit_one_epoch(model, epoch, epoch_size, epoch_size_val, gen, gen_val, Interval_Epoch, Cuda,lr_scheduler)
            # lr_scheduler.step()  # per-epoch stepping; the scheduler is stepped inside fit_one_epoch instead

    if GPU_large:  # stage 2: skip when GPU memory is insufficient
        lr = 1e-5
        Interval_Epoch = 50
        Epoch = 100
        Batch_size = 2

        optimizer = optim.Adam(model.parameters(), lr)
        lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=8, gamma=0.5)

        train_dataset = DeeplabDataset(train_lines, inputs_size, NUM_CLASSES, True, dataset_path)
        val_dataset = DeeplabDataset(val_lines, inputs_size, NUM_CLASSES, False, dataset_path)
        gen = DataLoader(train_dataset, batch_size=Batch_size, num_workers=nw, pin_memory=True,
                         drop_last=True, collate_fn=deeplab_dataset_collate,shuffle=False)
        gen_val = DataLoader(val_dataset, batch_size=Batch_size, num_workers=nw, pin_memory=True,
                             drop_last=True, collate_fn=deeplab_dataset_collate,shuffle=False)

        epoch_size = len(train_lines) // Batch_size
        epoch_size_val = len(val_lines) // Batch_size

        if epoch_size == 0 or epoch_size_val == 0:
            raise ValueError("数据集过小，无法进行训练，请扩充数据集。")

        # NOTE(review): unfreezing after the optimizer is built works for Adam
        # (all params were passed in), but doing it before would be cleaner.
        for param in model.vgg.parameters():  # unfreeze the encoder for fine-tuning
            param.requires_grad = True

        for epoch in range(Interval_Epoch, Epoch):  # stage-2 training loop
            fit_one_epoch(model, epoch, epoch_size, epoch_size_val, gen, gen_val, Epoch, Cuda,lr_scheduler)
            # lr_scheduler.step()