import torch as t
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from datetime import datetime
from dataset import CamvidDataset
from evalution_segmentaion import eval_semantic_segmentation
from FCN import FCN
import cfg

# Device setup: use the GPU when available, otherwise fall back to CPU.
device = t.device('cuda') if t.cuda.is_available() else t.device('cpu')
# CamvidDataset: project dataset class that loads and preprocesses CamVid samples.

# Datasets: [image root, label root] plus the crop size applied to each sample.
Cam_train = CamvidDataset([cfg.TRAIN_ROOT, cfg.TRAIN_LABEL], cfg.crop_size)
Cam_val = CamvidDataset([cfg.VAL_ROOT, cfg.VAL_LABEL], cfg.crop_size)

# DataLoaders: shuffle=True randomizes sample order each epoch;
# num_workers=0 keeps loading in the main process (raise it, e.g. to 4, on a GPU machine).
train_data = DataLoader(Cam_train, batch_size=cfg.BATCH_SIZE, shuffle=True, num_workers=0)
val_data = DataLoader(Cam_val, batch_size=cfg.BATCH_SIZE, shuffle=True, num_workers=0)

fcn = FCN(12)  # instantiate the model with the number of classes (12 for CamVid)
fcn = fcn.to(device)  # move model parameters to the selected device
criterion = nn.NLLLoss().to(device)  # expects log-probabilities (hence F.log_softmax below)
optimizer = optim.Adam(fcn.parameters(), lr=1e-4)  # Adam optimizer
# NOTE(review): SGD is often recommended for RGB-D datasets — original author's note.


# Semantic segmentation: each epoch runs one training pass (a validation pass
# can be run separately via evaluate()).

# Two nested loops:
'''
        for Epoch   outer loop; train_loss accumulates the per-batch losses
            for Batch   inner loop over the mini-batches of one epoch
'''

def train(model):
    """Train ``model`` on the CamVid training set for ``cfg.EPOCH_NUMBER`` epochs.

    Uses the module-level ``train_data``, ``criterion``, ``optimizer`` and
    ``device``. After each epoch the epoch-mean metrics are printed; whenever
    the epoch-mean IoU reaches a new best, the weights are saved to
    ``'<epoch>.pth'``.

    :param model: the network to train (an ``nn.Module``, e.g. ``FCN``)
    """
    best_miou = 0.0          # best epoch-mean IoU seen so far (replaces the ever-growing list)
    net = model.train()      # switch to training mode (enables dropout / BN updates)
    # The FCN paper reports that many epochs (~175) are needed for good results.
    for epoch in range(cfg.EPOCH_NUMBER):
        print('Epoch is [{}/{}]'.format(epoch + 1, cfg.EPOCH_NUMBER))
        # Halve the learning rate every 50 epochs.
        if epoch % 50 == 0 and epoch != 0:
            for group in optimizer.param_groups:
                group['lr'] *= 0.5

        # Per-epoch metric accumulators.
        train_loss = 0
        train_acc = 0
        train_miou = 0       # mean IoU — the key segmentation metric
        train_class_acc = 0

        for i, sample in enumerate(train_data):
            # torch.autograd.Variable is deprecated since PyTorch 0.4 —
            # plain tensors track gradients; just move them to the device.
            img_data = sample['img'].to(device)            # [B, 3, H, W]
            # .long() matches the dtype NLLLoss expects (consistent with evaluate()).
            img_label = sample['label'].long().to(device)  # [B, H, W]

            out = net(img_data)              # [B, 12, H, W] raw class scores
            out = F.log_softmax(out, dim=1)  # NLLLoss needs log-probabilities
            loss = criterion(out, img_label)

            optimizer.zero_grad()  # clear gradients left over from the previous step
            loss.backward()        # backpropagate
            optimizer.step()       # update the weights
            train_loss += loss.item()

            # Evaluation: argmax over the class dim gives the predicted label map.
            pre_label = out.max(dim=1)[1].data.cpu().numpy()  # (B, H, W)
            pre_label = list(pre_label)
            true_label = img_label.data.cpu().numpy()         # (B, H, W)
            true_label = list(true_label)

            # Confusion-matrix based metrics for this batch.
            eval_metrix = eval_semantic_segmentation(pre_label, true_label)
            train_acc += eval_metrix['mean_class_accuracy']
            train_miou += eval_metrix['miou']
            train_class_acc += eval_metrix['class_accuracy']

            print('|batch[{}/{}]|batch_loss {: .8f}|'.format(i + 1, len(train_data), loss.item()))

        epoch_miou = train_miou / len(train_data)
        metric_description = '|Train Acc|: {:.5f}|Train Mean IU|: {:.5f}\n|Train_class_acc|:{:}'.format(
            train_acc / len(train_data),
            epoch_miou,
            train_class_acc / len(train_data),
        )
        print(metric_description)

        # Save the weights whenever the epoch-mean IoU matches or beats the best so far.
        if best_miou <= epoch_miou:
            best_miou = epoch_miou
            t.save(net.state_dict(), '{}.pth'.format(epoch))

# Validation mirrors training, minus backpropagation.
def evaluate(model):
    """Run one validation pass over ``val_data`` and print loss/metrics.

    Same loop shape as :func:`train` but with gradients disabled and no
    optimizer step. Uses the module-level ``val_data``, ``criterion`` and
    ``device``.

    :param model: the network to evaluate (an ``nn.Module``, e.g. ``FCN``)
    """
    net = model.eval()  # inference mode: freezes dropout / BN statistics
    eval_loss = 0
    eval_acc = 0
    eval_miou = 0
    eval_class_acc = 0

    prec_time = datetime.now()
    with t.no_grad():  # no gradients needed for validation — saves memory/time
        for j, sample in enumerate(val_data):
            valImg = sample['img'].to(device)
            valLabel = sample['label'].long().to(device)

            out = net(valImg)
            out = F.log_softmax(out, dim=1)  # NLLLoss expects log-probabilities
            loss = criterion(out, valLabel)
            eval_loss += loss.item()

            pre_label = out.max(dim=1)[1].data.cpu().numpy()
            pre_label = list(pre_label)
            true_label = valLabel.data.cpu().numpy()
            true_label = list(true_label)

            eval_metrics = eval_semantic_segmentation(pre_label, true_label)
            eval_acc += eval_metrics['mean_class_accuracy']
            eval_miou += eval_metrics['miou']
            # BUG FIX: this accumulation was commented out (and misspelled
            # 'eval_metrix'), so the printed class accuracy was always 0.
            eval_class_acc += eval_metrics['class_accuracy']

    cur_time = datetime.now()
    # BUG FIX: timedelta.seconds wraps at 24h; total_seconds() counts full days too.
    h, remainder = divmod(int((cur_time - prec_time).total_seconds()), 3600)
    m, s = divmod(remainder, 60)
    time_str = 'Time: {:.0f}:{:.0f}:{:.0f}'.format(h, m, s)

    val_str = ('|Valid Loss|: {:.5f} \n|Valid Acc|: {:.5f} \n|Valid Mean IU|: {:.5f} \n|Valid Class Acc|:{:}'.format(
        # BUG FIX: loss was divided by len(train_data) instead of len(val_data).
        eval_loss / len(val_data),
        eval_acc / len(val_data),
        eval_miou / len(val_data),
        eval_class_acc / len(val_data)))
    print(val_str)
    print(time_str)


if __name__ == "__main__":
    # Entry point: runs training only; evaluate(fcn) can be called separately.
    train(fcn)

