import os

import torch
import torchvision
from torch.utils.tensorboard import SummaryWriter

from dataset import Crack
from evaluation import confusion_matrix, pixelAccuracy, clsPixelAccuracy, MIOU
# from model import MyFCN
# from modelLRASSP import MyLRASSP
from modelDeeplab import MyDeeplab


if __name__ == '__main__':
    # Train a DeepLab model on the Crack segmentation dataset with gradient
    # accumulation, logging per-batch train metrics and per-epoch test metrics
    # to TensorBoard, and checkpointing the model after every epoch.

    # --- Model (alternative backbones kept for reference) ---
    # model = MyFCN()
    # model = MyLRASSP()
    model = MyDeeplab()

    # --- Datasets & loaders ---
    data_train = Crack(r'./data/train/imgs', r'./data/train/masks')
    data_test = Crack(r'./data/test/imgs', r'./data/test/masks')
    data_train_loader = torch.utils.data.DataLoader(
        dataset=data_train, batch_size=4, shuffle=True, drop_last=True)
    data_test_loader = torch.utils.data.DataLoader(
        dataset=data_test, batch_size=4, shuffle=True, drop_last=True)
    # Gradient accumulation (below) emulates an effective batch size of
    # 4 * accumulation_steps = 32 without the extra GPU memory.

    # --- Loss & optimizer ---
    loss_func = torch.nn.CrossEntropyLoss()
    optim = torch.optim.Adam(params=model.parameters(), lr=0.001)

    # --- Device selection ---
    device = torch.device('cpu')
    if torch.cuda.is_available():
        device = torch.device('cuda')
        model.to(device)
        loss_func.to(device)
    print('using device: ', device)

    # --- TensorBoard logging ---
    log_writer = SummaryWriter(r'./log_deeplab')

    # --- Training ---
    accumulation_steps = 8  # optimizer steps once per 8 mini-batches
    batch_cnt = 0           # global train-batch counter (x-axis for train logs)
    epoch = 75
    optim.zero_grad()       # start from clean gradients
    for i in range(epoch):
        print(f'epoch {i} started')
        model.train()
        for j, (imgs, labels) in enumerate(data_train_loader):
            print(f'batch {batch_cnt} started')
            imgs = imgs.to(device)
            labels = labels.to(device)

            # Forward pass; torchvision segmentation models return a dict,
            # the prediction tensor lives under 'out'.
            output = model(imgs)['out']
            # Scale the loss so the accumulated gradient matches the
            # gradient of one large (effective) batch.
            loss_value = loss_func(output, labels) / accumulation_steps
            print(f'batch {batch_cnt} train loss: {loss_value.item()}')

            # Per-batch metrics (argmax over the class dim for the CM).
            output = output.argmax(dim=1)
            CM = confusion_matrix(labels, output)
            acc = pixelAccuracy(CM)
            clsacc = clsPixelAccuracy(CM)
            miou = MIOU(CM)
            print(f'batch {batch_cnt} acc: {acc}')
            print(f'batch {batch_cnt} cls(0)acc: {clsacc[0]}   cls(1)acc: {clsacc[1]}')
            print(f'batch {batch_cnt} miou: {miou}')

            log_writer.add_scalar('train acc', acc, batch_cnt)
            # Dedicated loop names: the original shadowed the batch index
            # `j` and clobbered `acc` here.
            for cls_idx, cls_acc in enumerate(clsacc):
                log_writer.add_scalar(f'train cls({cls_idx}) acc', cls_acc, batch_cnt)
            log_writer.add_scalar('train miou', miou, batch_cnt)
            log_writer.add_scalar('train loss', loss_value.item(), batch_cnt)

            # Backward pass: gradients accumulate across batches
            # (deliberately NOT zeroed here).
            loss_value.backward()

            # Step/zero only once every `accumulation_steps` batches.
            # Keyed on the per-epoch index `j` so accumulation windows
            # never straddle an epoch boundary.
            if (j + 1) % accumulation_steps == 0:
                optim.step()
                optim.zero_grad()
            batch_cnt += 1
            print(' ------------------------ ')

        # Flush any partial accumulation window left at epoch end so stale
        # scaled gradients do not leak into the next epoch.
        if len(data_train_loader) % accumulation_steps != 0:
            optim.step()
            optim.zero_grad()

        # --- Validation ---
        print(f'eval {i} started')
        model.eval()
        with torch.no_grad():
            for imgs, labels in data_test_loader:
                imgs = imgs.to(device)
                labels = labels.to(device)
                output = model(imgs)['out']  # segmentation output under 'out'
                loss_value = loss_func(output, labels)

                # Per-batch test metrics.
                output = output.argmax(dim=1)
                CM = confusion_matrix(labels, output)
                acc = pixelAccuracy(CM)
                clsacc = clsPixelAccuracy(CM)
                miou = MIOU(CM)

                print(f'epoch {i} test acc: {acc}')
                print(f'epoch {i} test cls(0)acc: {clsacc[0]}   cls(1)acc: {clsacc[1]}')
                print(f'epoch {i} test miou: {miou}')
                print(f'epoch {i} test loss: {loss_value.item()}')

                # NOTE(review): test metrics are logged per batch but keyed
                # by epoch `i`, so later batches overwrite earlier ones in
                # TensorBoard; consider averaging over the whole test set.
                log_writer.add_scalar('test acc', acc, i)
                # Step fixed to epoch `i` for consistency with the other
                # test scalars (was the train `batch_cnt`).
                for cls_idx, cls_acc in enumerate(clsacc):
                    log_writer.add_scalar(f'test cls({cls_idx}) acc', cls_acc, i)
                log_writer.add_scalar('test miou', miou, i)
                log_writer.add_scalar('test loss', loss_value.item(), i)

        # Checkpoint after every epoch.
        torch.save(model.state_dict(), r'./model{}.pt'.format(i))
        print('\n ================================================================= \n')
