#!py -3
# coding:utf-8

import os
import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision.transforms import transforms
from Unet import *
from data_pk import *

mini_batch = 1  # DataLoader batch size used during training
batch_cnt = 100  # number of backward passes to accumulate before each optimizer step
batch_save_cnt = 10  # how many optimizer steps between checkpoint-save attempts
# Earlier checkpoint paths, kept for reference:
# savemoduleparamName = "../moduleparam/enhance_unet1.pth"
# savemoduleparamName = "../moduleparam/enhance_unet_sigmoid.pth"
# savemoduleparamName = "../moduleparam/enhance_lessunet1.pth"
# savemoduleparamName = "../moduleparam/enhance_lessunet2.pth"
# savemoduleparamName = "../moduleparam/enhance_mixlessunet1.pth"
# savemoduleparamName = "../moduleparam/enhance_mixlessunet2.pth"
# savemoduleparamName = "../moduleparam/enhance_mixlessunet3.pth"
savemoduleparamName = "../moduleparam/enhance_mixlessunet4_1.pth"  # active checkpoint path
max_epoch = 100  # maximum number of passes over the training set
use_gpu = torch.cuda.is_available()
# NOTE(review): re-evaluates cuda.is_available() instead of reusing use_gpu —
# harmless but redundant; the two flags are always consistent at import time.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # device used for training


def _chw_tensor_to_u8_image(t):
    """Convert a CHW float tensor with values in [0, 1] to an HWC uint8 numpy image.

    Used only for cv.imshow previews; `t` must already be detached from the
    autograd graph by the caller when it comes from a model output.
    """
    img = (t * 255).cpu().numpy()
    return np.transpose(img, (1, 2, 0)).astype(np.uint8)


def main():
    """Train the OCT image-enhancement network.

    Builds the train/validation file lists, resumes from a saved checkpoint
    when one exists (running a visual validation pass in that case), then
    trains for up to ``max_epoch`` epochs with manual gradient accumulation,
    checkpointing whenever the running average loss improves.

    Returns:
        0 when the image sets are inconsistent (filenames do not match);
        None otherwise.
    """
    ######### build the data file lists #########
    inputlist = getFilepathInDir("../oct_img/input")
    outputlist = getFilepathInDir("../oct_img/output")

    input_vallist = getFilepathInDir("../oct_img/input_val")
    output_vallist = getFilepathInDir("../oct_img/output_val")

    if not compareIsHasSameFilename(inputlist, outputlist):
        print("无效的训练图像集")
        return 0
    if not compareIsHasSameFilename(input_vallist, output_vallist):
        # BUGFIX: this branch checks the *validation* set but previously
        # printed the training-set message ("无效的训练图像集").
        print("无效的校验图像集")
        return 0

    ######### build the model ##############
    model = MixUNet1()
    model: torch.nn.Module

    learnrate = 0.1
    hasmoduleparam = False
    if os.path.exists(savemoduleparamName):
        # Resume from checkpoint; use a smaller LR for fine-tuning.
        model.load_state_dict(torch.load(savemoduleparamName))
        learnrate = 0.01
        hasmoduleparam = True
    else:
        # Make sure the checkpoint directory exists before the first save.
        newdir, _ = os.path.split(savemoduleparamName)
        os.makedirs(newdir, exist_ok=True)

    model.to(device)

    ############ loss criterion ##################
    criterion = Criterion1().to(device)

    ############ if a checkpoint was loaded, run one visual validation pass ####
    if hasmoduleparam:
        model.eval()
        dataset = OCT_DataSet(inputlist, outputlist, input_vallist, output_vallist,
                              xtransforms=transforms.ToTensor(),
                              ytransforms=transforms.ToTensor(), trainmode=False)
        dataloader = torch.utils.data.DataLoader(dataset, 1, shuffle=False)
        for i, (input, target) in enumerate(dataloader):
            input: torch.Tensor
            target: torch.Tensor
            if use_gpu:
                input = input.to(device)
                target = target.to(device)
            output = model(input)
            loss = criterion(output, target)
            print("step=%d 校验数据的loss= %f" % (i, loss))
            cv.imshow("img_input", _chw_tensor_to_u8_image(input[0]))
            cv.imshow("img_output", _chw_tensor_to_u8_image(output.detach()[0]))
            # ESC aborts the preview loop.
            if cv.waitKey(30) == 27:
                break

    ########### optimizer #############
    optimizer = torch.optim.Adam(model.parameters(), lr=learnrate,
                                 betas=(0.9, 0.999), eps=1e-8, weight_decay=0,
                                 amsgrad=False)

    ############### training ###############
    dataset = OCT_DataSet(inputlist, outputlist, input_vallist, output_vallist,
                          xtransforms=transforms.ToTensor(),
                          ytransforms=transforms.ToTensor())
    dataloader = torch.utils.data.DataLoader(dataset, mini_batch, shuffle=True)

    model.train()
    bh_cnt = 0           # optimizer steps since the last save attempt
    loss_sum = 0.0       # accumulated step losses since the last save attempt
    # BUGFIX: best average loss so far. Previously initialized to 0, so the
    # "improved" test (best > avg) could never fire for positive losses, and a
    # `< 10` reset hack could overwrite the best value with a *worse* average.
    # float("inf") guarantees the first average saves and only improvements
    # save afterwards.
    last_savetime_loss = float("inf")
    optimizer.zero_grad()
    print("\n\n进入训练部分")
    for _ in range(max_epoch):
        for i, (input, target) in enumerate(dataloader):
            input: torch.Tensor
            target: torch.Tensor
            if use_gpu:
                input = input.to(device)
                target = target.to(device)
            output = model(input)
            loss = criterion(output, target)
            loss: torch.Tensor
            loss.backward()
            cv.waitKey(1)  # keep the OpenCV preview windows responsive
            # Apply the accumulated gradients once per batch_cnt samples.
            # BUGFIX: was `(i % batch_cnt) == 0`, which stepped at i == 0 with
            # the gradient of a single sample.
            # NOTE(review): gradients are summed, not averaged, over the
            # batch_cnt samples — the effective LR scales with batch_cnt;
            # confirm this is intentional.
            if (i + 1) % batch_cnt == 0:
                optimizer.step()
                optimizer.zero_grad()

                cv.imshow("img_input", _chw_tensor_to_u8_image(input[0]))
                cv.imshow("target_img", _chw_tensor_to_u8_image(target[0]))
                cv.imshow("img_output", _chw_tensor_to_u8_image(output.detach()[0]))
                cv.waitKey(1)

                loss_sum += loss.cpu().item()
                print("%f" % loss)
                bh_cnt += 1
                if bh_cnt >= batch_save_cnt:
                    avg_loss = loss_sum / batch_save_cnt
                    print("平均loss = %f" % avg_loss)
                    if avg_loss < last_savetime_loss:
                        # Loss improved: checkpoint and remember the new best.
                        torch.save(model.state_dict(), f=savemoduleparamName)
                        print("保存了模型参数.")
                        last_savetime_loss = avg_loss
                    bh_cnt = 0
                    loss_sum = 0.0


# Script entry point: run training when executed directly.
if __name__ == '__main__':
    main()
