import os

import numpy as np
import torch
import argparse
from torch.utils.data import DataLoader
from torch import autograd, optim
from torchvision.transforms import transforms
from guide_UNet import Unet
from dataset import LiverDataset
import torch.nn.functional as F
from visdom import Visdom  # 可视化处理模块


# 可视化app
viz = Visdom()

# 是否使用cuda
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

x_transforms = transforms.Compose([
    transforms.Resize((224,224)),
    # transforms.RandomCrop(224),
    # transforms.CenterCrop((381, 381)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])


# mask只需要转换为tensor
y_transforms = transforms.Compose([
    transforms.Resize((224,224)),
    # transforms.CenterCrop((381, 381)),
    transforms.Grayscale(),
    transforms.ToTensor(),
])

#参数解析
parse = argparse.ArgumentParser()

def train_model(model, criterion, optimizer, dataload, num_epochs=40):
    """Run the training loop and checkpoint the model after every epoch.

    Args:
        model: network to optimize; assumed already moved to `device`.
        criterion: loss function applied to (outputs, labels).
        optimizer: optimizer stepping `model`'s parameters.
        dataload: DataLoader yielding (image, mask) batches.
        num_epochs: number of passes over the dataset (default 40).

    Returns:
        The trained model (same object, optimized in place).
    """
    model.train()  # ensure dropout/batch-norm layers are in training mode
    all_step = 0  # global step counter; x-axis of the visdom loss curve
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        dt_size = len(dataload.dataset)
        epoch_loss = 0
        step = 0
        for x, y in dataload:
            step += 1
            all_step += 1
            inputs = x.to(device)
            labels = y.to(device)
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward + backward + optimize
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            # visdom expects `update` to be the string 'append' (not a list)
            # to extend the existing curve in-place.
            viz.line([loss.item()], [all_step], win='loss',
                     opts=dict(title='train loss'), update='append')
            print("%d/%d,train_loss:%0.3f" % (step, (dt_size - 1) // dataload.batch_size + 1, loss.item()))
        print("epoch %d loss:%0.3f" % (epoch, epoch_loss))
        # exist_ok avoids the isdir/makedirs check-then-act race
        os.makedirs("./model", exist_ok=True)
        torch.save(model.state_dict(), './model/weights_%d.pth' % epoch)
    return model

#训练模型
def train():
    """Build the model, loss and optimizer, then launch training.

    Reads `args.batch_size`, and — if `--ckp` was given — warm-starts the
    network from that checkpoint (previously the flag was parsed at the CLI
    but silently ignored).
    """
    model = Unet(3, 1).to(device)
    if args.ckp:  # resume / fine-tune from an existing weight file
        model.load_state_dict(torch.load(args.ckp, map_location=device))
    batch_size = args.batch_size
    # NOTE(review): BCELoss expects probabilities in [0, 1]; confirm that
    # guide_UNet applies a sigmoid to its training-mode output.
    criterion = torch.nn.BCELoss()
    optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=0.0002)
    liver_dataset = LiverDataset(".\\data", transform=x_transforms, target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=batch_size, shuffle=True, num_workers=5)
    train_model(model, criterion, optimizer, dataloaders)


#显示模型的输出结果
def test():
    """Run inference on the test split and save the eight sigmoid-activated
    intermediate guidance maps (q1..q4, r1..r4) as images under
    ./result/inter1 .. ./result/inter8, one file per input image.
    """
    import cv2  # lazy import: only needed for inference output

    model = Unet(3, 1).to(device)
    model.load_state_dict(torch.load(r"./model/guide_model.pth", map_location='cpu'))
    liver_dataset = LiverDataset(".\\data", transform=x_transforms, target_transform=y_transforms, test=True)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    with torch.no_grad():
        for x, path in dataloaders:
            x = x.to(device)
            # In eval mode the model returns the final mask plus eight
            # intermediate guidance maps.
            y, q1, q2, q3, q4, r1, r2, r3, r4 = model(x)
            path = path[0]
            # Normalize separators so the basename is extracted correctly
            # for both Windows ('\\') and POSIX ('/') style paths.
            filename = path.replace('\\', '/').split('/')[-1].replace('jpg', 'png')
            print(filename)
            for n, feat in enumerate([q1, q2, q3, q4, r1, r2, r3, r4], start=1):
                # F.sigmoid is deprecated; torch.sigmoid is the supported API.
                prob = torch.sigmoid(feat)
                img = prob.cpu().data.numpy()[0, 0, :, :]
                # Upsample each intermediate map to the final output's size
                # (cv2.resize takes (width, height)).
                img = cv2.resize(img, (y.size()[3], y.size()[2]), interpolation=cv2.INTER_LINEAR)
                out_dir = "./result/inter%d" % n
                os.makedirs(out_dir, exist_ok=True)
                # Maps are probabilities in [0, 1]; scale to 8-bit range.
                cv2.imwrite(os.path.join(out_dir, filename), img * 255)
    # NOTE(review): the original also created an unused ./res directory,
    # imported matplotlib without plotting, and kept debug prints — all
    # dead code, removed here.


            # img_y.save()
            # plt.imshow(img_y)
            # plt.pause(0.01)
        # plt.show()


if __name__ == '__main__':
    # Command-line entry point: pick between training and inference.
    parse = argparse.ArgumentParser()
    parse.add_argument("--action", type=str, help="train or test", default='train')
    parse.add_argument("--batch_size", type=int, default=1)
    parse.add_argument("--ckp", type=str, help="the path of model weight file")
    args = parse.parse_args()

    # Dispatch table; unknown actions fall through and do nothing,
    # matching the original if/elif chain.
    handler = {"train": train, "test": test}.get(args.action)
    if handler is not None:
        handler()