## Facades dataset — quick end-to-end check of the conditional-GAN model.
import torch 
import torch.nn as nn 
import numpy as np 
import glob 
from torch.utils.data import DataLoader, Dataset
from PIL import Image
import matplotlib.pyplot as plt  
from model.discriminator import Discriminator
# from model.unet_model import UNet
from eval import eval_net 
from tqdm import tqdm 
from torch.utils.tensorboard import SummaryWriter
from model.unet_model import UnetGenerator

# Paired training images: each file holds photo and label map side by side
# (assumed from the split in FDataset.__getitem__ — TODO confirm layout).
img_files = glob.glob("./facades/train/*")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("device is " + str(device))
epoches = 400          # number of training epochs
out_channels = 3 
batch_size = 1         # pix2pix-style training uses batch size 1
dis_lr = 1e-4          # discriminator learning rate
gene_lr = 1e-4         # generator learning rate
dir_checkpoint = "./checkpoints/"  # where generator weights are saved

class FDataset(Dataset):
  """Paired facades dataset for conditional-GAN training.

  Each image file is assumed to contain the real photo (left half) and the
  conditioning image (right half) side by side — TODO confirm against the
  actual facades layout. Pixels are rescaled from [0, 255] to [-1, 1] and
  returned channel-first as (real_img, input_img) float32 tensors.
  """

  def __init__(self, files=None):
    """files: optional list of image paths. Defaults to the module-level
    ``img_files`` glob so existing callers are unaffected."""
    super(FDataset, self).__init__()
    self.files = img_files if files is None else files

  def __getitem__(self, idx):
    img = Image.open(self.files[idx])
    img = np.array(img, dtype=np.float32)
    # Normalize [0, 255] -> [0, 1] -> [-1, 1] (matches a tanh generator output).
    img = img / 255
    img = (img * 2) - 1
    img = img.transpose(2, 0, 1)  # HWC -> CHW

    # Split the side-by-side pair down the middle along the width axis.
    w = img.shape[2] // 2
    real_img = img[:, :, :w]    # left half: target photo
    input_img = img[:, :, w:]   # right half: conditioning image

    # .copy() makes the slices contiguous; from_numpy then wraps them
    # without the extra copy that torch.tensor() would make.
    return torch.from_numpy(real_img.copy()), torch.from_numpy(input_img.copy())

  def __len__(self):
    return len(self.files)

if __name__ == "__main__":
    import os

    # Conditional-GAN (pix2pix-style) training loop: the discriminator
    # scores (img, mask) pairs, the generator maps img -> mask.
    discriminator = Discriminator(input_nc=3)
    discriminator.to(device)

    generator = UnetGenerator(3, 3, 8, use_dropout=True)
    generator.to(device)

    optimizer_dis = torch.optim.Adam(discriminator.parameters(), lr=dis_lr, weight_decay=1e-5)
    optimizer_gene = torch.optim.Adam(generator.parameters(), lr=gene_lr, weight_decay=1e-5)

    # BCE on raw logits, shared by the real/fake and adversarial objectives.
    criterion = nn.BCEWithLogitsLoss()

    writer = SummaryWriter("./facades_train_log/", comment=f'LR_{dis_lr}_BS_{batch_size}')

    dataset = FDataset()
    dataloader = DataLoader(dataset, shuffle=True, batch_size=1)

    # torch.save below fails if the checkpoint directory does not exist.
    os.makedirs(dir_checkpoint, exist_ok=True)

    # Log roughly twice per epoch; max(1, ...) guards against a
    # ZeroDivisionError when the dataset is very small.
    log_interval = max(1, len(dataloader) // (2 * batch_size))

    global_step = 0
    dis_loss = 0.0   # running discriminator loss since last log
    gene_loss = 0.0  # running generator loss since last log

    for epoch in range(epoches):
      print("epoch is " + str(epoch))
      # total=len(dataloader) (not len(dataset)) so the bar is correct
      # for any batch size.
      for img, mask_type in tqdm(dataloader, total=len(dataloader)):
        # data shape: (batch, channel, H, W)
        img = img.to(device=device, dtype=torch.float32)
        label = mask_type.to(device=device, dtype=torch.float32)

        # ---- Discriminator step ----
        # Real pair -> target 1.
        dis_true_out = discriminator(img, label)
        true_label = torch.ones_like(dis_true_out, device=device)
        true_loss = criterion(dis_true_out, true_label)

        # Fake pair -> target 0. detach() stops gradients at the generator
        # so this step only updates the discriminator.
        fake_mask = generator(img).detach()
        dis_fake_out = discriminator(img, fake_mask)
        fake_label = torch.zeros_like(dis_fake_out, device=device)
        fake_loss = criterion(dis_fake_out, fake_label)

        dis_sum_loss = true_loss + fake_loss
        dis_loss += dis_sum_loss.item()

        optimizer_dis.zero_grad()
        dis_sum_loss.backward()
        optimizer_dis.step()

        # ---- Generator step ----
        # Regenerate without detach and try to fool the just-updated
        # discriminator (fake pair scored as real).
        fake_mask = generator(img)
        dis_fake_out = discriminator(img, fake_mask)
        true_label = torch.ones_like(dis_fake_out, device=device)
        gene_sum_loss = criterion(dis_fake_out, true_label)
        gene_loss += gene_sum_loss.item()

        optimizer_gene.zero_grad()
        gene_sum_loss.backward()
        optimizer_gene.step()

        global_step += 1

        if global_step % log_interval == 0:
            # Tensors are in [-1, 1]; rescale to [0, 1] for logging so
            # TensorBoard does not clip the display.
            writer.add_images("image", (img + 1) / 2, global_step=global_step)
            writer.add_images("pred_mask", (fake_mask.detach() + 1) / 2, global_step=global_step)
            writer.add_scalar("dis_loss", dis_loss, global_step)
            writer.add_scalar("gene_loss", gene_loss, global_step)
            print("dis_loss is " + str(dis_loss))
            print("gene_loss is " + str(gene_loss))
            dis_loss = 0.0
            gene_loss = 0.0

      # Checkpoint the generator once per epoch (overwrites the last one).
      torch.save(generator.state_dict(), dir_checkpoint + f'cgan_facades.pkl')