from turtle import goto
from torch.autograd import Variable
from utils.trainer import content_Trainer, shared_Trainer_condconv_seg_3,stylecode_Trainer,shared_Trainer_condconv_mean,shared_Trainer_condconv,shared_Trainer_condconv_seg_3
from utils.dataloader import Brats2D,split_dataset
from torch.utils.data import DataLoader
import torch
import os
import sys
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
import torchvision
from piq import ssim,psnr
import numpy as np
import random
# --- Experiment setup: logging, device, model, data loaders -----------------
writer = SummaryWriter()
# NOTE(review): setting CUDA_VISIBLE_DEVICES after `import torch` still works
# because CUDA is initialized lazily (on the first .cuda() call), but setting
# it before any torch import is the safer convention.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

trainer = shared_Trainer_condconv_seg_3()
trainer.cuda()

# Split into train/test file lists and wrap in 2D BraTS datasets.
x, y = split_dataset()
train_set = Brats2D(x)
test_set = Brats2D(y)
# Fix: `shuffle` expects a bool; the original passed `1` and relied on truthiness.
train_loader = DataLoader(dataset=train_set, batch_size=4, shuffle=True, num_workers=4)
test_loader = DataLoader(dataset=test_set, batch_size=1, shuffle=True, num_workers=4)

display_size = 1
step = 0

# Grab one fixed test batch up front; it is reused for periodic image logging
# during training so the TensorBoard samples are comparable across steps.
for i, (x1, x2, x3, x4, seg) in enumerate(test_loader):
    # Fix: x3 previously lacked .detach(), unlike the other three modalities.
    x1_test, x2_test, x3_test, x4_test = (
        x1.cuda().detach(),
        x2.cuda().detach(),
        x3.cuda().detach(),
        x4.cuda().detach(),
    )
    break
# --- Main training loop ------------------------------------------------------
best = 0
for epoch in range(100):
    # Ensure we leave eval mode set by the previous epoch's validation pass.
    trainer.train()
    dis_loss, adv_loss, recon_loss, cyc_loss = 0, 0, 0, 0
    seg_loss = 0
    for i, (x1, x2, x3, x4, seg) in enumerate(train_loader):
        x1, x2, x3, x4 = (
            x1.cuda().detach(),
            x2.cuda().detach(),
            x3.cuda().detach(),
            x4.cuda().detach(),
        )
        trainer.update_learning_rate()
        # Random visibility mask over the four modalities; one entry is forced
        # to 0 so at least one modality is always visible (0 means visible).
        label = list(np.random.randint(0, 2, 3))
        label.append(0)  # 0 means visible
        random.shuffle(label)
        # Discriminator step, then generator step (adv / recon / cycle / seg).
        dis_loss += trainer.dis_update(x1, x2, x3, x4, label)
        temp1, temp2, temp3, temp4 = trainer.gen_update(x1, x2, x3, x4, seg, label)
        adv_loss += temp1
        recon_loss += temp2
        cyc_loss += temp3
        seg_loss += temp4
        # Log running means of the losses every 50 iterations.
        if i % 50 == 0 and i != 0:
            writer.add_scalar("dis_loss", dis_loss / 50, global_step=step)
            writer.add_scalar("G/adv_loss", adv_loss / 50, global_step=step)
            writer.add_scalar("G/recon_loss", recon_loss / 50, global_step=step)
            writer.add_scalar("G/cyc_loss", cyc_loss / 50, global_step=step)
            writer.add_scalar("G/gen_loss", (adv_loss + recon_loss + cyc_loss) / 50, global_step=step)
            writer.add_scalar("G/seg_loss", seg_loss / 50, global_step=step)
            dis_loss, adv_loss, recon_loss, cyc_loss = 0, 0, 0, 0
            seg_loss = 0
            step += 1
        # Periodically log sample reconstructions on the fixed test batch.
        # Even-indexed outputs appear to be originals, odd-indexed the
        # generated counterparts — TODO confirm against trainer.sample().
        if i % 1000 == 0:
            with torch.no_grad():
                outputs = trainer.sample(x1_test, x2_test, x3_test, x4_test, label=[0, 0, 1, 0])
                # Outputs are in [-1, 1]; rescale to [0, 1] for display.
                writer.add_image('O/images1', (outputs[0] + 1) / 2, step)
                writer.add_image('O/images2', (outputs[2] + 1) / 2, step)
                writer.add_image('O/images3', (outputs[4] + 1) / 2, step)
                writer.add_image('O/images4', (outputs[6] + 1) / 2, step)
                writer.add_image('G/images1', (outputs[1] + 1) / 2, step)
                writer.add_image('G/images2', (outputs[3] + 1) / 2, step)
                writer.add_image('G/images3', (outputs[5] + 1) / 2, step)
                writer.add_image('G/images4', (outputs[7] + 1) / 2, step)
        # NOTE(review): per-iteration synchronize only slows training; kept to
        # preserve original behavior — consider removing.
        torch.cuda.synchronize()

    # --- Validation: synthesize modality 3 and score PSNR / SSIM ------------
    ps_metric = 0
    ss_metric = 0
    trainer.eval()  # fix: was redundantly called on every loop iteration
    with torch.no_grad():  # fix: eval previously built autograd graphs, wasting memory
        for i, (x1, x2, x3, x4, seg) in enumerate(test_loader):
            # Fix: x3 previously lacked .detach(), unlike the other modalities.
            x1, x2, x3, x4 = (
                x1.cuda().detach(),
                x2.cuda().detach(),
                x3.cuda().detach(),
                x4.cuda().detach(),
            )
            output = trainer.sample(x1, x2, x3, x4, label=[0, 0, 1, 0])
            # Rescale from [-1, 1] to [0, 1] before computing metrics.
            pred = ((output[5] + 1) / 2).unsqueeze(0).cpu()
            target = ((x3 + 1) / 2).cpu()
            ps_metric += psnr(pred, target, data_range=1., reduction='none')
            ss_metric += ssim(pred, target, data_range=1.)
            torch.cuda.synchronize()
    writer.add_scalar("epoch/ps", ps_metric / len(test_set), global_step=epoch)
    writer.add_scalar("epoch/ss", ss_metric / len(test_set), global_step=epoch)
    # Checkpoint on best mean SSIM.
    if ss_metric / len(test_set) > best:
        best = ss_metric / len(test_set)
        # NOTE(review): this pickles the whole module object; saving
        # trainer.state_dict() would be more portable, but the full-object
        # save is kept for compatibility with existing load code.
        torch.save(trainer, f'saved/best_my_{epoch}.pth')