import torch 
from dataset import VC_Dataset
from torch.utils.data import Dataset,DataLoader
from hparams import hparams
from model import Generator,Discriminator
import os

def adjust_lr_rate(optimizer, lr, lr_decay):
    """Linearly decay a learning rate and apply it to an optimizer.

    Subtracts ``lr_decay`` from ``lr`` (clamped at zero so the rate never
    goes negative) and writes the decayed value into every parameter group
    of ``optimizer``.

    Returns:
        (new_lr, optimizer): the decayed rate and the (mutated) optimizer.
    """
    decayed = lr - lr_decay
    if decayed < 0.:
        decayed = 0.
    for group in optimizer.param_groups:
        group['lr'] = decayed
    return decayed, optimizer
        
        
        
        
if __name__ == "__main__":
    
    # 定义device
    device = torch.device("cuda:0")
    
    # 获取模型参数
    para = hparams()
    
    # 模型实例化
    m_G_A2B = Generator()
    m_G_A2B = m_G_A2B.to(device)
    
    m_G_B2A = Generator()
    m_G_B2A = m_G_B2A.to(device)
    
    m_D_A = Discriminator()
    m_D_A = m_D_A.to(device)
    
    m_D_B = Discriminator()
    m_D_B = m_D_B.to(device)
    
    # 定义优化器
    g_lr =  para.g_lr
    d_lr =  para.d_lr
    g_params = list(m_G_A2B.parameters()) +  list(m_G_B2A.parameters())
    d_params = list(m_D_A.parameters()) + list(m_D_B.parameters())
    
    G_optimizer = torch.optim.Adam(g_params, lr=g_lr, betas=(0.5, 0.999))
    D_optimizer = torch.optim.Adam(d_params, lr=d_lr, betas=(0.5, 0.999))
    
    # 定义数据集
    m_dataset = VC_Dataset(para)
    m_dataloader = DataLoader(m_dataset,batch_size = 1,shuffle = True, num_workers = 1)
    
    # 开始进行训练
  
    train_epoch = int(para.train_steps/len(m_dataset))+1
    print(len(m_dataset))
   
    n_step = 0
    g_loss_store = []
    d_loss_store = []
    for epoch in range(train_epoch):
        # 打乱顺序
        m_dataset.gen_random_pair_index()
        for i, batch_samples in enumerate(m_dataloader):
            n_step = n_step+1
            real_A = batch_samples[0]
            real_B = batch_samples[1]
            
            real_A = real_A.to(device).float()
            real_B = real_B.to(device).float()
            
            # 根据迭代轮次 修正 lambda 
            if n_step>para.step_drop_identity:
                identity_loss_lambda = 0
            else:
                identity_loss_lambda = para.identity_loss_lambda
                cycle_loss_lambda = para.cycle_loss_lambda
                
            # 根据迭代步数 修改 lr
            if n_step>para.start_decay:
               g_lr, G_optimizer= adjust_lr_rate(G_optimizer,g_lr,para.decay_G)
               d_lr, D_optimizer= adjust_lr_rate(D_optimizer,d_lr,para.decay_D)
           
           
            # 计算损失函数 G_loss
            fake_B = m_G_A2B(real_A)
            cycle_A = m_G_B2A(fake_B)

            fake_A = m_G_B2A(real_B)
            cycle_B = m_G_A2B(fake_A)

            identity_A = m_G_B2A(real_A)
            identity_B = m_G_A2B(real_B)

            d_fake_A = m_D_A(fake_A)
            d_fake_B = m_D_B(fake_B)

            # for the second step adverserial loss
            d_fake_cycle_A = m_D_A(cycle_A)
            d_fake_cycle_B = m_D_B(cycle_B)

            # Generator Cycle loss
            cycleLoss = torch.mean(torch.abs(real_A - cycle_A)) \
                    + torch.mean(torch.abs(real_B - cycle_B))

            # Generator Identity Loss
            identiyLoss = torch.mean(torch.abs(real_A - identity_A)) \
                        + torch.mean(torch.abs(real_B - identity_B))
            
            # Generator Loss
            generator_loss_A2B = torch.mean((1 - d_fake_B) ** 2)
            generator_loss_B2A = torch.mean((1 - d_fake_A) ** 2)
            
            generator_loss_2nd = 0.5*( torch.mean((1 - d_fake_cycle_A) ** 2) + torch.mean((1 - d_fake_cycle_B) ** 2) )
            
            
            
            # Total Generator Loss
            g_loss = generator_loss_A2B + generator_loss_B2A + \
                     cycle_loss_lambda * cycleLoss  +  \
                     identity_loss_lambda * identiyLoss +\
                     generator_loss_2nd
            g_loss_store.append(g_loss)
            
            # 梯度下降，更新G部分的参数
            G_optimizer.zero_grad()
            D_optimizer.zero_grad()
            g_loss.backward()
            G_optimizer.step()
            
            # 更新 D 部分的参数
            d_real_A = m_D_A(real_A)
            d_real_B = m_D_B(real_B)
            
            generated_A = m_G_B2A(real_B)
            d_fake_A = m_D_A(generated_A)

            # for the second step adverserial loss
            cycled_B = m_G_A2B(generated_A)
            d_cycled_B = m_D_B(cycled_B)

            generated_B = m_G_A2B(real_A)
            d_fake_B = m_D_B(generated_B)

            # for the second step adverserial loss
            cycled_A = m_G_B2A(generated_B)
            d_cycled_A = m_D_A(cycled_A)

            # Loss Functions
            d_loss_A_real = torch.mean((1 - d_real_A) ** 2)
            d_loss_A_fake = torch.mean((0 - d_fake_A) ** 2)
            d_loss_A = (d_loss_A_real + d_loss_A_fake) / 2.0

            d_loss_B_real = torch.mean((1 - d_real_B) ** 2)
            d_loss_B_fake = torch.mean((0 - d_fake_B) ** 2)
            d_loss_B = (d_loss_B_real + d_loss_B_fake) / 2.0

            # the second step adverserial loss
            d_loss_A_cycled = torch.mean((0 - d_cycled_A) ** 2)
            d_loss_B_cycled = torch.mean((0 - d_cycled_B) ** 2)
            d_loss_A_2nd = (d_loss_A_real + d_loss_A_cycled) / 2.0
            d_loss_B_2nd = (d_loss_B_real + d_loss_B_cycled) / 2.0

            # Final Loss for discriminator with the second step adverserial loss
            d_loss = (d_loss_A + d_loss_B) / 2.0 + (d_loss_A_2nd + d_loss_B_2nd) / 2.0
            d_loss_store.append(d_loss)
            # 梯度下降 更新D部分的参数
            G_optimizer.zero_grad()
            D_optimizer.zero_grad()
            d_loss.backward()
            D_optimizer.step()
            
            # 模型保存
            if n_step% para.step_save ==0:
                path_save = os.path.join(para.path_save,'epoch'+str(n_step))
                os.makedirs(path_save,exist_ok=True)
              
                torch.save({'generator_loss_store': g_loss_store,
                             'discriminator_loss_store': d_loss_store,
                             'model_genA2B_state_dict': m_G_A2B.state_dict(),
                             'model_genB2A_state_dict': m_G_B2A.state_dict(),
                             'model_discriminatorA': m_D_A.state_dict(),
                             'model_discriminatorB': m_D_B.state_dict(),
                             'generator_optimizer': G_optimizer.state_dict(),
                             'discriminator_optimizer': D_optimizer.state_dict()
                             }, os.path.join(path_save,'model.pick'))
                
            
            # log 打印
            print("step %04d g_loss_A2B= %f, g_loss_B2A=%f, id_Loss=%f, cycleLoss=%f, d_loss_A=%f, d_loss_B=%f"
                  %(n_step,generator_loss_A2B,generator_loss_A2B,identiyLoss,cycleLoss,d_loss_A,d_loss_B))
    
    
    torch.save({'generator_loss_store': g_loss_store,
                 'discriminator_loss_store': d_loss_store,
                 'model_genA2B_state_dict': m_G_A2B.state_dict(),
                 'model_genB2A_state_dict': m_G_B2A.state_dict(),
                 'model_discriminatorA': m_D_A.state_dict(),
                 'model_discriminatorB': m_D_B.state_dict(),
                 'generator_optimizer': G_optimizer.state_dict(),
                 'discriminator_optimizer': D_optimizer.state_dict()
                 }, os.path.join(path_save,'model_final.pick'))
                
            
            
            
            
            
            
            
        

                        
                        
            

           
           
           
           
           


            
            
            
            
            
            
            
            
            
            
            
            
    
    
    
    
    
    
    
    
    
    
    
    
    
    
    

