import torch 
from dataset import VC_StarGan_Dataset
from torch.utils.data import Dataset,DataLoader
from hparams import hparams
from model import Generator,Discriminator,DomainClassifier
import os
import logging
import torch.nn as nn
def adjust_lr_rate(optimizer, lr, lr_decay):
    """Linearly decay a learning rate and write it into every param group.

    The new rate is ``lr - lr_decay``, clamped at zero so it can never go
    negative. The optimizer is modified in place and also returned so the
    caller can rebind both values in one assignment.

    Args:
        optimizer: torch optimizer whose ``param_groups`` are updated.
        lr: current learning rate.
        lr_decay: amount subtracted from ``lr`` this step.

    Returns:
        Tuple ``(new_lr, optimizer)``.
    """
    decayed = lr - lr_decay
    if decayed < 0.:
        decayed = 0.
    for group in optimizer.param_groups:
        group['lr'] = decayed
    return decayed, optimizer
    

    
    
    

if __name__ == "__main__":
    
    # Logging goes to both a file and the console.
    file_log = "starGan.log"
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
        handlers=[
            logging.FileHandler(file_log),
            logging.StreamHandler()
        ]
    )
    logger = logging.getLogger()   
    
    
    
    # Training device. NOTE(review): hard-coded to the first GPU; this
    # script will fail on a CPU-only machine — consider a fallback.
    device = torch.device("cuda:0")
    
    # Hyper-parameters come from the project-local hparams class.
    para = hparams()
    
    # Instantiate the three StarGAN networks, all conditioned on the
    # number of speakers.
    m_Generator = Generator(para.n_spk)
    m_Generator = m_Generator.to(device)
    
    m_Discriminator = Discriminator(para.n_spk)
    m_Discriminator = m_Discriminator.to(device)
    
    m_DomainClassifier = DomainClassifier(para.n_spk)
    m_DomainClassifier = m_DomainClassifier.to(device)
    
    
    # One Adam optimizer per network; betas (0.5, 0.999) are the usual
    # GAN settings.
    g_lr =  para.g_lr
    g_optimizer = torch.optim.Adam(m_Generator.parameters(), g_lr, [0.5, 0.999])
    
    d_lr = para.d_lr
    d_optimizer = torch.optim.Adam(m_Discriminator.parameters(), d_lr, [0.5, 0.999])
    
    c_lr = para.c_lr
    c_optimizer = torch.optim.Adam(m_DomainClassifier.parameters(), c_lr,[0.5, 0.999])
    
    
    # Cross-entropy loss for the speaker/domain classifier.
    CELoss = nn.CrossEntropyLoss()
    
    # Dataset yields paired (A, B) speaker samples.
    # NOTE(review): batch_size and num_workers are hard-coded to 1 here
    # rather than taken from `para` — confirm this is intended.
    m_Dataset= VC_StarGan_Dataset(para)
    m_DataLoader = DataLoader(m_Dataset,batch_size = 1,shuffle = True, num_workers = 1)
    
    # Global step counter, used for lr decay and checkpointing.
    n_step = 0
    
    for epoch in range(para.n_epoch):
        # Re-shuffle the speaker pairings at the start of each epoch.
        m_Dataset.gen_random_pair()
        
        for i, sample_batch in enumerate(m_DataLoader):
            n_step = n_step+1
            
            # Unpack source speaker A: features, one-hot speaker code,
            # and integer speaker index.
            real_A = sample_batch[0]
            one_hot_A = sample_batch[1]
            index_A = sample_batch[2]
            
            real_A = real_A.to(device).float()
            index_A = index_A.to(device)
            one_hot_A = one_hot_A.to(device).float()
            
            # Unpack target speaker B in the same layout.
            real_B = sample_batch[3]
            one_hot_B = sample_batch[4]
            index_B = sample_batch[5]
           
            real_B = real_B.to(device).float()
            index_B = index_B.to(device)
            one_hot_B = one_hot_B.to(device).float()
            
            
            # --- Update the domain classifier ---
            # Train it to predict speaker B's index from real B features.
            domain_real = m_DomainClassifier(real_B)
            
            loss_c = CELoss(domain_real,index_B)
            
            # All three optimizers are zeroed so stale gradients from the
            # previous iteration cannot leak into any network.
            c_optimizer.zero_grad()
            d_optimizer.zero_grad()
            g_optimizer.zero_grad()
            
            loss_c.backward()
            c_optimizer.step()
            
            
            # --- Update the discriminator ---
            # LSGAN objectives: real scores pushed toward 1, fake toward 0.
            # NOTE(review): fake_B is not detached here, so this backward
            # also computes generator gradients; they are discarded by the
            # zero_grad calls below, but detaching would save work.
            d_B_real = m_Discriminator(real_B,one_hot_B)
            fake_B =  m_Generator(real_A,one_hot_B)
            d_B_fake = m_Discriminator(fake_B,one_hot_B)
            
            d_loss_real =  torch.mean((1 - d_B_real) ** 2)
            d_loss_fake =  torch.mean((0 - d_B_fake) ** 2)
            
            # (disabled) classifier loss on fakes during the D update:
            # domain_fake_B = m_DomainClassifier(fake_B)
            # domain_fake_loss = CELoss(domain_fake_B,index_B)
            
            
            # Gradient penalty: penalize the gradient norm of D at random
            # interpolates between a real and a generated sample.
            # NOTE(review): the interpolation mixes source-domain real_A
            # with target-domain fake_B (while D is conditioned on B) —
            # confirm real_A rather than real_B is intended here.
            alpha = torch.rand(real_A.size(0), 1, 1, 1).to(device)
            x_hat = (alpha * real_A + (1 - alpha) * fake_B).requires_grad_(True)
            out_src = m_Discriminator(x_hat, one_hot_B)
            
            # create_graph=True so the penalty itself is differentiable
            # w.r.t. D's parameters.
            weight = torch.ones(out_src.size()).to(device)
            dydx = torch.autograd.grad(outputs=out_src,
                                   inputs=x_hat,
                                   grad_outputs=weight,
                                   retain_graph=True,
                                   create_graph=True,
                                   only_inputs=True)[0]

            dydx = dydx.view(dydx.size(0), -1)
            dydx_l2norm = torch.sqrt(torch.sum(dydx**2, dim=1))
        
            # Push the per-sample gradient norm toward 1.
            loss_dp = torch.mean((dydx_l2norm-1)**2)
            
            # NOTE(review): the gradient-penalty weight 5 is hard-coded;
            # consider moving it into hparams.
            loss_d = d_loss_real + d_loss_fake +5*loss_dp 
           
            c_optimizer.zero_grad()
            d_optimizer.zero_grad()
            g_optimizer.zero_grad()
            loss_d.backward()
            d_optimizer.step()
            
            # --- Update the generator ---
            # fake_B is recomputed because the previous graph was consumed
            # by the discriminator update above.
            fake_B =  m_Generator(real_A,one_hot_B)
            # Cycle: converting fake B back with A's code should recover A.
            cycle_A = m_Generator(fake_B,one_hot_A)
            
            d_fake_B = m_Discriminator(fake_B,one_hot_B) 
            
            # Identity mapping: converting A to its own speaker code should
            # leave it unchanged.
            id_A = m_Generator(real_A,one_hot_A)
            
            # Fakes should be classified as speaker B by the (frozen-for-
            # this-step) domain classifier.
            domain_out_fake_B = m_DomainClassifier(fake_B)
            
            
            # LSGAN adversarial term: fool D into scoring fakes as 1.
            loss_gan = torch.mean((1 - d_fake_B) ** 2)
            
            # MSE cycle-consistency and identity terms.
            loss_cycle = torch.mean((real_A - cycle_A) ** 2)
            
            loss_id = torch.mean((real_A - id_A) ** 2)
            
            loss_domian = CELoss(domain_out_fake_B,index_B)
            
            # Weighted sum; the lambdas come from hparams.
            loss_g = loss_gan + \
                     para.lambda_cycle*loss_cycle + \
                     para.lambda_identity*loss_id + \
                     para.lambda_classifier * loss_domian
                     
            c_optimizer.zero_grad()
            d_optimizer.zero_grad()
            g_optimizer.zero_grad()
            
            loss_g.backward()
            g_optimizer.step()
            
            
            
            # Linearly decay all three learning rates every
            # `lr_update_step` steps once past `start_decay`.
            if n_step>para.start_decay and  n_step%(para.lr_update_step)==0:

               g_lr, g_optimizer= adjust_lr_rate(g_optimizer,g_lr,para.decay_g)
               d_lr, d_optimizer= adjust_lr_rate(d_optimizer,d_lr,para.decay_d)
               c_lr, c_optimizer= adjust_lr_rate(c_optimizer,c_lr,para.decay_c)
            
           #  Periodic checkpoint: all three models plus optimizer states.
            if  n_step %(para.save_step) ==0:
                path_save = os.path.join(para.path_save,str(n_step))
                os.makedirs(path_save,exist_ok=True)
                
                torch.save({'model_G':m_Generator.state_dict(),
                            'model_D':m_Discriminator.state_dict(),
                            'model_C':m_DomainClassifier.state_dict(),
                            'opt_G':g_optimizer.state_dict(),
                            'opt_D':d_optimizer.state_dict(),
                            'opt_C':c_optimizer.state_dict()},
                            os.path.join(path_save,'model.pick'))
                            
            # Per-step loss logging.
            logger.info("step %8d g_loss= %f, d_loss=%f, c_loss=%f  domain_loss = %f"%(n_step,loss_g,loss_d,loss_c,loss_domian))
            
    # Final checkpoint after all epochs.
    # NOTE(review): `path_save` is only bound inside the save_step branch
    # above — if training ends before the first periodic save this raises
    # NameError. Consider defaulting path_save to para.path_save up front.
    torch.save({'model_G':m_Generator.state_dict(),
                'model_D':m_Discriminator.state_dict(),
                'model_C':m_DomainClassifier.state_dict(),
                'opt_G':g_optimizer.state_dict(),
                'opt_D':d_optimizer.state_dict(),
                'opt_C':c_optimizer.state_dict()},
                os.path.join(path_save,'final_model.pick'))
               
               
               
               
               
              
            

            
            
            
                
            
            
            
            
        
    
    
    
    
    
