import codecs

from utils import get_batch_mask_01_loss, get_latent_coding_kl, get_mask_local_mean_loss_with_detach, get_mask_middle_scatter, get_reconstruct_mse, get_segment_mask_loss, get_mask_simple_scatter


def train(epoch, soas, torch_loader, optimizer):
    """Run one training epoch minimizing only the reconstruction MSE.

    Args:
        epoch: current epoch index (used only in the progress printout).
        soas: the model; its forward pass returns
            (top_feats, masks, latent_codings, latents, pred_act).
        torch_loader: iterable yielding (obs, act) batches.
        optimizer: torch optimizer over soas's parameters.
    """
    # Put the model into training mode.
    soas.train()
    batch_num = 0.0    # number of batches seen this epoch
    train_loss = 0.0   # accumulated MSE over the epoch
    for obs, act in torch_loader:
        # Clear stale parameter gradients.
        optimizer.zero_grad()
        # Invoke the module itself (not .forward()) so registered hooks fire.
        top_feats, masks, latent_codings, latents, pred_act = soas(obs)
        # Reconstruction MSE between predicted and ground-truth actions.
        mse_loss = get_reconstruct_mse(pred_act, act)
        train_loss += mse_loss.item()
        batch_num += 1
        # Backpropagate and update parameters.
        mse_loss.backward()
        optimizer.step()
    # Guard against an empty loader, which would divide by zero.
    if batch_num == 0:
        print("%d Epoch , no batches in loader, nothing trained" % epoch)
        return
    train_loss /= batch_num
    print("%d Epoch , mean loss : %f" % (epoch, train_loss))


def train_with_latentKL(epoch, soas, torch_loader, optimizer, latent_loss_coef=1.0):
    """Run one training epoch with reconstruction MSE plus a latent-KL term.

    Args:
        epoch: current epoch index (used only in the progress printout).
        soas: the model; its forward pass returns
            (top_feats, masks, latent_codings, latents, pred_act).
        torch_loader: iterable yielding (obs, act) batches.
        optimizer: torch optimizer over soas's parameters.
        latent_loss_coef: weight of the latent KL term in the total loss.
    """
    # Put the model into training mode.
    soas.train()
    batch_num = 0.0        # number of batches seen this epoch
    train_loss_val = 0.0   # accumulated MSE
    latent_loss_val = 0.0  # accumulated latent KL
    for obs, act in torch_loader:
        # Clear stale parameter gradients.
        optimizer.zero_grad()
        # Invoke the module itself (not .forward()) so registered hooks fire.
        top_feats, masks, latent_codings, latents, pred_act = soas(obs)
        # Reconstruction MSE between predicted and ground-truth actions.
        mse_loss = get_reconstruct_mse(pred_act, act)
        train_loss_val += mse_loss.item()
        # KL loss on the latent coding; the coding dimension is split in half
        # (presumably mean/log-variance halves — TODO confirm in utils).
        split_size = latent_codings[0].shape[1] // 2
        latent_kl_loss = get_latent_coding_kl(latent_codings, split_size)
        latent_loss_val += latent_kl_loss.item()
        batch_num += 1
        # Weighted total loss; backpropagate and update.
        total_loss = mse_loss + latent_loss_coef * latent_kl_loss
        total_loss.backward()
        optimizer.step()
    # Guard against an empty loader, which would divide by zero.
    if batch_num == 0:
        print("%d Epoch, no batches in loader, nothing trained" % epoch)
        return
    train_loss_val /= batch_num
    latent_loss_val /= batch_num
    print("%d Epoch, mean MSE loss : %f, mean latent loss : %f" % (epoch, train_loss_val, latent_loss_val))

def train_with_latentKL_maskSegLen(epoch, soas, torch_loader, optimizer, latent_loss_coef=1.0, segLen_loss_coef=1.0):
    """Run one training epoch with MSE, latent-KL and mask-segment-length losses.

    Args:
        epoch: current epoch index (used only in the progress printout).
        soas: the model; its forward pass returns
            (top_feats, masks, latent_codings, latents, pred_act).
        torch_loader: iterable yielding (obs, act) batches.
        optimizer: torch optimizer over soas's parameters.
        latent_loss_coef: weight of the latent KL term.
        segLen_loss_coef: weight of the mask segment-length term.
    """
    # Put the model into training mode.
    soas.train()
    batch_num = 0.0        # number of batches seen this epoch
    mse_loss_val = 0.0     # accumulated MSE
    latent_loss_val = 0.0  # accumulated latent KL
    segLen_loss_val = 0.0  # accumulated segment-length loss
    for obs, act in torch_loader:
        # Clear stale parameter gradients.
        optimizer.zero_grad()
        # Invoke the module itself (not .forward()) so registered hooks fire.
        top_feats, masks, latent_codings, latents, pred_act = soas(obs)
        # Reconstruction MSE between predicted and ground-truth actions.
        mse_loss = get_reconstruct_mse(pred_act, act)
        mse_loss_val += mse_loss.item()
        # KL loss on the latent coding; the coding dimension is split in half
        # (presumably mean/log-variance halves — TODO confirm in utils).
        split_size = latent_codings[0].shape[1] // 2
        latent_kl_loss = get_latent_coding_kl(latent_codings, split_size)
        latent_loss_val += latent_kl_loss.item()
        # Mask segment-length loss. NOTE(review): the constants (80, 50, 100)
        # are undocumented tuning values for get_segment_mask_loss — confirm
        # their meaning in utils before changing them.
        segLen_loss = get_segment_mask_loss(masks, 80, 50, 100)
        segLen_loss_val += segLen_loss.item()
        batch_num += 1
        # Weighted total loss; backpropagate and update.
        total_loss = mse_loss + latent_loss_coef * latent_kl_loss + segLen_loss_coef * segLen_loss
        total_loss.backward()
        optimizer.step()
    # Guard against an empty loader, which would divide by zero.
    if batch_num == 0:
        print("%d Epoch, no batches in loader, nothing trained" % epoch)
        return
    mse_loss_val /= batch_num
    latent_loss_val /= batch_num
    segLen_loss_val /= batch_num
    print("%d Epoch, mean MSE loss : %f, mean latent loss : %f, mean segLen loss : %f" % (epoch, mse_loss_val, latent_loss_val, segLen_loss_val))

def train_with_latentKL_mask01(epoch, soas, torch_loader, optimizer, sigma_epoch, mse_loss_coef=1.0, latent_loss_coef=1.0, mask01_loss_coef=1.0):
    """Run one training epoch with MSE, latent-KL and mask-0/1 losses.

    Also accumulates summary statistics of the mask value distribution
    and prints them alongside the mean losses.

    Args:
        epoch: current epoch index (used only in the progress printout).
        soas: the model; its forward pass returns
            (top_feats, masks, latent_codings, latents, pred_act).
        torch_loader: iterable yielding (obs, act) batches.
        optimizer: torch optimizer over soas's parameters.
        sigma_epoch: sharpness argument forwarded to get_batch_mask_01_loss.
        mse_loss_coef: weight of the MSE term.
        latent_loss_coef: weight of the latent KL term.
        mask01_loss_coef: weight of the mask-0/1 term.
    """
    # Put the model into training mode.
    soas.train()
    batch_num = 0.0        # number of batches seen this epoch
    train_loss_val = 0.0   # accumulated MSE
    latent_loss_val = 0.0  # accumulated latent KL
    mask01_loss_val = 0.0  # accumulated mask-0/1 loss
    # Running totals for the mask distribution statistics.
    lower_half_total = 0.0
    upper_half_total = 0.0
    lower_total_scatter = 0.0
    mid_total_scatter = 0.0
    upper_total_scatter = 0.0
    for obs, act in torch_loader:
        # Clear stale parameter gradients.
        optimizer.zero_grad()
        # Invoke the module itself (not .forward()) so registered hooks fire.
        top_feats, masks, latent_codings, latents, pred_act = soas(obs)
        # Mask distribution statistics for this batch.
        lower_half_part, upper_half_part = get_mask_middle_scatter(masks)
        lower_half_total += lower_half_part
        upper_half_total += upper_half_part
        lower_scatter, mid_scatter, upper_scatter = get_mask_simple_scatter(masks)
        lower_total_scatter += lower_scatter
        mid_total_scatter += mid_scatter
        upper_total_scatter += upper_scatter
        # Reconstruction MSE between predicted and ground-truth actions.
        mse_loss = get_reconstruct_mse(pred_act, act)
        train_loss_val += mse_loss.item()
        # KL loss on the latent coding; the coding dimension is split in half
        # (presumably mean/log-variance halves — TODO confirm in utils).
        split_size = latent_codings[0].shape[1] // 2
        latent_kl_loss = get_latent_coding_kl(latent_codings, split_size)
        latent_loss_val += latent_kl_loss.item()
        # Loss pushing mask values toward the 0/1 extremes.
        mask01_loss = get_batch_mask_01_loss(masks, 0, sigma_epoch, 1, sigma_epoch)
        mask01_loss_val += mask01_loss.item()
        batch_num += 1
        # Weighted total loss; backpropagate and update.
        total_loss = mse_loss_coef * mse_loss + latent_loss_coef * latent_kl_loss + mask01_loss_coef * mask01_loss
        total_loss.backward()
        optimizer.step()
    # Guard against an empty loader, which would divide by zero.
    if batch_num == 0:
        print("%d Epoch, no batches in loader, nothing trained" % epoch)
        return
    train_loss_val /= batch_num
    latent_loss_val /= batch_num
    mask01_loss_val /= batch_num
    lower_half_total /= batch_num
    upper_half_total /= batch_num
    lower_total_scatter /= batch_num
    mid_total_scatter /= batch_num
    upper_total_scatter /= batch_num
    print("%d Epoch, mean MSE loss : %f, mean latent loss : %f, mean mask01 loss: %f, half scatter: %f, %f mask simple scatter: %f, %f, %f"
    % (epoch, train_loss_val, latent_loss_val, mask01_loss_val, lower_half_total, upper_half_total, lower_total_scatter, mid_total_scatter, upper_total_scatter))


def train_with_latentKL_mask01_maskLocal(epoch, soas, torch_loader, optimizer, sigma_epoch, mse_loss_coef=1.0, latent_loss_coef=1.0, mask01_loss_coef=1.0, mask_local_loss_coef=1.0, log_path=""):
    """Run one training epoch with MSE, latent-KL, mask-0/1 and mask-local-mean losses.

    Prints the mean losses and mask distribution statistics, and optionally
    appends them as a CSV row to log_path.

    Args:
        epoch: current epoch index (used in the printout and log row).
        soas: the model; its forward pass returns
            (top_feats, masks, latent_codings, latents, pred_act).
        torch_loader: iterable yielding (obs, act) batches.
        optimizer: torch optimizer over soas's parameters.
        sigma_epoch: sharpness argument forwarded to get_batch_mask_01_loss.
        mse_loss_coef: weight of the MSE term.
        latent_loss_coef: weight of the latent KL term.
        mask01_loss_coef: weight of the mask-0/1 term.
        mask_local_loss_coef: weight of the mask local-mean term.
        log_path: when non-empty, CSV metrics are appended to this file.
    """
    # Put the model into training mode.
    soas.train()
    batch_num = 0.0        # number of batches seen this epoch
    train_loss_val = 0.0   # accumulated MSE
    latent_loss_val = 0.0  # accumulated latent KL
    mask01_loss_val = 0.0  # accumulated mask-0/1 loss
    # Running totals for the mask distribution statistics.
    lower_half_total = 0.0
    upper_half_total = 0.0
    lower_total_scatter = 0.0
    mid_total_scatter = 0.0
    upper_total_scatter = 0.0
    for obs, act in torch_loader:
        # Clear stale parameter gradients.
        optimizer.zero_grad()
        # Invoke the module itself (not .forward()) so registered hooks fire.
        top_feats, masks, latent_codings, latents, pred_act = soas(obs)
        # Mask distribution statistics for this batch.
        lower_half_part, upper_half_part = get_mask_middle_scatter(masks)
        lower_half_total += lower_half_part
        upper_half_total += upper_half_part
        lower_scatter, mid_scatter, upper_scatter = get_mask_simple_scatter(masks)
        lower_total_scatter += lower_scatter
        mid_total_scatter += mid_scatter
        upper_total_scatter += upper_scatter
        # Reconstruction MSE between predicted and ground-truth actions.
        mse_loss = get_reconstruct_mse(pred_act, act)
        train_loss_val += mse_loss.item()
        # KL loss on the latent coding; the coding dimension is split in half
        # (presumably mean/log-variance halves — TODO confirm in utils).
        split_size = latent_codings[0].shape[1] // 2
        latent_kl_loss = get_latent_coding_kl(latent_codings, split_size)
        latent_loss_val += latent_kl_loss.item()
        # Loss pushing mask values toward the 0/1 extremes.
        mask01_loss = get_batch_mask_01_loss(masks, 0, sigma_epoch, 1, sigma_epoch)
        mask01_loss_val += mask01_loss.item()
        # Mask local-mean loss. NOTE(review): unlike the other terms this loss
        # is not accumulated or reported, and the constant 100 is undocumented
        # — confirm against get_mask_local_mean_loss_with_detach in utils.
        mask_local_loss = get_mask_local_mean_loss_with_detach(masks, 100)
        batch_num += 1
        # Weighted total loss; backpropagate and update.
        total_loss = mse_loss_coef * mse_loss + latent_loss_coef * latent_kl_loss + mask01_loss_coef * mask01_loss + mask_local_loss_coef * mask_local_loss
        total_loss.backward()
        optimizer.step()
    # Guard against an empty loader, which would divide by zero.
    if batch_num == 0:
        print("%d Epoch, no batches in loader, nothing trained" % epoch)
        return
    train_loss_val /= batch_num
    latent_loss_val /= batch_num
    mask01_loss_val /= batch_num
    lower_half_total /= batch_num
    upper_half_total /= batch_num
    lower_total_scatter /= batch_num
    mid_total_scatter /= batch_num
    upper_total_scatter /= batch_num
    print("%d Epoch, mean MSE loss : %f, mean latent loss : %f, mean mask01 loss: %f, half scatter: %f, %f mask simple scatter: %f, %f, %f"
    % (epoch, train_loss_val, latent_loss_val, mask01_loss_val, lower_half_total, upper_half_total, lower_total_scatter, mid_total_scatter, upper_total_scatter))
    # Append the epoch metrics to the log file, if one was requested.
    # Context manager guarantees the handle is closed even if write() raises.
    if len(log_path) > 0:
        with codecs.open(log_path, "a", "UTF-8") as fw:
            fw.write("%d,%f,%f,%f,%f,%f,%f,%f,%f\n" % (epoch, train_loss_val, latent_loss_val, mask01_loss_val, lower_half_total, upper_half_total, lower_total_scatter, mid_total_scatter, upper_total_scatter))

def train_with_latentKL_mask01_logged(epoch, soas, torch_loader, optimizer, sigma_epoch, mse_loss_coef=1.0, latent_loss_coef=1.0, mask01_loss_coef=1.0, log_path=""):
    """Run one training epoch with MSE, latent-KL and mask-0/1 losses, with logging.

    Identical to train_with_latentKL_mask01 but additionally appends the
    epoch metrics as a CSV row to log_path when it is non-empty.

    Args:
        epoch: current epoch index (used in the printout and log row).
        soas: the model; its forward pass returns
            (top_feats, masks, latent_codings, latents, pred_act).
        torch_loader: iterable yielding (obs, act) batches.
        optimizer: torch optimizer over soas's parameters.
        sigma_epoch: sharpness argument forwarded to get_batch_mask_01_loss.
        mse_loss_coef: weight of the MSE term.
        latent_loss_coef: weight of the latent KL term.
        mask01_loss_coef: weight of the mask-0/1 term.
        log_path: when non-empty, CSV metrics are appended to this file.
    """
    # Put the model into training mode.
    soas.train()
    batch_num = 0.0        # number of batches seen this epoch
    train_loss_val = 0.0   # accumulated MSE
    latent_loss_val = 0.0  # accumulated latent KL
    mask01_loss_val = 0.0  # accumulated mask-0/1 loss
    # Running totals for the mask distribution statistics.
    lower_half_total = 0.0
    upper_half_total = 0.0
    lower_total_scatter = 0.0
    mid_total_scatter = 0.0
    upper_total_scatter = 0.0
    for obs, act in torch_loader:
        # Clear stale parameter gradients.
        optimizer.zero_grad()
        # Invoke the module itself (not .forward()) so registered hooks fire.
        top_feats, masks, latent_codings, latents, pred_act = soas(obs)
        # Mask distribution statistics for this batch.
        lower_half_part, upper_half_part = get_mask_middle_scatter(masks)
        lower_half_total += lower_half_part
        upper_half_total += upper_half_part
        lower_scatter, mid_scatter, upper_scatter = get_mask_simple_scatter(masks)
        lower_total_scatter += lower_scatter
        mid_total_scatter += mid_scatter
        upper_total_scatter += upper_scatter
        # Reconstruction MSE between predicted and ground-truth actions.
        mse_loss = get_reconstruct_mse(pred_act, act)
        train_loss_val += mse_loss.item()
        # KL loss on the latent coding; the coding dimension is split in half
        # (presumably mean/log-variance halves — TODO confirm in utils).
        split_size = latent_codings[0].shape[1] // 2
        latent_kl_loss = get_latent_coding_kl(latent_codings, split_size)
        latent_loss_val += latent_kl_loss.item()
        # Loss pushing mask values toward the 0/1 extremes.
        mask01_loss = get_batch_mask_01_loss(masks, 0, sigma_epoch, 1, sigma_epoch)
        mask01_loss_val += mask01_loss.item()
        batch_num += 1
        # Weighted total loss; backpropagate and update.
        total_loss = mse_loss_coef * mse_loss + latent_loss_coef * latent_kl_loss + mask01_loss_coef * mask01_loss
        total_loss.backward()
        optimizer.step()
    # Guard against an empty loader, which would divide by zero.
    if batch_num == 0:
        print("%d Epoch, no batches in loader, nothing trained" % epoch)
        return
    train_loss_val /= batch_num
    latent_loss_val /= batch_num
    mask01_loss_val /= batch_num
    lower_half_total /= batch_num
    upper_half_total /= batch_num
    lower_total_scatter /= batch_num
    mid_total_scatter /= batch_num
    upper_total_scatter /= batch_num
    print("%d Epoch, mean MSE loss : %f, mean latent loss : %f, mean mask01 loss: %f, half scatter: %f, %f mask simple scatter: %f, %f, %f"
    % (epoch, train_loss_val, latent_loss_val, mask01_loss_val, lower_half_total, upper_half_total, lower_total_scatter, mid_total_scatter, upper_total_scatter))
    # Append the epoch metrics to the log file, if one was requested.
    # Context manager guarantees the handle is closed even if write() raises.
    if len(log_path) > 0:
        with codecs.open(log_path, "a", "UTF-8") as fw:
            fw.write("%d,%f,%f,%f,%f,%f,%f,%f,%f\n" % (epoch, train_loss_val, latent_loss_val, mask01_loss_val, lower_half_total, upper_half_total, lower_total_scatter, mid_total_scatter, upper_total_scatter))
