import logging
from pathlib import Path
import os
from tqdm import tqdm

import torch.optim as optim
import torch.utils
import torch.utils.cpp_extension
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from PIL import Image

from dataset.benchmarks import fast_npss
from network.cvae_ic_dwt import *
from utils.metrics import *

import spin

from pytorch_wavelets import DTCWTForward

import matplotlib
matplotlib.use("Agg")

# Mapping from PE-KSP joint names (keys) to SPIN skeleton joint names (values).
# NOTE(review): "Right Shoulder" -> "Right ForeArm" and "Left Shoulder" ->
# "Left ForeArm" break the otherwise 1:1 naming pattern — confirm against the
# SPIN skeleton definition that this is intentional.
PE_KSP_TO_SPIN = {
    "Head": "Head",
    "Neck": "Neck",
    "Right Shoulder": "Right ForeArm",
    "Right Arm": "Right Arm",
    "Right Hand": "Right Hand",
    "Left Shoulder": "Left ForeArm",
    "Left Arm": "Left Arm",
    "Left Hand": "Left Hand",
    "Spine": "Spine1",
    "Hips": "Hips",
    "Right Upper Leg": "Right Upper Leg",
    "Right Leg": "Right Leg",
    "Right Foot": "Right Foot",
    "Left Upper Leg": "Left Upper Leg",
    "Left Leg": "Left Leg",
    "Left Foot": "Left Foot",
    "Left Toe": "Left Toe",
    "Right Toe": "Right Toe",
}


def bi_concat(pred, iv_pred, seq_len):
    """Blend a forward and a reverse rollout into one full sequence.

    Args:
        pred: forward prediction, shape B*T*J*3 with
            T = seq_len // 2 + (seq_len // 10) * 2.
        iv_pred: reverse-time prediction, same shape as ``pred``
            (still in reverse order; it is flipped here).
        seq_len: length of the full output sequence.

    Returns:
        Tensor of shape B*seq_len*J*3: the forward head, a linearly
        cross-faded overlap region, and the tail of the (flipped)
        reverse rollout.
    """
    # flip the reverse rollout back to forward time order
    iv_pred = torch.flip(iv_pred, dims=[1])

    over_len = (seq_len // 10) * 2
    half_len = seq_len // 2

    # pred: B*T*J*3
    before_pred = pred[:, :half_len - over_len]
    after_pred = iv_pred[:, over_len * 2:]

    positive_middle_pred = pred[:, half_len - over_len:]
    reverse_middle_pred = iv_pred[:, :over_len * 2]

    # Linear cross-fade over the 2*over_len overlap frames: weight goes
    # 1 -> 0 on the forward branch. Vectorized, and allocated on pred's
    # device/dtype instead of a hard-coded .cuda() zeros buffer, so the
    # function also works on CPU tensors.
    ratio = 1 - torch.arange(over_len * 2, device=pred.device, dtype=pred.dtype) / (over_len * 2 - 1)
    ratio = ratio.view(1, -1, 1, 1)
    middle_pred = positive_middle_pred * ratio + reverse_middle_pred * (1 - ratio)

    concat_pred = torch.cat([before_pred, middle_pred, after_pred], 1)
    return concat_pred  # B*T*22*3 (no permute)


def freeze_layers(model):
    """Freeze every BatchNorm and Dropout layer of ``model``.

    BatchNorm layers are put in eval mode (running statistics stop
    updating) and their affine parameters stop receiving gradients;
    Dropout layers become identity in eval mode (they have no
    parameters, so only the mode flip matters for them).
    """
    # NOTE(review): the original guard ``if type(module) is False: continue``
    # could never trigger (a class object is never the literal False) and
    # has been removed.
    frozen_types = (nn.modules.batchnorm._BatchNorm, nn.Dropout)
    for module in model.modules():
        if isinstance(module, frozen_types):
            module.eval()
            for param in module.parameters():
                param.requires_grad = False


class Model(object):
    def __init__(self, conf, skeleton, train_data=None, test_data=None) -> None:
        """Assemble the full training pipeline.

        Builds the (partially frozen) HMR pose estimator, the
        forward/reverse CVAE pair, the short/long motion discriminators,
        and one Adam optimizer per group.

        Args:
            conf: nested configuration dict with "data", "model",
                "network" ("hmr", "cvae_ic_dwt") and "save" sections.
            skeleton: skeleton definition forwarded to both CVAEs.
            train_data: training dataset (required for train()).
            test_data: test dataset; its ``cur_sequence_length`` seeds
                this model's current sequence length.
        """
        self.data_conf = conf["data"]
        self.model_conf = conf["model"]
        self.hmr_conf = conf["network"]["hmr"]
        self.train_conf = conf["network"]["cvae_ic_dwt"]

        self.lr = self.train_conf["learning_rate"]
        self.epoch_num = self.train_conf["epoch"]
        self.batch_size = self.train_conf["batch_size"]

        self.save_path = conf["save"]["save_path"]
        self.load_path = self.save_path
        self.log_path = conf["save"]["log_path"]

        self.train_data = train_data
        self.test_data = test_data
        self.cur_sequence_length = self.test_data.cur_sequence_length
        self.p_std = None  # global position std; populated in train()/test()
        self.v_std = None
        
        # build hmr: load pretrained SPIN weights, then freeze its
        # BatchNorm/Dropout layers so only the remaining params fine-tune
        self.hmr = spin.hmr(self.hmr_conf["smpl_mean_params_path"], self.hmr_conf)
        self.hmr.to("cuda")
        checkpoint = torch.load(self.hmr_conf["spin_model_path"], map_location="cpu")
        self.hmr.load_state_dict(checkpoint["model"], strict=False)
        freeze_layers(self.hmr)
        self.optimizer_hmr = optim.Adam(
            filter(lambda p: p.requires_grad, self.hmr.parameters()),
            lr=1e-6,  # very small LR: HMR is only fine-tuned
        )
        
        # build cvae: the forward and reverse models share one optimizer
        self.cvae = CVAE(conf, skeleton)
        self.iv_cvae = CVAE(conf, skeleton)
        self.optimizer_cvae = optim.Adam(lr=self.lr,
                                    params=(list(self.cvae.parameters()) +
                                            list(self.iv_cvae.parameters())),
                                    betas=(self.train_conf["beta1"], self.train_conf["beta2"]),
                                    weight_decay=self.train_conf["weight_decay"])
        
        # build discriminator networks (short and long temporal windows)
        self.long_discriminator = LongMotionDiscriminator(length=self.data_conf["long_length"],
                                                          in_dim=self.data_conf["joint_num"]*3*2).cuda()
        self.short_discriminator = ShortMotionDiscriminator(length=self.data_conf["short_length"],
                                                            in_dim=self.data_conf["joint_num"] * 3 * 2).cuda()
        self.optimizer_discriminator = optim.Adam(lr=self.lr * 0.1,
                                                  params=(list(self.long_discriminator.parameters()) +
                                                          list(self.short_discriminator.parameters())),
                                                  betas=(self.train_conf["beta1"], self.train_conf["beta2"]),
                                                  weight_decay=self.train_conf["weight_decay"])
        
        # Fix: the original wrote Path(self.save_path / 'log.txt'), which
        # applies '/' before converting to Path and raises TypeError when
        # save_path is a plain str (os.path.join usage elsewhere suggests
        # it is) — convert to Path first.
        logging.basicConfig(level=logging.INFO, format='%(asctime)s  %(message)s',
                            filename=Path(self.save_path) / 'log.txt')
    
    def load(self, epoch=0):
        """Restore all networks and optimizers from ``<load_path>/checkpoint``.

        ``epoch == 0`` loads the unprefixed (best) checkpoint files; any
        other value loads the files prefixed with that epoch number.
        """
        prefix = "" if epoch == 0 else str(epoch)
        ckpt_dir = Path(self.load_path) / 'checkpoint'

        def restore(target, filename):
            # Load one state dict (model or optimizer) from the checkpoint dir.
            target.load_state_dict(torch.load(ckpt_dir / (prefix + filename)))

        # hmr
        restore(self.hmr, 'hmr.pth')
        restore(self.optimizer_hmr, 'optimizer_h.pth')

        # cvae
        restore(self.cvae, 'cvae.pth')
        restore(self.iv_cvae, 'iv_cvae.pth')
        restore(self.optimizer_cvae, 'optimizer_c.pth')

        # discriminator
        restore(self.short_discriminator, 'short_discriminator.pth')
        restore(self.long_discriminator, 'long_discriminator.pth')
        restore(self.optimizer_discriminator, 'optimizer_d.pth')
    
    def save(self, epoch=0):
        """Write all networks and optimizers to ``save_path``.

        ``epoch == 0`` writes the unprefixed (best) files; any other
        value prefixes every filename with the epoch number.
        """
        prefix = "" if epoch == 0 else str(epoch)
        out_dir = Path(self.save_path)

        checkpoints = [
            # hmr
            (self.hmr, 'hmr.pth'),
            (self.optimizer_hmr, 'optimizer_h.pth'),
            # cvae
            (self.cvae, 'cvae.pth'),
            (self.iv_cvae, 'iv_cvae.pth'),
            (self.optimizer_cvae, 'optimizer_c.pth'),
            # discriminator
            (self.short_discriminator, 'short_discriminator.pth'),
            (self.long_discriminator, 'long_discriminator.pth'),
            (self.optimizer_discriminator, 'optimizer_d.pth'),
        ]
        for obj, filename in checkpoints:
            torch.save(obj.state_dict(), out_dir / (prefix + filename))
        
    def train(self):
        """Run the full training loop.

        Per epoch: fine-tune HMR on the endpoint frames, roll out the
        forward and reverse CVAEs over a progressively growing sequence,
        train the short/long motion discriminators (LSGAN objective),
        then update both CVAEs with reconstruction + adversarial +
        overlap + DWT + foot-slide losses. The best (lowest mean epoch
        loss) model is saved, plus periodic epoch-numbered snapshots.
        """
        writer = SummaryWriter(self.log_path)

        # The DTCWT filter bank is input-independent; build it once here
        # instead of re-instantiating it on every batch (hoisted out of
        # the inner loop — same filters, less per-batch overhead).
        xfm = DTCWTForward(J=3, biort='near_sym_b', qshift='qshift_b').cuda()

        min_loss = 1000000
        for epoch in range(self.epoch_num):
            self.hmr.train()
            self.cvae.train()
            self.iv_cvae.train()
            
            # curriculum: grow the training sequence length by 2 frames
            # every `seq_len_update_rate` epochs, capped at the max
            self.cur_sequence_length = min(self.data_conf["sequence_length"], self.train_data.start_sequence_length +
                                           int(epoch / self.data_conf["seq_len_update_rate"]) * 2)
            
            # frames each direction must roll out: half the sequence plus
            # the blending overlap used by bi_concat
            single_seq_len = self.cur_sequence_length // 2 + (self.cur_sequence_length // 10) * 2

            # Ztta (time-to-arrival) positional embedding
            z_tta = gen_ztta(self.cur_sequence_length, self.model_conf["encoder"]["state_dim"][-1],
                             self.model_conf["position_encoding_basis"]).cuda()
            
            # save global position std
            batch_idx = 0
            hmr_batch_loss = []
            batch_loss = []
            
            train_loader = DataLoader(dataset=self.train_data, batch_size=self.batch_size, drop_last=True, num_workers=4)
            
            for data in tqdm(train_loader, ncols=100):
                # ---- HMR step: refine the endpoint poses ----
                self.optimizer_hmr.zero_grad()
                hmr_data, X, Q = data
                self.hmr.reset()
                
                # overwrite the non-root joints of the first and last frame
                # of Q with the HMR predictions for the start/target images
                for i in range(hmr_data["star"]["img"].shape[0]):
                    Q_star = self.hmr(hmr_data["star"],i)
                    Q_tar = self.hmr(hmr_data["tar"],i)
                    Q[i,0,1:] = Q_star[0,1:]
                    Q[i,-1,1:] = Q_tar[0,1:]
                
                # loss update
                l2, Ltan, Lcos, Lpar, Lspine, Lgr, Lstraight3d, Lcon2d,\
                loss_foot, loss_sh, msc_loss, Lprior, hmr_loss = self.hmr.get_loss()
                hmr_loss.backward()
                self.optimizer_hmr.step()
                
                # loss log. Fix: the original used global_step=epoch * 317
                # (no + batch_idx), so every batch of an epoch overwrote the
                # same TensorBoard step; now consistent with the CVAE logging.
                hmr_loss = hmr_loss.item()
                hmr_batch_loss.append(hmr_loss)
                writer.add_scalar("hmr loss", hmr_loss, global_step=epoch * 317 + batch_idx)
                writer.add_scalar("mse", l2, global_step=epoch * 317 + batch_idx)
                writer.add_scalar("tan", Ltan, global_step=epoch * 317 + batch_idx)
                writer.add_scalar("cos", Lcos, global_step=epoch * 317 + batch_idx)
                writer.add_scalar("par", Lpar, global_step=epoch * 317 + batch_idx)
                writer.add_scalar("spine", Lspine, global_step=epoch * 317 + batch_idx)
                writer.add_scalar("ground/chain", Lgr, global_step=epoch * 317 + batch_idx)
                writer.add_scalar("straight_in_3d", Lstraight3d, global_step=epoch * 317 + batch_idx)
                writer.add_scalar("contact/con2d", Lcon2d, global_step=epoch * 317 + batch_idx)
                writer.add_scalar("ground/foot", loss_foot, global_step=epoch * 317 + batch_idx)
                writer.add_scalar("ground/silhuette", loss_sh, global_step=epoch * 317 + batch_idx)
                writer.add_scalar("contact/msc", msc_loss, global_step=epoch * 317 + batch_idx)
                writer.add_scalar("rotmat", Lprior, global_step=epoch * 317 + batch_idx)
                 
                # generate in-between dataset (forward and reverse views)
                batch_sample, iv_batch_sample = self.train_data.update_data(X, Q)                
                
                # persist normalization stats so test() can reload them
                norm_path = os.path.join(self.save_path, "norm.csv")
                std = np.concatenate((self.train_data.position_std[0], self.train_data.velocity_std.reshape((66, 1))), axis=1)
                np.savetxt(norm_path, std)
                self.p_std = self.train_data.position_std.view(1, 1, -1, 3).cuda()
                
                # init lstm state for both directions
                bs = len(batch_sample["local_quaternion"])
                self.cvae.reset(bs)
                self.iv_cvae.reset(bs)
                
                # init pred lists with the ground-truth boundary frames
                pred_list = [batch_sample["global_position"][:, 0].cuda()]
                contact_list = [batch_sample["contact"][:, 0].cuda()]
                iv_index = self.data_conf["sequence_length"] - self.cur_sequence_length
                iv_pred_list = [iv_batch_sample["global_position"][:, iv_index].cuda()]
                iv_contact_list = [iv_batch_sample["contact"][:, iv_index].cuda()]
                
                for t in range(single_seq_len - 1):
                    # positive target info: use the reverse model's latest
                    # opposite target once available (None before the first
                    # reverse step), otherwise fall back to ground truth
                    target_info = self.iv_cvae.get_opposite_target()
                    
                    if target_info is None:
                        target_local_quaternion = batch_sample["local_quaternion"][:, self.cur_sequence_length - 1]
                        target_local_quaternion = target_local_quaternion.view(target_local_quaternion.size(0), -1)
                        target_root_velocity = batch_sample["root_velocity"][:, self.cur_sequence_length - 1 - 1]
                        target_contact = batch_sample["contact"][:, self.cur_sequence_length - 1]
                        target_root_position = batch_sample["global_position"][:, self.cur_sequence_length - 1, 0, :]

                        target_info = [target_local_quaternion, target_root_velocity,
                                       target_contact, target_root_position]                      
                    
                    # positive (forward-time) step
                    pred_global_position, pred_contact = self.cvae(batch_sample,
                                                                   t,
                                                                   self.cur_sequence_length - 1 - t,
                                                                   target_info,
                                                                   self.cur_sequence_length,
                                                                   z_tta,
                                                                   self.p_std)
                    pred_list.append(pred_global_position)
                    contact_list.append(pred_contact)
                    
                    # reverse target info
                    iv_target_info = self.cvae.get_opposite_target()
                    
                    # reverse (backward-time) step
                    iv_pred_global_position, iv_pred_contact = self.iv_cvae(iv_batch_sample,
                                                                            iv_index + t,
                                                                            t + 1,
                                                                            iv_target_info,
                                                                            self.cur_sequence_length,
                                                                            z_tta,
                                                                            self.p_std,
                                                                            positive=False)
                    iv_pred_list.append(iv_pred_global_position)
                    iv_contact_list.append(iv_pred_contact)
                
                root_loss, contact_loss, quaternion_loss, position_loss, KLD = self.cvae.get_loss()
                iv_root_loss, iv_contact_loss, iv_quaternion_loss, iv_position_loss, iv_KLD = self.iv_cvae.get_loss()
                
                # concat the two rollouts into one full-length sequence
                pred = torch.cat(pred_list, 0)
                pred = pred.view(len(pred_list), -1, 22, 3).permute(1, 0, 2, 3) # BxTxJx3
                iv_pred = torch.cat(iv_pred_list, 0)
                iv_pred = iv_pred.view(len(iv_pred_list), -1, 22, 3).permute(1, 0, 2, 3)  # BxTxJx3 (reverse-time rollout)
                concat_pred = bi_concat(pred, iv_pred, self.cur_sequence_length)  # BxTxJx3
                concat_pred = concat_pred.type(torch.float32)
                
                # adversarial input: per-frame flattened (position, velocity)
                # channels with time as the last dimension
                fake_position = torch.cat([item.reshape(item.size(0), -1).unsqueeze(-1)
                                           for item in concat_pred.permute(1, 0, 2, 3)], -1)
                fake_velocity = torch.cat([fake_position[:, :, 1:] - fake_position[:, :, :-1],
                                           torch.zeros_like(fake_position[:, :, 0:1])], -1)
                fake_input = torch.cat([fake_position, fake_velocity], 1)

                real_position = torch.cat(
                    [batch_sample["global_position"][:, i].view(batch_sample["global_position"].size(0), -1).unsqueeze(
                        -1) for i in range(self.cur_sequence_length)], -1)
                real_velocity = torch.cat([real_position[:, :, 1:] - real_position[:, :, :-1],
                                           torch.zeros_like(real_position[:, :, 0:1])], -1)
                real_input = torch.cat([real_position, real_velocity], 1)
                
                # discriminator step (least-squares GAN objective)
                self.optimizer_discriminator.zero_grad()
                short_fake_logic = torch.mean(self.short_discriminator(fake_input.detach())[:, 0], 1)
                short_real_logic = torch.mean(self.short_discriminator(real_input.cuda())[:, 0], 1)
                short_fake_loss = torch.mean(short_fake_logic ** 2)
                short_real_loss = torch.mean((short_real_logic - 1) ** 2)
                short_d_loss = (short_real_loss + short_fake_loss) / 2

                long_fake_logic = torch.mean(self.long_discriminator(fake_input.detach())[:, 0], 1)
                long_real_logic = torch.mean(self.long_discriminator(real_input.cuda())[:, 0], 1)
                long_fake_loss = torch.mean(long_fake_logic ** 2)
                long_real_loss = torch.mean((long_real_logic - 1) ** 2)
                long_d_loss = (long_real_loss + long_fake_loss) / 2

                discriminator_loss = self.train_conf["gan_loss_weight"] * (short_d_loss + long_d_loss)
                discriminator_loss.backward()
                self.optimizer_discriminator.step()
                
                # generator (CVAE) step
                self.optimizer_cvae.zero_grad()
                short_g_fake_logic = torch.mean(self.short_discriminator(fake_input)[:, 0], 1)
                short_g_loss = torch.mean((short_g_fake_logic - 1) ** 2)
                long_g_fake_logic = torch.mean(self.long_discriminator(fake_input)[:, 0], 1)
                long_g_loss = torch.mean((long_g_fake_logic - 1) ** 2)
                
                # overlap consistency: both rollouts should agree on the
                # cross-faded middle region (normalized by position std)
                overlap_loss = torch.mean(torch.abs(
                    pred[:, self.cur_sequence_length // 2 - (self.cur_sequence_length // 10) * 2:] -
                    torch.flip(iv_pred, dims=[1])[:, :(self.cur_sequence_length // 10) * 2 * 2]
                ) / self.p_std)
                
                # foot-slide loss: penalize foot velocity on frames where the
                # ground-truth contact flag is set
                pred_pos = torch.cat([x.reshape(x.size(0), -1).unsqueeze(-1)
                                      for x in concat_pred.permute(1, 0, 2, 3)], -1)

                pred_vel = (pred_pos[:, self.data_conf["foot_index"], 1:] -
                            pred_pos[:, self.data_conf["foot_index"], :-1])
                pred_vel = pred_vel.view(pred_vel.size(0), 4, 3, pred_vel.size(-1))
                slide_loss = torch.mean(torch.abs(pred_vel * batch_sample["contact"][:, :self.cur_sequence_length - 1]
                                                  .cuda().permute(0, 2, 1).unsqueeze(2)))

                # dwt loss: match the high-frequency DTCWT coefficients at
                # every scale (low-pass outputs are unused)
                _, pred_high = xfm(concat_pred)
                _, real_high = xfm(batch_sample["global_position"][:, :self.cur_sequence_length].cuda())
                dwt_loss = 0
                for i in range(len(pred_high)):
                    # Fix: the original assigned with '=' here, so only the
                    # last scale survived the average below.
                    dwt_loss += torch.mean(torch.abs(pred_high[i] - real_high[i]))
                dwt_loss /= len(pred_high)

                # robust KLD (Charbonnier-style smoothing)
                # NOTE(review): iv_KLD is not robustified the same way —
                # confirm whether that asymmetry is intentional.
                KLD = torch.sqrt(1 + KLD ** 2) - 1

                # weighted sum of all generator losses
                total_loss = (self.train_conf["quaternion_loss_weight"] * (quaternion_loss + iv_quaternion_loss) / 2 +
                              self.train_conf["contact_loss_weight"] * (contact_loss + iv_contact_loss) / 2 +
                              self.train_conf["root_loss_weight"] * (root_loss + iv_root_loss) / 2 +
                              self.train_conf["position_loss_weight"] * (position_loss + iv_position_loss) / 2 +
                              self.train_conf["gan_loss_weight"] * (short_g_loss + long_g_loss) +
                              self.train_conf["kld_weight"] * (KLD + iv_KLD) / 2 +
                              self.train_conf["overlap_loss_weight"] * overlap_loss +
                              self.train_conf["dwt_loss_weight"] * dwt_loss +
                              self.train_conf["slide_loss_weight"] * slide_loss)
                total_loss.backward()
                
                # gradient clip
                nn.utils.clip_grad_norm_(self.cvae.parameters(), 0.5)
                nn.utils.clip_grad_norm_(self.iv_cvae.parameters(), 0.5)
                
                self.optimizer_cvae.step()
                        
                # generator loss
                writer.add_scalar("root_loss", root_loss + iv_root_loss, global_step=epoch * 317 + batch_idx)
                writer.add_scalar("contact_loss", contact_loss + iv_contact_loss, global_step=epoch * 317 + batch_idx)
                writer.add_scalar("position_loss", position_loss + iv_position_loss,
                                  global_step=epoch * 317 + batch_idx)
                writer.add_scalar("quaternion_loss", quaternion_loss + iv_quaternion_loss,
                                  global_step=epoch * 317 + batch_idx)
                writer.add_scalar("KLD", KLD + iv_KLD, global_step=epoch * 317 + batch_idx)
                writer.add_scalar("total_generator_loss", total_loss, global_step=epoch * 317 + batch_idx)
                writer.add_scalar("overlap_loss", overlap_loss, global_step=epoch * 317 + batch_idx)
                writer.add_scalar("dwt_loss", dwt_loss, global_step=epoch * 317 + batch_idx)
                writer.add_scalar("slide_loss", slide_loss, global_step=epoch * 317 + batch_idx)

                # GAN loss
                writer.add_scalar("short_generator_loss", short_g_loss, global_step=epoch * 317 + batch_idx)
                writer.add_scalar("long_generator_loss", long_g_loss, global_step=epoch * 317 + batch_idx)
                writer.add_scalar("short_discriminator_loss", short_d_loss, global_step=epoch * 317 + batch_idx)
                writer.add_scalar("long_discriminator_loss", long_d_loss, global_step=epoch * 317 + batch_idx)
                writer.add_scalar("discriminator_loss", discriminator_loss, global_step=epoch * 317 + batch_idx)

                batch_loss.append(total_loss.cpu().detach())
                batch_idx += 1
            
            # save model whenever the mean epoch loss improves
            hmr_train_loss = np.mean(hmr_batch_loss)
            cur_train_loss = np.mean(batch_loss)
            if cur_train_loss < min_loss:
                min_loss = cur_train_loss
                self.save()
                
            # periodic evaluation (skipped at epoch 0)
            if epoch % 10 == 0 and epoch != 0:
                test_loss = self.test()
                writer.add_scalar("test loss", test_loss, global_step=epoch * 317)

            # periodic epoch-numbered snapshot
            if epoch % self.train_conf["save_duration"] == 0:
                self.save(str(epoch))

            logging.info('Epoch {} : '.format(epoch) + 
                         'HMR Loss = {:.9f} '.format(hmr_train_loss) +
                         'Train Loss = {:.9f} '.format(cur_train_loss) +
                         'Min Loss = {:.9f} '.format(min_loss) +
                         'lr = {} '.format(self.lr))

            print('Epoch {} : '.format(epoch) +
                  'HMR Loss = {:.9f} '.format(hmr_train_loss) +
                  'Train Loss = {:.9f} '.format(cur_train_loss) +
                  'Min Loss = {:.9f} '.format(min_loss) +
                  'lr = {} '.format(self.lr))

        writer.close()
        
    def test(self):

        # load global position std
        std = np.loadtxt(os.path.join(self.load_path, "checkpoint/norm.csv"))
        self.p_std = torch.from_numpy(std[:, 0]).view(1, 1, -1, 3).cuda()
        
        self.hmr.eval()
        self.cvae.eval()
        self.iv_cvae.eval()

        self.cur_sequence_length = self.data_conf["sequence_length"]
        single_seq_len = self.cur_sequence_length // 2 + (self.cur_sequence_length // 10) * 2

        # Ztta embedding
        z_tta = gen_ztta(self.cur_sequence_length, self.model_conf["encoder"]["state_dim"][-1],
                         self.model_conf["position_encoding_basis"]).cuda()
        
        metrics = []
        hmr_batch_loss = []
        batch_loss = []
        
        batch_num = 0
        
        repeat_num = 1
        repeated_positions = []
        while repeat_num > 0:
            repeat_num -= 1
            all_positions = []
            
            test_loader = DataLoader(dataset=self.test_data, batch_size=self.batch_size, drop_last=True)
            for batch_idx, data in enumerate(test_loader):
                
                if self.data_conf["test_nums"] == "single" and batch_idx not in self.data_conf["single_idx"]:
                    continue
                
                batch_num += 1
                with torch.no_grad():
                    # init hmr
                    hmr_data, X, Q = data
                    # Real dataset
                    batch_sample_orig, _ = self.test_data.update_data(X, Q)
                    self.hmr.reset()
                    for i in range(hmr_data["star"]["img"].shape[0]):
                        Q_star = self.hmr(hmr_data["star"],i)
                        Q_tar = self.hmr(hmr_data["tar"],i)
                        Q[i,0,1:] = Q_star[0,1:]
                        Q[i,-1,1:] = Q_tar[0,1:]
                    
                    # Update dataset
                    batch_sample, iv_batch_sample = self.test_data.update_data(X, Q)
                
                    # loss update
                    l2, Ltan, Lcos, Lpar, Lspine, Lgr, Lstraight3d, Lcon2d,\
                    loss_foot, loss_sh, msc_loss, Lprior, hmr_loss = self.hmr.get_loss()
                    hmr_loss = hmr_loss.item()
                    hmr_batch_loss.append(hmr_loss)
                    
                    if self.data_conf["save_gif"]:
                        print(batch_idx, " ", batch_sample["global_position"].shape)

                    # init lstm
                    bs = len(batch_sample["local_quaternion"])
                    self.cvae.reset(bs)
                    self.iv_cvae.reset(bs)

                    # init pred list
                    pred_list = [batch_sample["global_position"][:, 0].cuda()]
                    contact_list = [batch_sample["contact"][:, 0].cuda()]

                    iv_index = self.data_conf["sequence_length"] - self.cur_sequence_length
                    iv_pred_list = [iv_batch_sample["global_position"][:, iv_index].cuda()]
                    iv_contact_list = [iv_batch_sample["contact"][:, iv_index].cuda()]

                    pred_quat_list = [batch_sample["local_quaternion"][:, 0].cuda()]
                    pred_root_list = [batch_sample["global_position"][:, 0, 0].cuda()]
                    iv_pred_quat_list = [iv_batch_sample["local_quaternion"][:, iv_index].cuda()]
                    iv_pred_root_list = [iv_batch_sample["global_position"][:, iv_index, 0].cuda()]

                    for t in range(single_seq_len - 1):
                        # positive target info
                        target_info = self.iv_cvae.get_opposite_target()
                        if target_info is None:
                            target_local_quaternion = batch_sample["local_quaternion"][:, self.cur_sequence_length - 1]
                            target_local_quaternion = target_local_quaternion.view(target_local_quaternion.size(0), -1)
                            target_root_velocity = batch_sample["root_velocity"][:, self.cur_sequence_length - 1 - 1]
                            target_contact = batch_sample["contact"][:, self.cur_sequence_length - 1]
                            target_root_position = batch_sample["global_position"][:, self.cur_sequence_length - 1, 0,
                                                   :]

                            target_info = [target_local_quaternion, target_root_velocity,
                                           target_contact, target_root_position]

                        # positive
                        pred_global_position, pred_contact, pred_local_quaternion, pred_root_position = self.cvae(
                            batch_sample,
                            t,
                            self.cur_sequence_length - 1 - t,
                            target_info,
                            self.cur_sequence_length,
                            z_tta,
                            self.p_std,
                            train=False)
                        pred_list.append(pred_global_position)
                        contact_list.append(pred_contact)
                        pred_quat_list.append(pred_local_quaternion)
                        pred_root_list.append(pred_root_position)

                        # reverse target info
                        iv_target_info = self.cvae.get_opposite_target()

                        # reverse
                        iv_pred_global_position, iv_pred_contact, iv_pred_local_quaternion, iv_pred_root_position = self.iv_cvae(
                            iv_batch_sample,
                            iv_index + t,
                            t + 1,
                            iv_target_info,
                            self.cur_sequence_length,
                            z_tta,
                            self.p_std,
                            positive=False,
                            train=False)

                        iv_pred_list.append(iv_pred_global_position)
                        iv_contact_list.append(iv_pred_contact)
                        iv_pred_quat_list.append(iv_pred_local_quaternion)
                        iv_pred_root_list.append(iv_pred_root_position)

                    root_loss, contact_loss, quaternion_loss, position_loss, KLD = self.cvae.get_loss()
                    iv_root_loss, iv_contact_loss, iv_quaternion_loss, iv_position_loss, iv_KLD = self.iv_cvae.get_loss()

                    # concat
                    pred = torch.cat(pred_list, 0)
                    pred = pred.view(len(pred_list), -1, 22, 3).permute(1, 0, 2, 3)  # BxTxJx3
                    iv_pred = torch.cat(iv_pred_list, 0)
                    iv_pred = iv_pred.view(len(iv_pred_list), -1, 22, 3).permute(1, 0, 2, 3)  # BxTxJx3 (正向)
                    pred_gp = bi_concat(pred, iv_pred, self.cur_sequence_length)  # BxTx22x3

                    # slerp
                    bvh_list, pred_quat, pred_root = blend_quat(pred_quat_list,
                                                                iv_pred_quat_list,
                                                                pred_root_list,
                                                                iv_pred_root_list,
                                                                 self.cur_sequence_length)

                    # test loss
                    overlap_loss = torch.mean(torch.abs(
                        pred[:, self.cur_sequence_length // 2 - (self.cur_sequence_length // 10) * 2:] -
                        torch.flip(iv_pred, dims=[1])[:, :(self.cur_sequence_length // 10) * 2 * 2]
                    ) / self.p_std)

                    total_loss = (
                            self.train_conf["quaternion_loss_weight"] * (quaternion_loss + iv_quaternion_loss) / 2 +
                            self.train_conf["contact_loss_weight"] * (contact_loss + iv_contact_loss) / 2 +
                            self.train_conf["root_loss_weight"] * (root_loss + iv_root_loss) / 2 +
                            self.train_conf["position_loss_weight"] * (position_loss + iv_position_loss) / 2 +
                            self.train_conf["overlap_loss_weight"] * overlap_loss)

                    batch_loss.append(total_loss.cpu().detach())
                    all_positions.append(pred_gp)

                    # cal metric
                    if self.data_conf["save_metric"]:
                        # ADE, FDE: average / final displacement error vs. ground truth.
                        ADE, FDE = cal_ADE_FDE_metric(pred_gp, batch_sample["global_position"].cuda())
                        # print(batch_idx, " ADE: ", ADE, "FDE: ", FDE)
 
                        # NPSS: global position (power-spectrum similarity, computed
                        # on flattened B x T x (J*3) tensors).
                        pred_gp_ = pred_gp.view(pred_gp.size(0), pred_gp.size(1), -1)
                        gt_gp = batch_sample["global_position"].view(batch_sample["global_position"].size(0),
                                                                     batch_sample["global_position"].size(1),
                                                                     -1)
                        NPSS = fast_npss(gt_gp, pred_gp_.cpu())
                        # print(batch_idx, " NPSS-gp: ", NPSS)
 
                        # foot_index holds flattened coordinate indices of the foot
                        # joints; [1, 4, 7, 10] picks one channel per joint —
                        # presumably the vertical (height) coordinate of left ankle,
                        # left foot, right ankle, right foot (see names below). TODO confirm.
                        foot_index = np.asarray(self.data_conf["foot_index"])
                        concat_pred_ = pred_gp.view(pred_gp.size(0), pred_gp.size(1), -1)
                        foot_height = concat_pred_[:, :, foot_index[[1, 4, 7, 10]]]
                        # Per-sequence minimum height of each joint, broadcast over time,
                        # used as an estimated ground level.
                        ground_height_left_ankle = torch.min(foot_height[:, :, 0], dim=1). \
                            values.unsqueeze(-1).repeat(1, pred_gp.size(1))
                        ground_height_right_ankle = torch.min(foot_height[:, :, 2], dim=1). \
                            values.unsqueeze(-1).repeat(1, pred_gp.size(1))
                        ground_height_left_foot = torch.min(foot_height[:, :, 1], dim=1). \
                            values.unsqueeze(-1).repeat(1, pred_gp.size(1))
                        ground_height_right_foot = torch.min(foot_height[:, :, 3], dim=1). \
                            values.unsqueeze(-1).repeat(1, pred_gp.size(1))
 
                        # NOTE(review): 0.74 / 0.4 look like per-joint contact-height
                        # thresholds in the skeleton's units — confirm their origin.
                        update_height_left_ankle = foot_height[:, :, 0] - ground_height_left_ankle - 0.74
                        update_height_left_foot = foot_height[:, :, 1] - ground_height_left_foot - 0.74
                        update_height_right_ankle = foot_height[:, :, 2] - ground_height_right_ankle - 0.4
                        update_height_right_foot = foot_height[:, :, 3] - ground_height_right_foot - 0.4
 
                        new_foot_height = torch.cat([update_height_left_ankle.unsqueeze(-1),
                                                     update_height_left_foot.unsqueeze(-1),
                                                     update_height_right_ankle.unsqueeze(-1),
                                                     update_height_right_foot.unsqueeze(-1)], 2)
                        # A joint is considered in ground contact when its adjusted
                        # height drops below zero.
                        cal_contact_list = new_foot_height < 0
 
                        # Per-frame displacement (velocity) of the foot-joint
                        # coordinates, reshaped to B x 4 joints x 3 coords x (T-1).
                        pred_pos = pred_gp.permute(0, 2, 3, 1)
                        pred_pos = pred_pos.view(pred_pos.size(0), -1, pred_pos.size(-1))
                        # pred_vel = (pred_pos[:, foot_index[[0, 1, 2, 6, 7, 8]], 1:] -
                        #             pred_pos[:, foot_index[[0, 1, 2, 6, 7, 8]], :-1])
                        pred_vel = (pred_pos[:, foot_index, 1:] -
                                    pred_pos[:, foot_index, :-1])
                        pred_vel = pred_vel.view(pred_vel.size(0), 4, 3, pred_vel.size(-1))
                        # Foot-skate penalty: mean |velocity| of joints flagged as in contact.
                        slide_loss = torch.mean(torch.abs(pred_vel *
                                                          cal_contact_list[:, :-1].permute(0, 2, 1).unsqueeze(
                                                              2).cuda()))
 
                        # Foot-slide metric: L2 norm of contact-masked velocity, summed
                        # over joints/frames and normalized by sequence length.
                        dis = torch.abs(pred_vel * cal_contact_list[:, :-1].permute(0, 2, 1).unsqueeze(2))
                        dis = np.asarray(dis.permute(0, 3, 1, 2).cpu())  # B*49*4*3 (assumes T-1 == 49 — TODO confirm)
                        foot_slide = np.linalg.norm(dis, ord=2, axis=3).sum(-1).sum(
                            -1).mean() / self.cur_sequence_length
                        # print(batch_idx, " foot-slide : ", foot_slide)
 
                        metrics.append([ADE, 
                                        FDE, 
                                        NPSS, 
                                        slide_loss.cpu(), 
                                        foot_slide])

                    # save img
                    # Only the first batch element (bs = 0) is visualized/exported.
                    bs = 0
                    if self.data_conf["save_gif"]:
                        # Ensure output directories exist (created with open permissions).
                        if os.path.exists(str(Path(self.save_path)/'img'))==False:
                            os.makedirs(str(Path(self.save_path)/'img'), mode=0o777)
                        if os.path.exists(str(Path(self.save_path)/'gif'))==False:
                            os.makedirs(str(Path(self.save_path)/'gif'), mode=0o777)
                        img_list = []
                        global_position = batch_sample["global_position"]
                        global_position_orig = batch_sample_orig["global_position"]
                        # NOTE(review): len(pred_gp[1]) is the frame count of batch
                        # element 1 — presumably equal to pred_gp.size(1); confirm and
                        # consider the explicit size() call (fails for batch size 1).
                        for t in range(len(pred_gp[1])):
                            pred_global_position = pred_gp[:, t]
                            # Render start pose, current predicted pose, and end pose
                            # side by side for the prediction.
                            plot_pose(np.concatenate([global_position[bs, 0].view(22, 3).detach().cpu().numpy(),
                                                      pred_global_position[bs].view(22, 3).detach().cpu().numpy(),
                                                      global_position[bs, -1].view(22, 3).detach().cpu().numpy()], 0),
                                      t, str(self.save_path) + '/img/pred')
                            # Clamp t so the ground-truth frame index stays in range.
                            k = t if t < self.cur_sequence_length else (self.cur_sequence_length-1)
                            plot_pose(np.concatenate([global_position_orig[bs, 0].view(22, 3).detach().cpu().numpy(),
                                                      global_position_orig[bs, k].view(22, 3).detach().cpu().numpy(),
                                                      global_position_orig[bs, -1].view(22, 3).detach().cpu().numpy()], 0),
                                      t, str(self.save_path) + '/img/gt', "#FF8C00")
                            # Re-load the rendered frames and stitch prediction + GT
                            # horizontally into one gif frame.
                            pred_img = Image.open(str(self.save_path) + '/img/pred_' + str(t) + '.png', 'r')
                            gt_img = Image.open(str(self.save_path) + '/img/gt_' + str(t) + '.png', 'r')
                            img_list.append(np.concatenate([pred_img, gt_img.resize(pred_img.size)], 1))
                        
 
                        # save gif
                        import imageio
                        imageio.mimsave((str(self.save_path) + '/gif/img_%03d.gif' % batch_idx), img_list, duration=0.1)
 
                    if self.data_conf["save_bvh"]:
                        if os.path.exists(str(Path(self.save_path)/'bvh'))==False:
                            os.makedirs(str(Path(self.save_path)/'bvh'))
                        # Export the slerp-blended motion of batch element bs as BVH.
                        bvh_data = torch.cat([item[bs].unsqueeze(0) for item in bvh_list], 0).detach().cpu().numpy()
                        write_to_bvhfile(bvh_data, (str(self.save_path) + '/bvh/slerp_' + self.model_conf["model_name"] +
                                                    '_bs%02d_%03d.bvh' % (bs, batch_idx)))
                        
            # Collect all predicted positions for this repetition; multiple
            # repetitions enable the diversity (APD) metric below.
            all_positions = torch.cat(all_positions, 0)
            repeated_positions.append(all_positions)
 
        if len(repeated_positions) > 1:
            APD = cal_APD(repeated_positions)
            print("Mean APD: ", APD)
 
        # NOTE(review): `id` shadows the builtin and appears unused in the rest of
        # this method — candidate for removal.
        id = self.data_conf["single_idx"][0]

        # print hmr logging
        if self.data_conf["test_nums"] != "single" and self.data_conf["save_metric"]:
            # metrics columns: [ADE, FDE, NPSS, slide_loss, foot_slide]
            print(" Mean ADE = {:.4f}".format(np.mean(np.asarray(metrics)[:, 0])) +
                  " Mean FDE = {:.4f}".format(np.mean(np.asarray(metrics)[:, 1])) +
                  " Mean NPSS = {:.4f}".format(np.mean(np.asarray(metrics)[:, 2])) +
                  " Slide Loss = {:.4f}".format(np.mean(np.asarray(metrics)[:, 3])) +
                  " Foot Slide = {:.4f}".format(np.mean(np.asarray(metrics)[:, 4])))
 
            # NOTE(review): the log line omits Slide Loss (column 3) that the print
            # above includes — confirm whether that is intentional.
            logging.info("Mean ADE = {:.4f}".format(np.mean(np.asarray(metrics)[:, 0])) +
                         " Mean FDE = {:.4f}".format(np.mean(np.asarray(metrics)[:, 1])) +
                         " Mean NPSS = {:.4f}".format(np.mean(np.asarray(metrics)[:, 2])) +
                         " Foot Slide = {:.4f}".format(np.mean(np.asarray(metrics)[:, 4])))
        hmr_test_loss = np.mean(hmr_batch_loss)
        test_loss = np.mean(batch_loss)
        print("HMR Test Loss = {:.9f}".format(hmr_test_loss) +
            "Test Loss = {:.9f} ".format(test_loss))
        logging.info("HMR Test Loss = {:.9f}".format(hmr_test_loss) +
            "Test Loss = {:.9f} ".format(test_loss))
        return test_loss
        
        return 