import pytorch_lightning as pl
from argparse import ArgumentParser, Namespace
import os
from torch.utils.data import DataLoader
from dataset.bimanual_dataset import get_datasets
from methods.vae.model import InteractionVAE
import torch
from datetime import datetime
from pytorch_lightning.profilers import SimpleProfiler, AdvancedProfiler
from pytorch_lightning import loggers as pl_loggers
from pathlib import Path
import shutil
from viz.viz_helper import visualize_skeleton
from pytorch3d.transforms import quaternion_to_matrix
class LitInteraction(pl.LightningModule):
    """Lightning wrapper around ``InteractionVAE`` for (bimanual) human-object
    motion forecasting.

    Tensors arrive batch-major from the loaders and are transposed to
    time-major ``(T, B, ...)`` inside ``_common_step``.  In ``single_side``
    mode shapes are ``body (T, B, 21, 3)``, ``obj (T, B, N_points, 3)`` and
    ``pose (T, B, 7)``; the bimanual mode inserts a side axis of size 2:
    ``(T, B, 2, 21, 3)`` / ``(T, B, 2, N_points, 3)`` / ``(T, B, 2, 7)``.
    A pose is a 3-vector translation followed by an xyzw quaternion.
    """

    def __init__(self, args):
        """Store hyperparameters and build the VAE.

        Args:
            args: ``argparse.Namespace`` (or a plain dict of the same keys,
                e.g. restored from checkpoint hparams) with the model and
                loss-weight hyperparameters defined by the CLI parser below.
        """
        super().__init__()
        if isinstance(args, dict):
            # Checkpoint hparams come back as a dict; normalize to Namespace.
            args = Namespace(**args)
        self.args = args
        self.save_hyperparameters(args)
        self.start_time = datetime.now().strftime("%m:%d:%Y_%H:%M:%S")

        # liuyun: whether the VAE conditions on the action label.
        self.use_action_embedding = args.action_embedding

        # Running sums for the console prints in the step methods.
        self.train_info = {"batch_cnt": 0, "sum_loss": 0}
        self.val_info = {"batch_cnt": 0, "sum_val_loss": 0}

        # Lightning moves the module to the training device later; inside
        # __init__ self.device is still CPU, so only the dtype cast matters.
        self.model = InteractionVAE(args).to(dtype=torch.float)

    def forward(self, *args):
        return self.model(*args)

    def on_train_start(self) -> None:
        """Back up the training script, model and dataset code into the run
        directory so each run is self-describing.

        NOTE(review): relies on the module-level ``save_dir`` global that is
        only assigned in the ``__main__`` block below, and on the process
        being launched from the repository root — confirm before reusing this
        module from elsewhere.
        """
        shutil.copy('./methods/vae/train_baseline.py', str(save_dir / 'train.py'))
        shutil.copy('./methods/vae/model.py', str(save_dir / 'model.py'))
        # Fixed: back up the dataset module this file actually imports
        # (dataset/bimanual_dataset.py), not the unrelated dataset/dataset.py.
        shutil.copy('./dataset/bimanual_dataset.py', str(save_dir / 'dataset.py'))

    def configure_optimizers(self):
        """Plain Adam on all VAE parameters."""
        optimizer = torch.optim.Adam(params=self.model.parameters(),
                                     lr=self.args.lr)
        return {'optimizer': optimizer}

    @staticmethod
    def _quat_error(q_pred, q_gt):
        """Sign-invariant L1 quaternion error over the last axis.

        Quaternions are sign-ambiguous (q and -q encode the same rotation),
        so take the smaller of the two sign choices elementwise.
        """
        err_pos = (q_pred - q_gt).norm(dim=-1, p=1)
        err_neg = (q_pred + q_gt).norm(dim=-1, p=1)
        return torch.minimum(err_pos, err_neg)

    def calc_loss(self, body_pred, body_gt, pose_pred, pose_gt, obj_pred, obj_gt, q_z):
        """Assemble the weighted VAE training loss.

        Args:
            body_*: (T, B, N_joints, 3) or (T, B, 2, N_joints, 3).
            obj_*:  (T, B, N_points, 3) or (T, B, 2, N_points, 3).
            pose_*: (T, B, 7) or (T, B, 2, 7) — translation + xyzw quaternion.
            q_z: posterior Normal over the latent code, batch shape
                (B, latent_dim).

        Returns:
            (total_loss, raw_loss_dict, weighted_loss_dict).
        """
        B = body_gt.shape[1]
        past = self.args.past_len
        mse = torch.nn.MSELoss(reduction='mean')

        # KL term against a standard-normal prior.
        p_z = torch.distributions.normal.Normal(
            loc=torch.zeros((B, self.args.latent_dim), requires_grad=False, device=self.device),
            scale=torch.ones((B, self.args.latent_dim), requires_grad=False, device=self.device))
        loss_kl = torch.distributions.kl.kl_divergence(q_z, p_z).mean()
        if self.args.robust_kl:
            # Charbonnier-style smoothing bounds the KL gradient magnitude.
            loss_kl = torch.sqrt(loss_kl * loss_kl + 1) - 1.0

        # Reconstruction terms, split into observed (past) and forecast
        # (future) frames so the past can be down-weighted via weight_past.
        loss_body_past = mse(body_pred[:past], body_gt[:past])
        loss_body_future = mse(body_pred[past:], body_gt[past:])
        loss_obj_past = mse(obj_pred[:past], obj_gt[:past])
        loss_obj_future = mse(obj_pred[past:], obj_gt[past:])

        # Fixed: index the channel axis with `...` so the translation (first
        # 3) / quaternion (last 4) split also holds for the bimanual layout
        # (T, B, 2, 7).  The previous `[:past, :, :3]` style sliced the side
        # axis instead of the channels there, making the two pose losses
        # identical full-pose MSEs.  Identical behavior in single_side mode.
        loss_obj_nonrot_past = mse(pose_pred[:past, ..., :3], pose_gt[:past, ..., :3])
        loss_obj_nonrot_future = mse(pose_pred[past:, ..., :3], pose_gt[past:, ..., :3])
        loss_obj_rot_past = mse(pose_pred[:past, ..., -4:], pose_gt[:past, ..., -4:])
        loss_obj_rot_future = mse(pose_pred[past:, ..., -4:], pose_gt[past:, ..., -4:])

        # Soft unit-norm regularizer on predicted quaternions (QuaterNet);
        # same `...` fix as above so only the quaternion channels are normed.
        quaternion_reg_loss = (pose_pred[..., -4:].norm(p=2, dim=-1).square() - 1).square().mean()

        loss_dict = dict(
            kl=loss_kl,
            body_past=loss_body_past,
            body_future=loss_body_future,
            obj_past=loss_obj_past,
            obj_future=loss_obj_future,
            loss_obj_nonrot_past=loss_obj_nonrot_past,
            loss_obj_nonrot_future=loss_obj_nonrot_future,
            loss_obj_rot_past=loss_obj_rot_past,
            loss_obj_rot_future=loss_obj_rot_future,
            quaternion_reg_loss=quaternion_reg_loss,
        )

        a = self.args
        weighted_loss_dict = dict(
            kl=loss_kl * a.weight_kl,
            body_past=loss_body_past * a.weight_body * a.weight_past,
            body_future=loss_body_future * a.weight_body,
            obj_past=loss_obj_past * a.weight_obj * a.weight_past,
            obj_future=loss_obj_future * a.weight_obj,
            loss_obj_nonrot_past=loss_obj_nonrot_past * a.weight_obj_nonrot * a.weight_past,
            loss_obj_nonrot_future=loss_obj_nonrot_future * a.weight_obj_nonrot,
            loss_obj_rot_past=loss_obj_rot_past * a.weight_obj_rot * a.weight_past,
            loss_obj_rot_future=loss_obj_rot_future * a.weight_obj_rot,
            quaternion_reg_loss=quaternion_reg_loss * a.weight_quat_reg,
        )

        loss = torch.stack(list(weighted_loss_dict.values())).sum()
        return loss, loss_dict, weighted_loss_dict

    def calc_metric(self, body_pred, body_gt, obj_pred, obj_gt, pose_pred, pose_gt):
        """Error metrics over the forecast (future) frames only.

        Shapes as in ``calc_loss``.  Returns scalars
        (mpjpe_h, mpjpe_o, translation_error, rotation_error) plus a
        ``detailed_metrics`` dict; in the bimanual case the detailed values
        are two-element per-side lists (side index 0 / 1).
        """
        # Fixed: was a hardcoded `10:` — use the configured past length
        # (default 10) so the metric window tracks --past_len.
        past = self.args.past_len
        body_err = (body_pred[past:] - body_gt[past:]).norm(dim=-1, p=2)
        obj_err = (obj_pred[past:] - obj_gt[past:]).norm(dim=-1, p=2)
        trans_err = (pose_pred[past:, ..., :3] - pose_gt[past:, ..., :3]).norm(dim=-1, p=2)
        rot_err = self._quat_error(pose_pred[past:, ..., -4:], pose_gt[past:, ..., -4:])

        mpjpe_h = body_err.mean().item()
        mpjpe_o = obj_err.mean().item()
        translation_error = trans_err.mean().item()
        rotation_error = rot_err.mean().item()

        if self.args.single_side:
            detailed_metrics = {
                "mpjpe_h": mpjpe_h,
                "mpjpe_o": mpjpe_o,
                "translation_error": translation_error,
                "rotation_error": rotation_error,
            }
        else:
            # Per-side breakdown along the size-2 hand/object axis.
            detailed_metrics = {
                "mpjpe_h": [body_err[:, :, s].mean().item() for s in (0, 1)],
                "mpjpe_o": [obj_err[:, :, s].mean().item() for s in (0, 1)],
                "translation_error": [trans_err[..., s].mean().item() for s in (0, 1)],
                "rotation_error": [
                    self._quat_error(pose_pred[past:, :, s, -4:],
                                     pose_gt[past:, :, s, -4:]).mean().item()
                    for s in (0, 1)
                ],
            }

        return mpjpe_h, mpjpe_o, translation_error, rotation_error, detailed_metrics

    def calc_obj_pred(self, pose_pred, zero_pose_obj):
        """Apply predicted rigid poses to the canonical object point cloud.

        Args:
            pose_pred: (T, B, 7) or (T, B, 2, 7) — translation + xyzw quaternion.
            zero_pose_obj: (B, N_points, 3) or (B, 2, N_points, 3) canonical points.

        Returns:
            (T, B, N_points, 3) or (T, B, 2, N_points, 3) posed point clouds.
        """
        obj_gt_base = zero_pose_obj[None, ...]   # broadcast over time
        translation = pose_pred[..., None, :3]   # (..., 1, 3) for point-wise add
        # pytorch3d expects wxyz ordering; poses here are stored xyzw.
        quat_wxyz = torch.cat([pose_pred[..., -1:], pose_pred[..., -4:-1]], dim=-1)
        rotation_matrix = quaternion_to_matrix(quat_wxyz)  # (..., 3, 3)
        if self.args.single_side:
            obj_pred = rotation_matrix.matmul(
                obj_gt_base.permute(0, 1, 3, 2)).permute(0, 1, 3, 2).contiguous() + translation
        else:
            obj_pred = rotation_matrix.matmul(
                obj_gt_base.permute(0, 1, 2, 4, 3)).permute(0, 1, 2, 4, 3).contiguous() + translation
        return obj_pred

    def _common_step(self, batch, batch_idx, mode):
        """Shared forward/loss/metric path for train, valid and test.

        ``mode == 'train'`` runs the full VAE; any other mode samples from
        the prior via ``model.sample``.
        """
        body_gt = batch[0].transpose(0, 1).float()  # -> time-major (T, B, ...)
        obj_gt = batch[1].transpose(0, 1).float()
        pose_gt = batch[2].transpose(0, 1).float()
        zero_pose_obj = batch[3].float()            # (B, [2,] N_points, 3)
        # liuyun: action label, (B, N_ACTION).
        action_gt = batch[6].float()

        T, B = body_gt.shape[:2]

        if mode == 'train':
            body_pred, body_gt, pose_pred, q_z = self(body_gt, obj_gt, pose_gt, action_gt)
        else:
            body_pred, body_gt, pose_pred, q_z = self.model.sample(body_gt, obj_gt, pose_gt, action_gt)
        # NOTE(review): the model returns its own body_gt, shadowing the
        # input — presumably a re-canonicalized target; confirm in model.py.

        if self.args.single_side:
            pose_pred = pose_pred.view(T, B, 7)
            body_pred = body_pred.view(T, B, 21, 3)
        else:
            pose_pred = pose_pred.view(T, B, 2, 7)
            body_pred = body_pred.view(T, B, 2, 21, 3)

        # Reconstruct the object point cloud from the predicted poses.
        obj_pred = self.calc_obj_pred(pose_pred, zero_pose_obj)

        loss, loss_dict, weighted_loss_dict = self.calc_loss(
            body_pred, body_gt, pose_pred, pose_gt, obj_pred, obj_gt, q_z)

        mpjpe_h, mpjpe_o, translation_error, rotation_error, _ = self.calc_metric(
            body_pred, body_gt, obj_pred, obj_gt, pose_pred, pose_gt)
        metric_dict = dict(
            mpjpe_h=mpjpe_h,
            mpjpe_o=mpjpe_o,
            translation_error=translation_error,
            rotation_error=rotation_error,
        )

        # TODO: support the bimanual case here — the reshapes below assume
        # the single-side layout (T, 21, 3) / (T, num_points, 3).
        render_interval = 50 if mode == 'valid' else 200
        if self.args.render and (batch_idx % render_interval == 0) and \
                ((self.current_epoch >= self.args.render_epoch) or self.args.debug):
            with torch.no_grad():
                # Visualize the first sequence of the batch, prediction and GT.
                skeleton_pred = body_pred[:, 0].cpu().numpy().reshape(T, 21, 3)
                obj_points_pred = obj_pred[:, 0].cpu().numpy().reshape(T, self.args.num_points, 3)
                skeleton_gt = body_gt[:, 0].cpu().numpy().reshape(T, 21, 3)
                obj_points_gt = obj_gt[:, 0].cpu().numpy().reshape(T, self.args.num_points, 3)

                export_dir = save_dir / 'render'  # save_dir: module-level global set in __main__
                export_dir.mkdir(exist_ok=True, parents=True)

                rend_video_path = os.path.join(export_dir, '{}_{}_{}_p.gif'.format(mode, self.current_epoch, batch_idx))
                visualize_skeleton(skeleton_pred, obj_points_pred, save_dir=rend_video_path)

                rend_video_path = os.path.join(export_dir, '{}_{}_{}_gt.gif'.format(mode, self.current_epoch, batch_idx))
                visualize_skeleton(skeleton_gt, obj_points_gt, save_dir=rend_video_path)

        if mode == "valid":
            print("[metric_dict]", metric_dict)

        return loss, loss_dict, weighted_loss_dict, metric_dict

    def training_step(self, batch, batch_idx):
        if self.val_info["batch_cnt"] > 0:
            # First train batch after a validation phase: restart the
            # running train-loss average.
            self.train_info["batch_cnt"] = 0
            self.train_info["sum_loss"] = 0
        loss, loss_dict, weighted_loss_dict, metric_dict = self._common_step(batch, batch_idx, 'train')

        self.log('train_loss', loss, prog_bar=False)

        self.train_info["batch_cnt"] += 1
        self.train_info["sum_loss"] += loss.item()
        self.val_info["batch_cnt"] = 0
        self.val_info["sum_val_loss"] = 0

        return loss

    def validation_step(self, batch, batch_idx):
        loss, loss_dict, weighted_loss_dict, metric_dict = self._common_step(batch, batch_idx, 'valid')

        # Monitored by the ModelCheckpoint callback configured in __main__.
        self.log('val_loss', loss)

        self.val_info["batch_cnt"] += 1
        self.val_info["sum_val_loss"] += loss.item()
        print("[validation_step], batch_cnt, mean_train_loss, cum_mean_val_loss, val_loss, metric_dict =",
              self.val_info["batch_cnt"],
              self.train_info["sum_loss"] / max(self.train_info["batch_cnt"], 1),
              self.val_info["sum_val_loss"] / self.val_info["batch_cnt"],
              loss, metric_dict)

    def test_step(self, batch, batch_idx):
        # Reuse the validation path (prior sampling + metric printout).
        loss, loss_dict, weighted_loss_dict, metric_dict = self._common_step(batch, batch_idx, 'valid')
        

# Module-level device handle (also usable by importers of this module).
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

if __name__ == '__main__':
    if torch.cuda.is_available():
        print(torch.cuda.get_device_name(0))

    parser = ArgumentParser()
    # --- model architecture ---
    parser.add_argument("--embedding_dim", type=int, default=256)
    parser.add_argument("--num_joints", type=int, default=21)
    parser.add_argument("--num_points", type=int, default=50)
    parser.add_argument("--past_len", type=int, default=10)
    parser.add_argument("--future_len", type=int, default=10)
    parser.add_argument("--latent_dim", type=int, default=512)
    parser.add_argument("--num_heads", type=int, default=8)
    parser.add_argument("--ff_size", type=int, default=512)
    parser.add_argument("--activation", type=str, default='relu')
    parser.add_argument("--dropout", type=float, default=0)
    parser.add_argument("--num_layers", type=int, default=4)

    # --- experiment bookkeeping ---
    parser.add_argument("--render_epoch", type=int, default=0)
    parser.add_argument("--resume_checkpoint", type=str, default=None)
    parser.add_argument("--debug", type=int, default=0)
    # %b-%j is month-abbrev + day-of-year, e.g. "Oct-292".
    parser.add_argument("--expr_name", type=str, default=datetime.now().strftime("%b-%j-%H:%M"))
    parser.add_argument("--max_epochs", type=int, default=30)

    # --- optimization and loss weights ---
    parser.add_argument("--lr", type=float, default=1e-4)
    parser.add_argument("--weight_past", type=float, default=0.5)
    parser.add_argument("--weight_body", type=float, default=3)
    parser.add_argument("--weight_obj", type=float, default=1)
    parser.add_argument("--weight_kl", type=float, default=0.05)
    parser.add_argument("--robust_kl", type=int, default=0)
    parser.add_argument("--weight_obj_rot", type=float, default=0)
    parser.add_argument("--weight_obj_nonrot", type=float, default=1)
    parser.add_argument("--weight_quat_reg", type=float, default=0.01)
    parser.add_argument("--render", default=False, action='store_true')

    # liuyun:
    parser.add_argument("--obj_pose_embedding", default=False, action='store_true')
    parser.add_argument("--action_embedding", default=False, action='store_true')
    # Train only [right hand + tool] or [left hand + object].
    parser.add_argument("--single_side", default=False, action='store_true')

    args = parser.parse_args()

    pl.seed_everything(233)
    # NOTE: anomaly detection slows training noticeably; kept for debugging NaNs.
    torch.autograd.set_detect_anomaly(True)

    train_set, val_set, test_set, unseen_test_set = get_datasets(
        N_point=args.num_points, cvt_to_relative_pose=True)  # NOTE: change here

    train_loader = DataLoader(train_set, batch_size=32, num_workers=1, shuffle=True,
                              drop_last=True, pin_memory=False)
    # Fixed: validation must not be shuffled — with drop_last=True a shuffled
    # loader drops a different random remainder every epoch, so the monitored
    # val_loss was not comparable across epochs and checkpoint selection was
    # noisy.
    val_loader = DataLoader(val_set, batch_size=32, num_workers=1, shuffle=False,
                            drop_last=True, pin_memory=False)
    test_loader_unseen = DataLoader(unseen_test_set, batch_size=32, num_workers=1, shuffle=False,
                                    drop_last=False, pin_memory=False)
    test_loader_seen = DataLoader(test_set, batch_size=32, num_workers=1, shuffle=False,
                                  drop_last=False, pin_memory=False)

    model = LitInteraction(args)
    results_folder = "./results"

    tb_logger = pl_loggers.TensorBoardLogger(results_folder + '/interaction_vae', name=args.expr_name)
    # Per-version run directory; also read (as a global) by
    # LitInteraction.on_train_start and the render path in _common_step.
    save_dir = Path(tb_logger.log_dir)
    print(save_dir)

    # Keep the best checkpoint by val_loss plus the latest one.
    checkpoint_callback = pl.callbacks.ModelCheckpoint(dirpath=str(save_dir / 'checkpoints'),
                                                       monitor="val_loss",
                                                       save_weights_only=True, save_last=True)
    print(checkpoint_callback.dirpath)

    trainer = pl.Trainer.from_argparse_args(args,
                                            logger=tb_logger,
                                            callbacks=[checkpoint_callback],
                                            check_val_every_n_epoch=1,
                                            accelerator="gpu", devices=1)
    trainer.fit(model, train_loader, val_loader)

