import pytorch_lightning as pl
from argparse import ArgumentParser, Namespace
import os
from torch.utils.data import DataLoader
from dataset.bimanual_dataset import get_datasets
from methods.rnn.model_rnn_baseline import ObjHumanLSTM
import torch
from datetime import datetime
from pytorch_lightning.profilers import SimpleProfiler, AdvancedProfiler
from pytorch_lightning import loggers as pl_loggers
from pathlib import Path
import shutil
from viz.viz_helper import visualize_skeleton
from pytorch3d.transforms import quaternion_to_matrix
class LitInteraction(pl.LightningModule):
    """Baseline LSTM that jointly forecasts bimanual human skeletons and the
    manipulated object's 7-DoF pose (translation + quaternion).

    Tensor layouts used throughout (time-major after the transpose in
    ``_common_step``):
        body: (T, B, 2, 21, 3)      -- two persons, 21 joints each
        obj:  (T, B, 2, N_points, 3)
        pose: (T, B, 2, 7)          -- [tx, ty, tz, qx, qy, qz, qw]

    NOTE(review): ``on_train_start`` and the render branch rely on the
    module-level global ``save_dir`` defined in the ``__main__`` section.
    """

    def __init__(self, args):
        super().__init__()
        # When restored from a checkpoint, hyperparameters arrive as a plain
        # dict; normalize to a Namespace so attribute access keeps working.
        if isinstance(args, dict):
            args = Namespace(**args)
        self.args = args
        self.save_hyperparameters(args)
        self.start_time = datetime.now().strftime("%m:%d:%Y_%H:%M:%S")

        self.model = ObjHumanLSTM(args).to(device=self.device, dtype=torch.float)

        # Running sums feeding the human-readable printout in
        # validation_step; reset at the train/val boundary in training_step.
        self.train_info = {"batch_cnt": 0, "sum_loss": 0}
        self.val_info = {"batch_cnt": 0, "sum_val_loss": 0}

    def forward(self, *args):
        """Delegate straight to the wrapped ObjHumanLSTM."""
        return self.model(*args)

    def on_train_start(self) -> None:
        # Snapshot trainer, model and dataset code next to the logs so each
        # run is reproducible from its own directory.
        # NOTE(review): this file imports dataset.bimanual_dataset, yet the
        # backup copies dataset/dataset.py -- confirm the intended file.
        shutil.copy('./methods/rnn/train_rnn_baseline.py', str(save_dir / 'train.py'))
        shutil.copy('./methods/rnn/model_rnn_baseline.py', str(save_dir / 'model.py'))
        shutil.copy('./dataset/dataset.py', str(save_dir / 'dataset.py'))
        return

    def configure_optimizers(self):
        """Plain Adam on all model parameters; no LR schedule."""
        optimizer = torch.optim.Adam(params=list(self.model.parameters()),
                                     lr=self.args.lr)
        return {'optimizer': optimizer}

    def calc_loss(self, body_pred, body_gt, pose_pred, pose_gt, obj_pred, obj_gt):
        """Weighted MSE losses split into past (reconstruction) and future
        (forecast) segments.

        All tensors are time-major with T = past_len + future_len - 1
        (one-step-ahead targets). ``pose_*`` hold [translation(3), quat(4)]
        in the last dimension.

        Returns:
            (total weighted loss, dict of raw losses, dict of weighted losses)
        """
        mse = torch.nn.MSELoss(reduction='mean')
        # Predictions are one-step-ahead, so index past_len-1 is the boundary
        # between the reconstructed past and the forecast future.
        split = self.args.past_len - 1

        loss_body_past = mse(body_pred[:split], body_gt[:split])
        loss_body_future = mse(body_pred[split:], body_gt[split:])

        loss_obj_past = mse(obj_pred[:split], obj_gt[:split])
        loss_obj_future = mse(obj_pred[split:], obj_gt[split:])

        # Translation (first 3 dims) and rotation (last 4 dims) supervised
        # separately so they can be weighted independently.
        loss_obj_nonrot_past = mse(pose_pred[:split, ..., :3], pose_gt[:split, ..., :3])
        loss_obj_nonrot_future = mse(pose_pred[split:, ..., :3], pose_gt[split:, ..., :3])
        loss_obj_rot_past = mse(pose_pred[:split, ..., -4:], pose_gt[:split, ..., -4:])
        loss_obj_rot_future = mse(pose_pred[split:, ..., -4:], pose_gt[split:, ..., -4:])

        # Soft unit-norm regularizer on predicted quaternions, as in QuaterNet.
        quaternion_reg_loss = (pose_pred[..., -4:].norm(p=2, dim=-1).square() - 1).square().mean()

        loss_dict = dict(
            body_past=loss_body_past,
            body_future=loss_body_future,
            obj_past=loss_obj_past,
            obj_future=loss_obj_future,
            loss_obj_nonrot_past=loss_obj_nonrot_past,
            loss_obj_nonrot_future=loss_obj_nonrot_future,
            loss_obj_rot_past=loss_obj_rot_past,
            loss_obj_rot_future=loss_obj_rot_future,
            quaternion_reg_loss=quaternion_reg_loss,
        )

        # Past terms are additionally scaled by weight_past (< 1 by default)
        # so the forecast dominates the objective.
        weighted_loss_dict = dict(
            body_past=loss_body_past * self.args.weight_body * self.args.weight_past,
            body_future=loss_body_future * self.args.weight_body,
            obj_past=loss_obj_past * self.args.weight_obj * self.args.weight_past,
            obj_future=loss_obj_future * self.args.weight_obj,
            loss_obj_nonrot_past=loss_obj_nonrot_past * self.args.weight_obj_nonrot * self.args.weight_past,
            loss_obj_nonrot_future=loss_obj_nonrot_future * self.args.weight_obj_nonrot,
            loss_obj_rot_past=loss_obj_rot_past * self.args.weight_obj_rot * self.args.weight_past,
            loss_obj_rot_future=loss_obj_rot_future * self.args.weight_obj_rot,
            quaternion_reg_loss=self.args.weight_quat_reg * quaternion_reg_loss,
        )

        loss = torch.stack(list(weighted_loss_dict.values())).sum()

        return loss, loss_dict, weighted_loss_dict

    def calc_metric(self, body_pred, body_gt, obj_pred, obj_gt, pose_pred, pose_gt):
        """Evaluation metrics computed over the forecast frames only.

        Frames before ``past_len`` are conditioning context, so metrics start
        at frame ``past_len``. (This was a hard-coded 10, which only matched
        the default ``--past_len``.)

        Returns:
            (mpjpe_h, mpjpe_o, translation_error, rotation_error) as floats.
        """
        # body_pred: (T, B, 2, N_joints, 3); obj_pred: (T, B, 2, N_points, 3)
        # pose_pred: (T, B, 2, 7)
        assert body_pred.size()[-1] == 3
        assert obj_pred.size()[-1] == 3
        fut = self.args.past_len  # first forecast-only frame

        mpjpe_h = (body_pred[fut:] - body_gt[fut:]).norm(dim=-1, p=2).mean().item()
        mpjpe_o = (obj_pred[fut:] - obj_gt[fut:]).norm(dim=-1, p=2).mean().item()
        translation_error = (pose_pred[fut:, ..., :3] - pose_gt[fut:, ..., :3]).norm(dim=-1, p=2).mean().item()

        # Quaternions are sign-ambiguous (q and -q encode the same rotation),
        # so take the smaller of the two L1 distances per frame.
        rotation_error_v1 = (pose_pred[fut:, ..., -4:] - pose_gt[fut:, ..., -4:]).norm(dim=-1, p=1)
        rotation_error_v2 = (pose_pred[fut:, ..., -4:] + pose_gt[fut:, ..., -4:]).norm(dim=-1, p=1)
        rotation_min = torch.stack([rotation_error_v1, rotation_error_v2], dim=0).min(dim=0)[0]
        rotation_error = rotation_min.mean().item()

        return mpjpe_h, mpjpe_o, translation_error, rotation_error

    def calc_obj_pred(self, pose_pred, zero_pose_obj):
        """Recover object point clouds from a predicted 7-DoF pose.

        Args:
            pose_pred: (T, B, 2, 7), last 4 dims a quaternion in xyzw order.
            zero_pose_obj: (B, 2, N_points, 3) canonical (zero-pose) points.
        Returns:
            obj_pred: (T, B, 2, N_points, 3) transformed points.
        """
        obj_gt_base = zero_pose_obj[None, ...]  # (1, B, 2, N_points, 3)
        translation = pose_pred[..., None, :3]  # (T, B, 2, 1, 3)
        # pytorch3d expects wxyz, the dataset stores xyzw -- reorder.
        quat_correct = torch.cat([pose_pred[..., -1:], pose_pred[..., -4:-1]], dim=-1)  # (T, B, 2, 4)
        rotation_matrix = quaternion_to_matrix(quat_correct)  # (T, B, 2, 3, 3)
        # Rotate the canonical points (R @ p^T)^T, then translate.
        obj_pred = rotation_matrix.matmul(obj_gt_base.permute(0, 1, 2, 4, 3)).permute(0, 1, 2, 4, 3).contiguous() + translation

        return obj_pred

    def _common_step(self, batch, batch_idx, mode):
        """Shared forward/loss/metric logic for train, valid and test.

        mode == 'train': teacher-forced prediction over the whole sequence;
        otherwise: autoregressive rollout conditioned on the past frames.
        """
        body_gt = batch[0].transpose(0, 1).float()  # (T, B, 2, 21, 3)
        obj_gt = batch[1].transpose(0, 1).float()   # (T, B, 2, N_point, 3)
        pose_gt = batch[2].transpose(0, 1).float()  # (T, B, 2, 7)
        zero_pose_obj = batch[3].float()            # (B, 2, N_points, 3)
        action_gt = batch[6].float()                # (B, N_ACTION); currently unused here

        T, B = body_gt.shape[:2]
        # Flatten joints and append object pose: (T, B, 2, 21*3 + 7).
        obj_human_gt = torch.cat([body_gt.view(T, B, 2, 21 * 3), pose_gt], dim=-1)

        if mode == 'train':
            obj_human_pred = self.model(obj_human_gt, zero_pose_obj.view(B, 2, -1))
            T_pred = obj_human_pred.shape[0]
            assert obj_human_pred.shape[-1] == (21 * 3 + 7)
            body_pred, pose_pred = torch.split(obj_human_pred, [21 * 3, 7], dim=-1)

            pose_pred = pose_pred.view(T_pred, B, 2, 7)
            body_pred = body_pred.view(T_pred, B, 2, 21, 3)
        else:
            # Condition on args.past_len frames (was a hard-coded 10, which
            # silently broke for any other --past_len) and roll out
            # autoregressively for future_len steps.
            obj_human_pred = self.model.forward_autoregressive(
                obj_human_gt[:self.args.past_len], zero_pose_obj.view(B, 2, -1), self.args.future_len)
            body_past = body_gt[:self.args.past_len].detach().clone()
            pose_past = pose_gt[:self.args.past_len].detach().clone()

            T_pred = obj_human_pred.shape[0]
            assert obj_human_pred.shape[-1] == (21 * 3 + 7)
            body_pred, pose_pred = torch.split(obj_human_pred, [21 * 3, 7], dim=-1)
            pose_pred = pose_pred.view(T_pred, B, 2, 7)
            body_pred = body_pred.view(T_pred, B, 2, 21, 3)
            # Prepend the ground-truth past so losses/metrics cover the same
            # window as in training.
            body_pred = torch.cat([body_past, body_pred], dim=0)
            pose_pred = torch.cat([pose_past, pose_pred], dim=0)

        T = obj_human_gt.shape[0]

        # Recover the object point cloud from the predicted 7-DoF pose.
        obj_pred = self.calc_obj_pred(pose_pred, zero_pose_obj)

        # One-step-ahead objective: prediction at frame t is compared with
        # ground truth at frame t+1. The former train/valid branches here
        # were byte-identical, so they are merged.
        loss, loss_dict, weighted_loss_dict = self.calc_loss(
            body_pred[:-1], body_gt[1:], pose_pred[:-1], pose_gt[1:], obj_pred[:-1], obj_gt[1:])

        mpjpe_h, mpjpe_o, translation_error, rotation_error = self.calc_metric(
            body_pred, body_gt, obj_pred, obj_gt, pose_pred, pose_gt)
        metric_dict = dict(
            mpjpe_h=mpjpe_h,
            mpjpe_o=mpjpe_o,
            translation_error=translation_error,
            rotation_error=rotation_error,
        )

        render_interval = 50 if mode == 'valid' else 200
        if self.args.render and (batch_idx % render_interval == 0) and ((self.current_epoch >= self.args.render_epoch) or self.args.debug):
            with torch.no_grad():
                # Render the first batch element. reshape(T, -1, 3) replaces
                # the former hard-coded (T, 21, 3)/(T, 12, 3), whose element
                # counts cannot match the two-person (T, 2, 21, 3) /
                # (T, 2, N_points, 3) tensors documented above.
                # TODO(review): confirm visualize_skeleton accepts the
                # flattened two-person joint/point layout.
                skeleton_pred = body_pred[:, 0].cpu().numpy().reshape(T, -1, 3)
                object_pred = obj_pred[:, 0].cpu().numpy().reshape(T, -1, 3)
                skeleton_gt = body_gt[:, 0].cpu().numpy().reshape(T, -1, 3)
                object_gt = obj_gt[:, 0].cpu().numpy().reshape(T, -1, 3)

                export_file = Path.joinpath(save_dir, 'render')
                export_file.mkdir(exist_ok=True, parents=True)
                rend_video_path = os.path.join(export_file, '{}_{}_{}_p.gif'.format(mode, self.current_epoch, batch_idx))

                visualize_skeleton(skeleton_pred, object_pred, save_dir=rend_video_path)

                rend_video_path = os.path.join(export_file, '{}_{}_{}_gt.gif'.format(mode, self.current_epoch, batch_idx))
                visualize_skeleton(skeleton_gt, object_gt, save_dir=rend_video_path)

        return loss, loss_dict, weighted_loss_dict, metric_dict

    def training_step(self, batch, batch_idx):
        # A non-zero val counter means a validation pass just finished:
        # start a fresh running mean for the new training epoch.
        if self.val_info["batch_cnt"] > 0:
            self.train_info["batch_cnt"] = 0
            self.train_info["sum_loss"] = 0
        loss, loss_dict, weighted_loss_dict, metric_dict = self._common_step(batch, batch_idx, 'train')

        self.log('train_loss', loss, prog_bar=False)

        self.train_info["batch_cnt"] += 1
        self.train_info["sum_loss"] += loss.item()
        self.val_info["batch_cnt"] = 0
        self.val_info["sum_val_loss"] = 0

        return loss

    def validation_step(self, batch, batch_idx):
        loss, loss_dict, weighted_loss_dict, metric_dict = self._common_step(batch, batch_idx, 'valid')

        # 'val_loss' is what the ModelCheckpoint callback monitors.
        self.log('val_loss', loss)

        self.val_info["batch_cnt"] += 1
        self.val_info["sum_val_loss"] += loss.item()
        print("[validation_step], batch_cnt, mean_train_loss, cum_mean_val_loss, val_loss, metric_dict =", self.val_info["batch_cnt"], self.train_info["sum_loss"] / max(self.train_info["batch_cnt"], 1), self.val_info["sum_val_loss"] / self.val_info["batch_cnt"], loss, metric_dict)

    def test_step(self, batch, batch_idx):
        # Tests reuse the autoregressive 'valid' path; only metrics are logged.
        loss, loss_dict, weighted_loss_dict, metric_dict = self._common_step(batch, batch_idx, 'valid')
        for key in metric_dict:
            self.log('test_' + key, metric_dict[key], prog_bar=True)
        

# Module-level device; read by LitInteraction.__init__ via module scope.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if __name__ == '__main__':
    if torch.cuda.is_available():
        print(torch.cuda.get_device_name(0))

    # ---- hyperparameters / CLI flags ----
    parser = ArgumentParser()
    parser.add_argument("--embedding_dim", type=int, default=256)
    parser.add_argument("--num_joints", type=int, default=21)
    parser.add_argument("--num_points", type=int, default=50)
    parser.add_argument("--past_len", type=int, default=10)
    parser.add_argument("--future_len", type=int, default=10)

    parser.add_argument("--residual", type=int, default=1)
    parser.add_argument("--activation", type=str, default='relu')
    parser.add_argument("--dropout", type=float, default=0)
    parser.add_argument("--num_layers", type=int, default=4)
    parser.add_argument("--render_epoch", type=int, default=0)
    parser.add_argument("--resume_checkpoint", type=str, default=None)
    # 0/1 int used as a boolean flag inside LitInteraction.
    parser.add_argument("--debug", type=int, default=0)
    # Default experiment name encodes launch time (month-dayofyear-H:M).
    parser.add_argument("--expr_name", type=str, default=datetime.now().strftime("%b-%j-%H:%M"))
    parser.add_argument("--max_epochs", type=int, default=1000)

    # Loss weights (see LitInteraction.calc_loss).
    parser.add_argument("--lr", type=float, default=1e-4)
    parser.add_argument("--weight_past", type=float, default=0.5)
    parser.add_argument("--weight_body", type=float, default=3)
    parser.add_argument("--weight_obj", type=float, default=1)
    # parser.add_argument("--weight_obj_rot", type=float, default=1)
    # Rotation loss currently disabled (weight 0).
    parser.add_argument("--weight_obj_rot", type=float, default=0)
    parser.add_argument("--weight_obj_nonrot", type=float, default=1)

    parser.add_argument("--weight_quat_reg", type=float, default=0.01)

    # dataset
    parser.add_argument("--render", default=False, action='store_true')

    args = parser.parse_args()
    args.smpl_dim = 21*3

    pl.seed_everything(0)
    # NOTE(review): anomaly detection slows training noticeably; consider
    # disabling once debugged.
    torch.autograd.set_detect_anomaly(True)

    print("###### start preparing dataloaders ... ######")
    train_set, val_set, test_set, unseen_test_set = get_datasets(N_point=args.num_points, cvt_to_relative_pose=True)

    train_loader = DataLoader(train_set, batch_size=32, num_workers=1, shuffle=True,
                            drop_last=True, pin_memory=False)
    # NOTE(review): shuffle=True on the validation loader is unusual -- confirm
    # it is intentional (it changes which batches hit the render interval).
    val_loader = DataLoader(val_set, batch_size=32, num_workers=1, shuffle=True,
                    drop_last=True, pin_memory=False)
    test_loader_unseen = DataLoader(unseen_test_set, batch_size=32, num_workers=1, shuffle=False,
                    drop_last=False, pin_memory=False)
    test_loader_seen = DataLoader(test_set, batch_size=32, num_workers=1, shuffle=False,
                    drop_last=False, pin_memory=False)
    print("###### finish preparing dataloaders !!! ######")

    model = LitInteraction(args)
    results_folder = "./results"

    tb_logger = pl_loggers.TensorBoardLogger(str(results_folder + '/interaction_rnn'), name=args.expr_name)
    # Module-level global consumed by LitInteraction (code backup, renders).
    save_dir = Path(tb_logger.log_dir)  # for this version
    print(save_dir)

    # Checkpoints track the 'val_loss' value logged in validation_step.
    checkpoint_callback = pl.callbacks.ModelCheckpoint(dirpath=str(save_dir / 'checkpoints'),
                                                       monitor="val_loss",
                                                       save_weights_only=True, save_last=True)
    print(checkpoint_callback.dirpath)
    # early_stop_callback = pl.callbacks.EarlyStopping(monitor="val_loss", min_delta=1e-4, patience=3, verbose=False,
    #                                                  mode="min")
    # profiler = SimpleProfiler()
    # NOTE(review): from_argparse_args only exists in older PyTorch Lightning
    # (< 2.0); it picks the Trainer-compatible fields out of args.
    trainer = pl.Trainer.from_argparse_args(args,
                                            logger=tb_logger,
                                            # profiler=profiler,
                                            callbacks=[checkpoint_callback],
                                            check_val_every_n_epoch=1, accelerator="gpu", devices=1
                                            )
    trainer.fit(model, train_loader, val_loader)
    # trainer.test(ckpt_path="best", dataloaders=test_loader_seen)
    # trainer.test(ckpt_path="best", dataloaders=test_loader_unseen)