# -*- coding: utf-8 -*-
"""
# @FileName:     training_loop_taco_v5.py
# @AuthorName:   Sanqi Lu (Lingwei Dang)
# @Institution:  SCUT, Guangzhou, China
# @EmailAddress: lenvondang@163.com
# @CreateTime:   2024/12/12 14:05
"""
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import sys
sys.path.append(".")
import numpy as np
import torch
from torch.optim import AdamW
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data.dataloader import DataLoader
from natsort import natsorted
import math
from einops import rearrange
import imageio

from custom_evaluate.fid_pose_reconstructor.fid_autoencoder import SpatioTemporalAutoencoder
from custom_evaluate.fid_pose_reconstructor.taco_dataset_v5 import TACO_Dataset_HOI
from custom_evaluate.fid_pose_reconstructor.hoi_losses import ObjectPointCloudLoss, HandJointLoss
from custom_evaluate.utils.img_utils import draw_hoi_pose_on_given_img_rendered_by_depth
from custom_evaluate.utils.zip_folder import compress_directory_to_zip

class Trainor:
    """Training loop for the spatio-temporal hand-object-interaction (HOI)
    pose autoencoder on the TACO dataset.

    Wires together the model, train/eval datasets, hand/object losses,
    TensorBoard logging, periodic evaluation (with rendered sample videos)
    and checkpointing.

    NOTE(review): the class name keeps the original spelling ("Trainor")
    so external callers are unaffected.
    """

    # Each frame is flattened as two halves `(m k)` with m=2; the first 21
    # points of each half are hand joints, the rest are object points
    # (established by the `:21` / `21:` splits below).
    NUM_HAND_JOINTS = 21

    def __init__(
            self,
            save_dir="",
            batch_size=16,
            learning_rate=0.001,
            total_steps=10000,
            log_step=10,
            save_step=500,
            resume_model_ckpt_path="",
            clip_version="/share/home/wuqingyao_danglingwei/model_zoos/ViT-B-32.pt",
            data_root="/share/home/wuqingyao_danglingwei/datas/TACO_Data_20250314/full_20k_plus",
            device="cuda:0"
    ):
        """Build model, optimizer, datasets and logging.

        Args:
            save_dir: directory for TensorBoard logs, checkpoints and sample videos.
            batch_size: batch size for both train and eval dataloaders.
            learning_rate: AdamW learning rate.
            total_steps: approximate number of optimizer steps to run.
            log_step: log training losses every this many steps.
            save_step: evaluate + checkpoint every this many steps.
            resume_model_ckpt_path: if non-empty, resume weights/step from this checkpoint.
            clip_version: kept for interface compatibility; currently unused
                (the autoencoder is constructed without it).
            data_root: root directory of the TACO data lists.
        """
        self.save_dir = save_dir
        self.batch_size = batch_size
        self.total_steps = total_steps
        self.log_step = log_step
        self.save_step = save_step

        self.device = device
        # TensorBoard logger.
        self.writer = SummaryWriter(log_dir=save_dir)
        # Global optimizer-step counter (resumed from checkpoint if any).
        self.global_step = 0

        self.hand_loss_fn = HandJointLoss()
        self.obj_loss_fn = ObjectPointCloudLoss()

        # Model (optionally resumed) and optimizer.
        self.model = SpatioTemporalAutoencoder()
        if resume_model_ckpt_path != "":
            self.load_model(resume_model_ckpt_path)

        self.model.to(self.device)
        print(f"model trainable parameters: {sum(p.numel() for p in self.model.parameters() if p.requires_grad) / 1e6} M")

        self.opt = AdamW(
            self.model.parameters(), lr=learning_rate)

        # Train data.
        train_dataset = TACO_Dataset_HOI(
            data_root=data_root,
            prompts_column_path="base_2500_train_VLMEnhanced_prompts.txt",
            img_column_path="base_2500_train_images.txt",
            hoi_traj_path="base_2500_train_normalized_hoi_pose_trajs.txt",
            debug=False
        )
        print(f"train ds: {len(train_dataset)}")
        self.train_dataloader = DataLoader(dataset=train_dataset, shuffle=True, batch_size=batch_size, num_workers=8)
        print(f"train dl: {len(self.train_dataloader)}")

        # Eval data (small split, deterministic order for comparable renders).
        self.eval_dataset = TACO_Dataset_HOI(
            data_root=data_root,
            prompts_column_path="test_small_151_VLMEnhanced_prompts.txt",
            img_column_path="test_small_151_images.txt",
            hoi_traj_path="test_small_151_normalized_hoi_pose_trajs.txt",
            debug=True
        )
        print(f"eval ds: {len(self.eval_dataset)}")
        self.eval_dataloader = DataLoader(dataset=self.eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
        print(f"eval dl: {len(self.eval_dataloader)}")

    def load_model(self, ckpt_path):
        """Resume model weights and global step from a checkpoint.

        Checkpoints are saved without CLIP weights (see
        save_model_and_optimizer), so `clip_model.*` are the only keys
        allowed to be missing.

        Raises:
            RuntimeError: if the checkpoint contains unexpected keys or is
                missing non-CLIP keys.
        """
        print(f"导入模型参数：{ckpt_path}")
        state_dict = torch.load(ckpt_path, map_location='cpu')
        missing_keys, unexpected_keys = self.model.load_state_dict(state_dict["model"], strict=False)
        # Explicit raises instead of `assert`: asserts are stripped under -O,
        # which would silently accept a corrupt/mismatched checkpoint.
        if len(unexpected_keys) != 0:
            raise RuntimeError(f"unexpected keys in checkpoint: {unexpected_keys}")
        if not all(k.startswith('clip_model.') for k in missing_keys):
            raise RuntimeError(f"missing non-CLIP keys in checkpoint: {missing_keys}")
        self.global_step = state_dict["global_step"] + 1

    def save_model_and_optimizer(self):
        """Checkpoint model weights (minus CLIP), optimizer state and step.

        CLIP weights are frozen/pretrained, so they are stripped to keep
        checkpoints small; load_model tolerates them as missing keys.
        """
        state_dict = self.model.state_dict()
        # state_dict() returns a fresh mapping, so deleting keys here does
        # not touch the live model.
        for key in [k for k in state_dict.keys() if k.startswith('clip_model.')]:
            del state_dict[key]

        torch.save(
            {
                "model": state_dict,
                "optimizer": self.opt.state_dict(),
                "global_step": self.global_step,
            },
            f"{self.save_dir}/model_{self.global_step:09d}.pth"
        )

    def _split_hand_obj(self, pose_traj):
        """Split a flattened HOI trajectory into hand and object parts.

        Factors out the split that was previously duplicated four times in
        run_loop/evaluate.

        Args:
            pose_traj: [b, t, (2*k), c] tensor; each of the two halves starts
                with NUM_HAND_JOINTS hand joints followed by object points.

        Returns:
            (hand, obj) tensors re-flattened to the same `b t (m k) c` layout.
        """
        grouped = rearrange(pose_traj, 'b t (m k) c -> b t m k c', m=2)
        hand = rearrange(grouped[:, :, :, :self.NUM_HAND_JOINTS, :], "b t m k c -> b t (m k) c")
        obj = rearrange(grouped[:, :, :, self.NUM_HAND_JOINTS:, :], "b t m k c -> b t (m k) c")
        return hand, obj

    def run_loop(self):
        """Main training loop: optimize, log, and periodically eval + save."""
        steps_per_epoch = len(self.train_dataloader)
        # The `+ 1` makes the loop take at least total_steps optimizer steps;
        # the final epoch may overshoot total_steps by less than one epoch
        # (unchanged from the original behavior).
        for epoch in range(self.global_step // steps_per_epoch, self.total_steps // steps_per_epoch + 1):
            for pose_traj in tqdm(self.train_dataloader, total=steps_per_epoch, desc=f"epoch: {epoch}"):
                # pose_traj: [b, t, v, c] = [b, 49, 298, 3] per the original
                # notes — TODO confirm against TACO_Dataset_HOI.
                pose_traj = pose_traj.float().to(self.device)
                pred_hand, pred_obj = self.model(pose_traj)
                gt_hand, gt_obj = self._split_hand_obj(pose_traj)
                hand_loss_dict = self.hand_loss_fn(pred_hand, gt_hand)
                obj_loss_dict = self.obj_loss_fn(pred_obj, gt_obj)
                # Total loss: mean of summed hand + object losses.
                loss = torch.mean(hand_loss_dict["loss"] + obj_loss_dict["loss"])

                self.opt.zero_grad()
                loss.backward()
                # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
                self.opt.step()

                # Periodic scalar logging.
                if self.global_step % self.log_step == 0:
                    self.writer.add_scalar("train/loss", loss.item(), global_step=self.global_step)
                    for key, item in hand_loss_dict.items():
                        self.writer.add_scalar(f"train/{key}_hand", item.mean(), global_step=self.global_step)
                    for key, item in obj_loss_dict.items():
                        self.writer.add_scalar(f"train/{key}_obj", item.mean(), global_step=self.global_step)

                    print(f"global_step: {self.global_step}, loss: {loss}, hand_loss: {hand_loss_dict['loss'].mean()}, obj_loss: {obj_loss_dict['loss'].mean()}")

                # Periodic evaluation + checkpoint (also fires at step 0,
                # giving an untrained baseline — unchanged from original).
                if self.global_step % self.save_step == 0:
                    self.model.eval()
                    self.evaluate()
                    self.model.train()

                    self.save_model_and_optimizer()

                self.global_step += 1

        # Save the last checkpoint if it wasn't already saved.
        self.save_model_and_optimizer()

    @torch.no_grad()  # eval must not build autograd graphs (memory leak in original)
    def evaluate(self):
        """One pass over the eval set: log sample-averaged losses and render
        up to 5 predicted trajectories from the first batch as mp4 videos."""
        avg_loss_dict = {}
        cnt = 0
        for pose_traj in tqdm(self.eval_dataloader, total=len(self.eval_dataloader)):
            pose_traj = pose_traj.float().to(self.device)
            batch_size = pose_traj.shape[0]
            pred_hand, pred_obj = self.model(pose_traj)
            # Re-interleave predicted hand + object points back into the
            # flat `b t (m k) c` HOI layout for rendering.
            sampled_pose_hoi = rearrange(
                torch.cat([
                    rearrange(pred_hand, 'b t (m k) c -> b t m k c', m=2),
                    rearrange(pred_obj, 'b t (m k) c -> b t m k c', m=2)
                ], dim=3), "b t m k c -> b t (m k) c"
            )
            gt_hand, gt_obj = self._split_hand_obj(pose_traj)
            hand_loss_dict = self.hand_loss_fn(pred_hand, gt_hand)
            obj_loss_dict = self.obj_loss_fn(pred_obj, gt_obj)

            # Accumulate sample-weighted sums so the final average is a true
            # per-sample mean even when the last batch is smaller. (The
            # original divided a sum of per-batch means by the sample count,
            # and also logged per-batch hand losses at the same global_step.)
            for key, item in hand_loss_dict.items():
                tag = f"test/{key}_hand"
                avg_loss_dict[tag] = avg_loss_dict.get(tag, 0.0) + item.mean().item() * batch_size
            for key, item in obj_loss_dict.items():
                tag = f"test/{key}_obj"
                avg_loss_dict[tag] = avg_loss_dict.get(tag, 0.0) + item.mean().item() * batch_size

            # Render a few predictions from the first batch only.
            if cnt == 0:
                for drawn_idx in range(min(5, sampled_pose_hoi.shape[0])):
                    curr_pred_hoi_traj = sampled_pose_hoi[drawn_idx:drawn_idx + 1].float().cpu().data.numpy()
                    self.draw_pose_traj(
                        curr_pred_hoi_traj, cogvideox_size_hw=[240, 368],
                        save_path=os.path.join(self.save_dir, "samples", f"pred_hoi_traj_globalstep_{self.global_step}_{drawn_idx:05d}.mp4"),
                        base_points_set=None
                    )

            cnt += batch_size

        # Log averaged losses. The tags already carry the "test/" prefix;
        # the original re-prefixed them, producing "test/test/..." tags.
        # max(cnt, 1) guards an empty eval loader.
        for tag, total in avg_loss_dict.items():
            self.writer.add_scalar(tag, total / max(cnt, 1), global_step=self.global_step)

    def draw_pose_traj(
        self,
        pred_traj, cogvideox_size_hw=(240, 368), save_path="out.mp4", base_points_set=None
    ):
        """Render one normalized HOI trajectory as an mp4 video.

        Args:
            pred_traj: [b, t, n, c] numpy array with coordinates in [-1, 1];
                only the first batch element is rendered.
            cogvideox_size_hw: (height, width) of the output frames.
                (Default changed from a mutable list to an equivalent tuple;
                it is only ever indexed.)
            save_path: destination .mp4 path.
            base_points_set: unused; kept for interface compatibility with a
                previous BPS-based decoding step.
        """
        b, f, v, c = pred_traj.shape
        # Copy: the reshape produces a view, and the in-place denormalization
        # below would otherwise mutate the caller's array.
        origin_hoi_pose = pred_traj.reshape(b, f, 2, -1, c)[0].copy()  # [t, 2, v, c]

        # Denormalize from [-1, 1]: x/y back to pixel coordinates, z back to
        # the depth range. The commented `+ offset` terms mirror the original
        # (offsets intentionally not applied).
        expand_origin_min_z = 0.8523401065952239
        expand_origin_max_z = 2.4990647112847197
        origin_hoi_pose[:, :, :, 0] = (origin_hoi_pose[:, :, :, 0] + 1) * cogvideox_size_hw[1] / 2  # + crop_left_top_x
        origin_hoi_pose[:, :, :, 1] = (origin_hoi_pose[:, :, :, 1] + 1) * cogvideox_size_hw[0] / 2  # + crop_left_top_y
        origin_hoi_pose[:, :, :, 2] = (origin_hoi_pose[:, :, :, 2] + 1) * (expand_origin_max_z - expand_origin_min_z) / 2  # + expand_origin_min_z

        # Draw each frame on a black canvas, then encode at 8 fps.
        all_drawn_imgs = []
        for pose_t in range(origin_hoi_pose.shape[0]):
            canvas = np.zeros((cogvideox_size_hw[0], cogvideox_size_hw[1], 3), dtype=np.uint8)
            all_drawn_imgs.append(draw_hoi_pose_on_given_img_rendered_by_depth(origin_hoi_pose[pose_t], canvas))

        with imageio.get_writer(save_path, fps=8) as video_writer:
            for frame in all_drawn_imgs:
                video_writer.append_data(frame)

if __name__ == '__main__':
    # Cluster paths: run outputs, CLIP weights, and the TACO data root.
    save_dir = "/share/home/wuqingyao_danglingwei/outputs/for_paper/hoi_autoencoder"
    clip_version = "/share/home/wuqingyao_danglingwei/model_zoos/ViT-B-32.pt"
    data_root = "/share/home/wuqingyao_danglingwei/datas/TACO_Data_20250314/full_20k_plus"
    resume_model_ckpt_path = ""

    # Ensure output directories exist, then archive the current code tree
    # next to the run for reproducibility.
    for out_dir in (save_dir, f"{save_dir}/samples"):
        os.makedirs(out_dir, exist_ok=True)
    compress_directory_to_zip(os.path.abspath("."), os.path.join(save_dir, "code.zip"))

    trainer = Trainor(
        save_dir=save_dir,
        batch_size=16,
        learning_rate=0.001,
        total_steps=10000,
        log_step=10,
        save_step=10,  # 10 for debugging; 500 for real runs
        resume_model_ckpt_path=resume_model_ckpt_path,
        clip_version=clip_version,
        data_root=data_root,
        device="cuda:0"
    )
    trainer.run_loop()
    print("Done!")

