# -*- coding: utf-8 -*-
"""
# @FileName:     cvae.py
# @AuthorName:   Sanqi Lu (Lingwei Dang)
# @Institution:  SCUT, Guangzhou, China
# @EmailAddress: lenvondang@163.com
# @CreateTime:   2024/12/13 11:34
"""
import sys
sys.path.append(".")
import torch
import torch.nn
from torch.nn import Module, Sequential, Linear, Dropout, LeakyReLU, Tanh
import clip
from einops import rearrange

from custom_evaluate.fid_pose_reconstructor.cvae_encoder.gcn_layers import GraphConv, GraphConvBlock, ResGCB

class CVAE(Module):
    """Conditional VAE over hand-object 3D keypoint sequences.

    The condition is the concatenation of a CLIP text embedding and a CLIP
    image embedding (of the first frame); the data is a temporal sequence of
    two sets (k=2) of hand (``hand_node``) and object (``obj_node``) 3D
    keypoints, i.e. ``[b, t, 2*(hand_node+obj_node), 3]``.
    """

    def __init__(self, hand_node=21, obj_node=128, clip_dim=512, hidden_dim=512, z_dim=64, temporal_length=49, dropout_rate=0.1, clip_version="/share/home/wuqingyao_danglingwei/model_zoos/ViT-B-32.pt", freeze_clip=True):
        """
        :param hand_node: number of hand keypoints per hand (default 21).
        :param obj_node: number of object keypoints per object (default 128).
        :param clip_dim: dimensionality of one CLIP embedding (text or image).
        :param hidden_dim: width of the GCN feature channels.
        :param z_dim: dimensionality of the latent code z.
        :param temporal_length: number of frames t in a sequence.
        :param dropout_rate: dropout used inside the GCN blocks.
        :param clip_version: path/name passed to ``clip.load``.
        :param freeze_clip: if True, CLIP weights are frozen (eval mode).
        """
        super(CVAE, self).__init__()

        self.hand_node = hand_node
        self.obj_node = obj_node
        self.temporal_length = temporal_length
        self.z_dim = z_dim
        self.freeze_clip = freeze_clip

        # BUGFIX: this was commented out, but encode_text()/encode_image()
        # (called from forward/inference) dereference self.clip_model, which
        # raised AttributeError. CLIP must be loaded here.
        self.clip_version = clip_version
        self.clip_model, _ = self.load_and_freeze_clip(clip_version, freeze_clip)

        # Maps [text_emb ; image_emb] ([b, 2*clip_dim]) to a condition
        # embedding of size hidden_dim.
        self.embed_condition = Sequential(
            Linear(clip_dim * 2, hidden_dim),
            Tanh(),
            Linear(hidden_dim, hidden_dim),
            Tanh(),
            Linear(hidden_dim, hidden_dim)
        )
        # Encodes the flattened pose sequence [b, 2*(hand+obj)*3, t] to
        # [b, hidden_dim, t].
        self.embed_data = Sequential(
            GraphConvBlock(in_len=temporal_length, out_len=temporal_length, in_node_n=2*(hand_node+obj_node)*3, out_node_n=hidden_dim,  dropout_rate=dropout_rate, bias=True, residual=False),
            ResGCB(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=True),
            ResGCB(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=True),
            ResGCB(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=True),
            ResGCB(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=True),
            ResGCB(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=True),
            ResGCB(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=True),
        )
        # Posterior encoder: consumes data embedding concatenated with the
        # condition along time ([b, hidden, t+1]) and maps it back to t frames.
        self.enc = Sequential(
            GraphConvBlock(in_len=temporal_length+1, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=False),
            ResGCB(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=True),
            ResGCB(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=True),
            ResGCB(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=True),
            ResGCB(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=True),
            ResGCB(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=True),
            ResGCB(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=True),
        )

        # Posterior parameters from the flattened encoder features.
        self.mean = Linear(hidden_dim * temporal_length, z_dim)
        self.logvar = Linear(hidden_dim * temporal_length, z_dim)

        # Decoder from [condition ; z] (length-1 "sequence") to a
        # t-frame hand keypoint sequence [b, 2*hand_node*3, t].
        self.hand_dec = Sequential(
            GraphConvBlock(in_len=1, out_len=temporal_length, in_node_n=z_dim+hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=False),
            ResGCB(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=True),
            ResGCB(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=True),
            ResGCB(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=True),
            ResGCB(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=True),
            ResGCB(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=True),
            ResGCB(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=True),
            GraphConv(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=2*hand_node*3, bias=True)
        )

        # Same decoder structure for object keypoints -> [b, 2*obj_node*3, t].
        self.obj_dec = Sequential(
            GraphConvBlock(in_len=1, out_len=temporal_length, in_node_n=z_dim+hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=False),
            ResGCB(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=True),
            ResGCB(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=True),
            ResGCB(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=True),
            ResGCB(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=True),
            ResGCB(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=True),
            ResGCB(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=hidden_dim, dropout_rate=dropout_rate, bias=True, residual=True),
            GraphConv(in_len=temporal_length, out_len=temporal_length, in_node_n=hidden_dim, out_node_n=2*obj_node*3, bias=True)
        )

    def parameters_wo_clip(self):
        """Return all parameters except those of the (frozen) CLIP backbone."""
        return [p for name, p in self.named_parameters() if not name.startswith('clip_model.')]

    def load_and_freeze_clip(self, clip_version, freeze_clip=True):
        """Load a CLIP model and optionally freeze its weights.

        :param clip_version: path/name passed to ``clip.load``.
        :param freeze_clip: if True, set eval mode and disable gradients.
        :return: ``(clip_model, clip_preprocess)`` tuple.
        """
        clip_model, clip_preprocess = clip.load(clip_version, device='cpu', jit=False)  # Must set jit=False for training
        clip_model = clip_model.float()  # convert to float32 (CLIP loads as float16 by default)
        for name, param in clip_model.named_parameters():
            print(f"clip model 参数类型：{name}: {param.dtype}")
            break

        # Freeze CLIP weights
        if freeze_clip:
            clip_model.eval()
            for p in clip_model.parameters():
                p.requires_grad = False
            print(f"请注意，clip 模型参数冻结")
        else:
            for p in clip_model.parameters():
                p.requires_grad = True
            print(f"请注意，clip 模型参数打开训练")

        return clip_model, clip_preprocess

    def encode_text(self, raw_text):
        """Encode a batch of text prompts with CLIP.

        :param raw_text: list (batch_size length) of strings.
        :return: float32 tensor [b, clip_dim]; prompts longer than CLIP's
            77-token context are truncated.
        """
        device = next(self.parameters()).device
        texts = clip.tokenize(raw_text, truncate=True).to(device)  # [bs, context_length]
        return self.clip_model.encode_text(texts).float()

    def encode_image(self, image_batch):
        """Encode an image batch [b, 3, h, w] with CLIP -> [b, clip_dim]."""
        image_feature = self.clip_model.encode_image(image_batch)
        return image_feature

    def _sample(self, mean, logvar):
        """Reparameterization trick: sample from N(mean, diag(exp(logvar)))."""
        return torch.exp(0.5*logvar) * torch.randn_like(logvar) + mean

    def _condition(self, raw_txt, first_frame):
        """Build the condition vector [b, hidden_dim] from text + first frame."""
        enc_text = self.encode_text(raw_txt)     # [b, clip_dim]
        enc_img = self.encode_image(first_frame) # [b, clip_dim]
        return self.embed_condition(torch.cat([enc_text, enc_img], dim=-1))

    def _decode(self, cond_vec, z):
        """Decode latent ``z`` given condition ``cond_vec``.

        :param cond_vec: [b, hidden_dim] condition embedding.
        :param z: [b, z_dim] latent code.
        :return: pose sequence [b, t, 2*(hand_node+obj_node), 3].
        """
        dec_in = torch.cat((cond_vec, z), dim=-1)[..., None]  # [b, hidden+z, 1]

        hand_pred = self.hand_dec(dec_in)  # [b, 2*hand_node*3, t]
        hand_pred = rearrange(hand_pred, "b (k v c) t -> b t k v c", k=2, c=3)

        obj_pred = self.obj_dec(dec_in)  # [b, 2*obj_node*3, t]
        obj_pred = rearrange(obj_pred, "b (k v c) t -> b t k v c", k=2, c=3)

        # Join hand and object along the node axis, then flatten k into nodes.
        output = torch.cat([hand_pred, obj_pred], dim=3)
        return rearrange(output, "b t k v c -> b t (k v) c")

    def forward(self, data, raw_txt=None, first_frame=None):
        """Encode ``data`` to a posterior, sample z, and reconstruct.

        :param data: [b, t, v, c] with v = 2*(hand_node+obj_node), c = 3.
        :param raw_txt: list of b text prompts.
        :param first_frame: [b, 3, 224, 224] image batch.
        :return: (reconstruction [b, t, v, c], mean [b, z_dim], logvar [b, z_dim])
        """
        cond_vec = self._condition(raw_txt, first_frame)  # [b, hidden]

        data_emb = self.embed_data(rearrange(data, "b t v c -> b (v c) t"))  # [b, hidden, t]

        # Append the condition as an extra "frame"; self.enc maps t+1 -> t.
        feature = self.enc(torch.cat((data_emb, cond_vec[..., None]), dim=-1))  # [b, hidden, t]

        flat = feature.view(feature.shape[0], -1)
        mean = self.mean(flat)
        logvar = self.logvar(flat)

        z = self._sample(mean, logvar)  # [b, z_dim]
        output = self._decode(cond_vec, z)
        return output, mean, logvar

    def inference(self, z, raw_txt=None, first_frame=None):
        """Generate a pose sequence from a latent code and a condition.

        :param z: [b, z_dim] latent code (e.g. sampled from N(0, I)).
        :param raw_txt: list of b text prompts.
        :param first_frame: [b, 3, 224, 224] image batch.
        :return: pose sequence [b, t, 2*(hand_node+obj_node), 3].
        """
        cond_vec = self._condition(raw_txt, first_frame)  # [b, hidden]
        return self._decode(cond_vec, z)


if __name__ == '__main__':
    # Quick smoke test: build the model, run one training-style forward pass
    # and one inference pass, and print output shapes.
    device = "cpu"
    model = CVAE().to(device)
    print(f"model total parameters: {sum(p.numel() for p in model.parameters()) / 1e6} M")
    print(f"model trainable parameters: {sum(p.numel() for p in model.parameters() if p.requires_grad) / 1e6} M")

    # [b, t, v, c] with v = 2*(hand_node+obj_node) = 2*(21+128) = 298.
    x = torch.randn((4, 49, 298, 3)).float().to(device)
    raw_txt = ["this is cat", "that is dog", "sac", "asdw"]
    first_frame = torch.randint(low=0, high=255, size=(4, 3, 224, 224)).float().to(device)

    out, mean, logvar = model(x, raw_txt, first_frame)  # expect [4, 49, 298, 3]
    print(out.shape)

    # BUGFIX: z must match the model's z_dim (default 64). The previous
    # (4, 64*2) latent made torch.cat([cond, z]) 640-dim, while the decoders
    # expect z_dim + hidden_dim = 576 input nodes -> shape-mismatch crash.
    z = torch.randn((4, 64)).to(device)
    out = model.inference(z, raw_txt, first_frame)
    print(out.shape)
