import torch
import pickle
import torch.nn as nn
from models.lib.wav2vec import Wav2Vec2Model
from models.utils import init_biased_mask, enc_dec_mask, PeriodicPositionalEncoding
from base import BaseModel
from transformers import Wav2Vec2CTCTokenizer, Wav2Vec2Processor
from utils.wav2vec import Wav2Vec2ForCTC
from utils.modeling_hubert import HubertModel

def linear_interpolation(features, input_fps, output_fps, output_len=None):
    """Resample a (batch, seq, dim) feature sequence along the temporal axis.

    The sequence is linearly interpolated (align_corners=True) from
    ``input_fps`` to ``output_fps``; if ``output_len`` is given it overrides
    the length derived from the fps ratio.
    """
    channels_first = features.transpose(1, 2)  # (batch, dim, seq) for interpolate
    if output_len is None:
        duration_sec = channels_first.shape[2] / float(input_fps)
        output_len = int(duration_sec * output_fps)
    resampled = torch.nn.functional.interpolate(
        channels_first, size=output_len, mode='linear', align_corners=True)
    return resampled.transpose(1, 2)

# HuBERT modification: align audio-feature rate with the motion frame rate.
def inputRepresentationAdjustment(audio_embedding_matrix, vertex_matrix, ifps, ofps):
    """Align audio-frame and vertex-frame sequence lengths.

    The audio encoder emits ``ifps`` feature frames per second while the motion
    data runs at ``ofps`` frames per second; ``factor = ceil(ifps / ofps)``
    consecutive audio frames are stacked into the channel axis so both streams
    end up with one entry per motion frame.

    Args:
        audio_embedding_matrix: (batch, audio_seq_len, audio_dim) audio features.
        vertex_matrix: (batch, vertex_seq_len, vertex_dim) motion targets.
        ifps: audio feature rate in frames per second.
        ofps: motion frame rate in frames per second.

    Returns:
        (audio features reshaped to (batch, seq, audio_dim * factor),
         possibly-truncated vertex matrix, final frame count).
    """
    factor = -(-ifps // ofps)  # ceil division

    if ifps % ofps == 0:
        # Rates are commensurate: trim so the audio length is an exact
        # multiple of `factor` and matches `factor` frames per vertex frame.
        # (Bugfix: the original hard-coded 2 here instead of using `factor`.)
        if audio_embedding_matrix.shape[1] % factor != 0:
            trimmed = (audio_embedding_matrix.shape[1] // factor) * factor
            audio_embedding_matrix = audio_embedding_matrix[:, :trimmed]

        if audio_embedding_matrix.shape[1] > vertex_matrix.shape[1] * factor:
            audio_embedding_matrix = audio_embedding_matrix[:, :vertex_matrix.shape[1] * factor]
        elif audio_embedding_matrix.shape[1] < vertex_matrix.shape[1] * factor:
            vertex_matrix = vertex_matrix[:, :audio_embedding_matrix.shape[1] // factor]
    else:
        # Rates are incommensurate: resample audio features to exactly
        # `factor` frames per vertex frame.
        audio_embedding_seq_len = vertex_matrix.shape[1] * factor
        audio_embedding_matrix = audio_embedding_matrix.transpose(1, 2)
        audio_embedding_matrix = torch.nn.functional.interpolate(
            audio_embedding_matrix, size=audio_embedding_seq_len,
            align_corners=True, mode='linear')
        audio_embedding_matrix = audio_embedding_matrix.transpose(1, 2)

    frame_num = vertex_matrix.shape[1]
    # Stack `factor` consecutive audio frames into the channel dimension.
    # (Bugfix: the original hard-coded batch size 1; use the real batch dim.)
    audio_embedding_matrix = torch.reshape(
        audio_embedding_matrix,
        (audio_embedding_matrix.shape[0],
         audio_embedding_matrix.shape[1] // factor,
         audio_embedding_matrix.shape[2] * factor))

    return audio_embedding_matrix, vertex_matrix, frame_num


class CodeTalker(BaseModel):
    """Stage-2 CodeTalker: autoregressive speech-to-motion prediction over a
    frozen stage-1 VQ-VAE motion codebook, driven by a HuBERT audio encoder,
    with an auxiliary lip-reading CTC head (vocaset only).

    Tensor shapes (from the original author's notes):
        audio:    (batch_size, raw_wav)
        template: (batch_size, V*3)
        vertice:  (batch_size, seq_len, V*3)
    """

    def __init__(self, args):
        super(CodeTalker, self).__init__()
        self.args = args
        self.dataset = args.dataset

        # (disabled) original wav2vec 2.0 audio encoder
        # self.audio_encoder = Wav2Vec2Model.from_pretrained(args.wav2vec2model_path)
        # self.audio_encoder.feature_extractor._freeze_parameters()

        # NOTE(review): only used by the disabled wav2vec path above; kept so
        # existing checkpoints keep their state_dict keys.
        self.audio_feature_map = nn.Linear(768, args.feature_dim)
        # motion encoder
        self.vertice_map = nn.Linear(args.vertice_dim, args.feature_dim)
        # periodic positional encoding
        self.PPE = PeriodicPositionalEncoding(args.feature_dim, period=args.period)
        # temporal bias (alibi-style mask built by the project helper)
        self.biased_mask = init_biased_mask(n_head=4, max_seq_len=600, period=args.period)
        decoder_layer = nn.TransformerDecoderLayer(d_model=args.feature_dim, nhead=args.n_head, dim_feedforward=2*args.feature_dim, batch_first=True)
        self.transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=args.num_layers)
        # motion decoder: projects decoder features to VQ codebook logits
        self.feat_map = nn.Linear(args.feature_dim, args.face_quan_num*args.zquant_dim, bias=False)
        # one learnable style embedding per training subject
        self.learnable_style_emb = nn.Embedding(len(args.train_subjects.split()), args.feature_dim)

        self.device = args.device
        # zero-init so the decoder starts from the codebook prior
        nn.init.constant_(self.feat_map.weight, 0)

        if args.autoencoder == 'stage1_vocaset':
            from models.stage1_vocaset import VQAutoEncoder
        elif args.autoencoder == 'stage1_BIWI':
            from models.stage1_BIWI import VQAutoEncoder

        # frozen, pretrained stage-1 motion VQ-VAE
        self.autoencoder = VQAutoEncoder(args)
        self.autoencoder.load_state_dict(torch.load(args.vqvae_pretrained_path)['state_dict'])
        for param in self.autoencoder.parameters():
            param.requires_grad = False

        # Pretrained CTC pipeline used to produce pseudo transcripts for the
        # auxiliary lip-reading loss.
        self.processor = Wav2Vec2Processor.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-english")
        self.tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-english")
        self.text_encoder = Wav2Vec2ForCTC.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-english")
        if self.dataset == "vocaset":
            self.transformer = nn.Transformer(d_model=1024, batch_first=True)
        self.lm_head = nn.Linear(1024, 33)
        self.dropout = nn.Dropout(p=0.0, inplace=False)

        # HuBERT audio encoder (replaces the disabled wav2vec path)
        self.audio_encoder_h = HubertModel.from_pretrained("/data/xdtest/premodel/hubert-base-ls960")
        self.audio_dim_h = self.audio_encoder_h.encoder.config.hidden_size
        self.audio_encoder_h.feature_extractor._freeze_parameters()
        # 1536 = 2 stacked HuBERT frames of 768 dims (see inputRepresentationAdjustment)
        self.audio_feature_map_h = nn.Linear(1536, args.feature_dim)

        # freeze the feature projection and the first two transformer layers
        frozen_layers = [0, 1]
        for name, param in self.audio_encoder_h.named_parameters():
            if name.startswith("feature_projection"):
                param.requires_grad = False
            if name.startswith("encoder.layers"):
                layer = int(name.split(".")[2])
                if layer in frozen_layers:
                    param.requires_grad = False

        # BUGFIX: the lip mask / projection used to be rebuilt (with fresh
        # random weights) on every forward pass, so the projection could never
        # train and the mask pickle was re-read each step. Build them once here.
        if self.dataset == "vocaset":
            with open("../vocaset/FLAME_masks_unix.pkl", 'rb') as f:
                self.lip_mask = pickle.load(f, encoding='latin1')["lips"]
            self.lip_map = nn.Linear(254 * 3, 1024).to(self.device)

    def forward(self, audio, template, vertice, one_hot, criterion):
        """Teacher-forced training step.

        Returns (total_loss, [loss_motion, loss_reg]); the auxiliary CTC loss
        is computed (vocaset only) but not added to the objective, matching
        the original training behavior.
        """
        # tgt_mask: (T, T); memory_mask: (T, S)
        template = template.unsqueeze(1)  # (1, 1, V*3)

        # style embedding selected by the one-hot subject id
        obj_embedding = self.learnable_style_emb(torch.argmax(one_hot, dim=1))
        obj_embedding = obj_embedding.unsqueeze(1)

        frame_num = vertice.shape[1]

        # (disabled) original wav2vec audio feature extraction
        # hidden_states = self.audio_encoder(audio, self.dataset, frame_num=frame_num).last_hidden_state
        # ...

        # HuBERT audio feature extraction (50 fps features)
        hidden_states_h = self.audio_encoder_h(audio).last_hidden_state

        if self.dataset == "BIWI":  # NOTE(review): not yet validated on BIWI
            hidden_states_h, vertice, frame_num = inputRepresentationAdjustment(hidden_states_h, vertice, 50, 25)
        elif self.dataset == "vocaset":
            hidden_states_h, vertice, frame_num = inputRepresentationAdjustment(hidden_states_h, vertice, 50, 30)

        hidden_states_h = hidden_states_h[:, :frame_num]
        hidden_states = self.audio_feature_map_h(hidden_states_h)

        # gt motion feature extraction through the frozen VQ-VAE encoder
        feat_q_gt, _ = self.autoencoder.get_quant(vertice - template)
        feat_q_gt = feat_q_gt.permute(0, 2, 1)  # (1, seq*quan, zquant) layout for the regression loss

        # autoregressive facial motion prediction with teacher-forcing
        vertice_emb = obj_embedding
        style_emb = vertice_emb
        vertice_input = torch.cat((template, vertice[:, :-1]), 1)  # shift one position
        vertice_input = vertice_input - template
        vertice_input = self.vertice_map(vertice_input)
        vertice_input = vertice_input + style_emb
        vertice_input = self.PPE(vertice_input)
        tgt_mask = self.biased_mask[:, :vertice_input.shape[1], :vertice_input.shape[1]].clone().detach().to(device=self.device)
        memory_mask = enc_dec_mask(self.device, self.dataset, vertice_input.shape[1], hidden_states.shape[1])
        feat_out = self.transformer_decoder(vertice_input, hidden_states, tgt_mask=tgt_mask, memory_mask=memory_mask)
        feat_out = self.feat_map(feat_out)
        feat_out = feat_out.reshape(feat_out.shape[0], feat_out.shape[1]*self.args.face_quan_num, -1)

        # feature quantization and decoding back to vertices
        feat_out_q, _, _ = self.autoencoder.quantize(feat_out)
        vertice_out = self.autoencoder.decode(feat_out_q)

        # losses
        loss_motion = criterion(vertice_out + template, vertice)  # (batch, seq_len, V*3)
        loss_reg = criterion(feat_out, feat_q_gt.detach())

        # Auxiliary lip-reading CTC loss.
        # BUGFIX: guarded on vocaset — self.lip_mask / self.lip_map /
        # self.transformer only exist for vocaset, so this section used to
        # raise AttributeError on BIWI. Also removed a discarded
        # `vertice_out.to(self.device)` no-op.
        if self.dataset == "vocaset":
            # slice out the lip-region vertices and flatten back to (B, T, 254*3)
            lip_out = vertice_out.reshape(vertice_out.shape[0], vertice_out.shape[1], -1, 3)[:, :, self.lip_mask, :]
            lip_out = lip_out.reshape(vertice_out.shape[0], vertice_out.shape[1], -1)

            lip_offset = self.lip_map(lip_out.to(self.device)).to(self.device)
            lip_offset = linear_interpolation(lip_offset, 30, 50, output_len=frame_num)
            lip_features = self.transformer(lip_offset, lip_offset)
            logits = self.lm_head(self.dropout(lip_features))

            # pseudo transcript from the pretrained wav2vec2 CTC model
            text_logits = torch.argmax(self.text_encoder(audio).logits, dim=-1)
            log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
            text_logits = self.processor.batch_decode(text_logits)
            text_logits = self.tokenizer(text_logits, return_tensors="pt").input_ids
            text_logits = text_logits.to(self.device)

            # NOTE(review): computed but deliberately left out of the returned
            # objective (matches original behavior); add it to the sum to enable it.
            loss_ctc = nn.functional.ctc_loss(
                log_probs,
                text_logits,
                torch.tensor([log_probs.shape[0]]),
                torch.tensor([text_logits.shape[1]]),
                blank=0,
                reduction="mean",
                zero_infinity=True,
            )

        return 1.0 * loss_motion + 1.0 * loss_reg, [loss_motion, loss_reg]

    def predict(self, audio, template, one_hot, one_hot2=None, weight_of_one_hot=None, frame_num=None):
        """Autoregressive inference: generate `frame_num` frames of vertices.

        Optionally interpolates between two subject styles when `one_hot2`
        and `weight_of_one_hot` are given.
        """
        template = template.unsqueeze(1)  # (1, 1, V*3)

        # style embedding
        obj_embedding = self.learnable_style_emb(torch.argmax(one_hot, dim=1))

        # style interpolation (optional)
        if one_hot2 is not None and weight_of_one_hot is not None:
            obj_embedding2 = self.learnable_style_emb(torch.argmax(one_hot2, dim=1))
            obj_embedding = obj_embedding * weight_of_one_hot + obj_embedding2 * (1-weight_of_one_hot)
        obj_embedding = obj_embedding.unsqueeze(1)

        # (disabled) original wav2vec audio feature extraction
        # hidden_states = self.audio_encoder(audio, self.dataset).last_hidden_state
        # ...

        # HuBERT audio feature extraction (inference-time variant of
        # inputRepresentationAdjustment: no vertex stream to align against)
        hidden_states_h = self.audio_encoder_h(audio).last_hidden_state
        hidden_states = hidden_states_h
        factor = 2
        if self.dataset == "BIWI":
            if hidden_states.shape[1] % 2 != 0:
                hidden_states = hidden_states[:, :hidden_states.shape[1] - 1]
            hidden_states = torch.reshape(hidden_states, (1, hidden_states.shape[1] // 2, hidden_states.shape[2] * 2))
        elif self.dataset == "vocaset":
            audio_embedding_seq_len = frame_num * factor
            hidden_states = hidden_states.transpose(1, 2)
            hidden_states = torch.nn.functional.interpolate(hidden_states,
                                                            size=audio_embedding_seq_len, align_corners=True,
                                                            mode='linear')
            hidden_states = hidden_states.transpose(1, 2)

        hidden_states = torch.reshape(hidden_states, (1, hidden_states.shape[1] // factor, hidden_states.shape[2] * factor))
        hidden_states = hidden_states[:, :frame_num]
        hidden_states = self.audio_feature_map_h(hidden_states)

        # autoregressive facial motion prediction, one frame at a time
        for i in range(frame_num):
            if i == 0:
                vertice_emb = obj_embedding  # (1, 1, feature_dim)
                style_emb = vertice_emb
                vertice_input = self.PPE(style_emb)
            else:
                vertice_input = self.PPE(vertice_emb)

            tgt_mask = self.biased_mask[:, :vertice_input.shape[1], :vertice_input.shape[1]].clone().detach().to(device=self.device)
            memory_mask = enc_dec_mask(self.device, self.dataset, vertice_input.shape[1], hidden_states.shape[1])
            feat_out = self.transformer_decoder(vertice_input, hidden_states, tgt_mask=tgt_mask, memory_mask=memory_mask)
            feat_out = self.feat_map(feat_out)

            feat_out = feat_out.reshape(feat_out.shape[0], feat_out.shape[1]*self.args.face_quan_num, -1)
            # predicted feature to quantized one
            feat_out_q, _, _ = self.autoencoder.quantize(feat_out)
            # quantized feature to vertices; the first step duplicates the
            # feature so the decoder sees a length-2 sequence
            if i == 0:
                vertice_out_q = self.autoencoder.decode(torch.cat([feat_out_q, feat_out_q], dim=-1))
                vertice_out_q = vertice_out_q[:, 0].unsqueeze(1)
            else:
                vertice_out_q = self.autoencoder.decode(feat_out_q)

            # feed the newest predicted frame back in (except after the last step)
            if i != frame_num - 1:
                new_output = self.vertice_map(vertice_out_q[:, -1, :]).unsqueeze(1)
                new_output = new_output + style_emb
                vertice_emb = torch.cat((vertice_emb, new_output), 1)

        # final quantization and decoding of the full sequence
        feat_out_q, _, _ = self.autoencoder.quantize(feat_out)
        vertice_out = self.autoencoder.decode(feat_out_q)

        vertice_out = vertice_out + template
        return vertice_out
