import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import AutoModelForSeq2SeqLM, AutoModel, AutoTokenizer
from transformers.modeling_outputs import Seq2SeqLMOutput


class T5RestrictedVocabWithCopy(nn.Module):
    """T5 wrapper that generates over a restricted vocabulary and can copy
    tokens from the source sequence (pointer-generator style).

    The base model's ``lm_head`` is replaced with ``nn.Identity`` so that
    ``outputs.logits`` carries the decoder's final hidden states.  A smaller
    head projects those states onto the restricted vocabulary, and a learned
    sigmoid gate mixes that generation distribution with a copy distribution
    derived from the last decoder layer's cross-attention.
    """

    def __init__(self, model_path, new_vocab_ids, original_vocab_size):
        """
        Args:
            model_path: checkpoint path/name passed to ``from_pretrained``.
            new_vocab_ids: ids (in the original vocabulary) that make up the
                restricted output vocabulary; assumed to contain no duplicates.
            original_vocab_size: size of the full/original vocabulary; the
                distribution returned by ``forward`` is over this space.
        """
        super().__init__()
        self.t5 = AutoModelForSeq2SeqLM.from_pretrained(model_path)
        self.original_vocab_size = original_vocab_size

        self.new_vocab_ids = new_vocab_ids
        self.new_vocab_size = len(new_vocab_ids)
        # Buffer, not nn.Parameter: an integer index tensor that must follow
        # .to(device) and be saved in state_dict, but is never trained.
        self.register_buffer(
            "new_to_old_mapping", torch.tensor(new_vocab_ids, dtype=torch.long)
        )

        self.lm_head = nn.Linear(self.t5.config.d_model, self.new_vocab_size)
        self.copy_gate = nn.Linear(self.t5.config.d_model, 1)

        # Initialize the restricted head from the pretrained head's rows, then
        # neutralize the base head so the model emits hidden states as logits.
        with torch.no_grad():
            self.lm_head.weight.data = self.t5.lm_head.weight.data[new_vocab_ids]
        self.t5.lm_head = nn.Identity()

    def forward(
        self,
        input_ids,
        attention_mask=None,
        labels=None,
        decoder_input_ids=None,
        encoder_outputs=None,
        **kwargs
    ):
        """Return a probability distribution over the ORIGINAL vocabulary.

        Args:
            input_ids: source token ids, (batch, src_len).
            attention_mask: optional encoder attention mask.
            labels: target token ids; used as decoder inputs when
                ``decoder_input_ids`` is not supplied.
            decoder_input_ids: explicit decoder inputs (takes precedence
                over ``labels`` when given).

        Returns:
            Tensor of shape (batch, tgt_len, original_vocab_size): at each
            target position, a mixture of the generation distribution
            (restricted vocab scattered back onto original ids) and a copy
            distribution over the source tokens.

        NOTE(review): when ``decoder_input_ids`` is absent, ``labels`` are fed
        to the decoder UNSHIFTED — confirm callers pre-shift the targets.
        """
        if decoder_input_ids is None:
            decoder_input_ids = labels

        outputs = self.t5(
            input_ids=input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            output_attentions=True,
        )

        # lm_head was replaced by Identity in __init__, so `.logits` holds the
        # decoder's final hidden states: (batch, tgt_len, d_model).
        hidden_states = outputs.logits

        # Generation distribution over the restricted vocabulary.
        gen_probs = F.softmax(self.lm_head(hidden_states), dim=-1)

        # Copy distribution over source positions: head-averaged cross
        # attention of the last decoder layer, (batch, tgt_len, src_len).
        copy_attn = outputs.cross_attentions[-1].mean(dim=1)
        # Per-position mixing weight in (0, 1): 1 -> copy, 0 -> generate.
        copy_gate = torch.sigmoid(self.copy_gate(hidden_states))

        batch_size, tgt_len, _ = hidden_states.shape
        # Allocate on the same device/dtype as the activations (creating it on
        # the CPU default device would crash under GPU training).
        final_probs = torch.zeros(
            batch_size,
            tgt_len,
            self.original_vocab_size,
            device=gen_probs.device,
            dtype=gen_probs.dtype,
        )

        # Scatter the (down-weighted) generation probabilities onto their
        # original-vocabulary ids.
        gen_index = self.new_to_old_mapping.view(1, 1, -1).expand(
            batch_size, tgt_len, -1
        )
        final_probs.scatter_(2, gen_index, (1.0 - copy_gate) * gen_probs)

        # Accumulate the copy probabilities onto the source token ids
        # (scatter_add: repeated source tokens sum their attention mass).
        copy_index = input_ids.unsqueeze(1).expand(-1, tgt_len, -1)
        final_probs.scatter_add_(2, copy_index, copy_gate * copy_attn)

        return final_probs
