from transformers import BertPreTrainedModel, BertModel
import torch.nn as nn
from transformers.modeling_outputs import MaskedLMOutput

class PermutationMaskedLMOutput(MaskedLMOutput):
    """``MaskedLMOutput`` extended with per-token word-order (permutation) logits.

    Defined at module level (the original re-declared this class inside
    ``forward`` on every call, so ``isinstance`` checks across calls failed
    and a new class object was built per forward pass).
    """

    def __init__(
        self,
        loss=None,
        logits=None,
        permutation_logits=None,
        hidden_states=None,
        attentions=None,
    ):
        super().__init__(loss=loss, logits=logits, hidden_states=hidden_states, attentions=attentions)
        self.permutation_logits = permutation_logits


class BertForPermutationAndMaskedLM(BertPreTrainedModel):
    """BERT with two pre-training heads: masked-LM and word-order recovery.

    The MLM head maps each token's hidden state to vocabulary logits; the
    permutation head classifies each token's original position, i.e. one class
    per index in ``[0, config.max_position_embeddings)``.
    """

    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)

        # Explicit MLM prediction head (transform + decoder, mirroring the
        # structure of BertLMPredictionHead: dense -> GELU -> LayerNorm -> vocab).
        self.mlm_head = nn.Sequential(
            nn.Linear(config.hidden_size, config.hidden_size),
            nn.GELU(),
            nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps),
            nn.Linear(config.hidden_size, config.vocab_size),
        )

        # Word-order recovery head: per-token classification over position indices.
        self.permutation_head = nn.Linear(config.hidden_size, config.max_position_embeddings)

        # Initialize weights and apply final processing.
        self.post_init()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,  # MLM labels; positions set to -100 are ignored by the loss
        permutation_labels=None,  # original-position labels; -100 is ignored
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Run BERT, then both heads; optionally compute the combined loss.

        Returns:
            ``PermutationMaskedLMOutput`` when ``return_dict`` is true, else a
            tuple ``(loss?, mlm_logits, permutation_logits, *extra)`` where the
            loss element is present only if at least one label set was given.
            The loss is the sum of whichever of the two task losses have labels.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Base BERT encoder.
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]  # [batch_size, seq_len, hidden_size]

        # Compute both heads unconditionally so the logits are defined on every
        # return path. (The original computed `prediction_scores` only when MLM
        # labels were present, which raised NameError on label-free inference.)
        permutation_logits = self.permutation_head(sequence_output)  # [batch, seq, max_position_embeddings]
        prediction_scores = self.mlm_head(sequence_output)  # [batch, seq, vocab_size]

        total_loss = None
        if permutation_labels is not None:
            # Word-order recovery loss; -100 positions are excluded.
            permutation_loss = nn.CrossEntropyLoss(ignore_index=-100)(
                permutation_logits.view(-1, self.config.max_position_embeddings),
                permutation_labels.view(-1),
            )
            total_loss = permutation_loss

        if labels is not None:
            # MLM loss; CrossEntropyLoss ignores -100 targets by default.
            masked_lm_loss = nn.CrossEntropyLoss()(
                prediction_scores.view(-1, self.config.vocab_size),
                labels.view(-1),
            )
            # Guard against total_loss still being None (the original did
            # `total_loss += masked_lm_loss`, a TypeError whenever only MLM
            # labels were provided).
            total_loss = masked_lm_loss if total_loss is None else total_loss + masked_lm_loss

        if not return_dict:
            # Parenthesized explicitly: the original's un-parenthesized ternary
            # bound to the entire concatenation, so label-free calls returned
            # the raw encoder `outputs` with no head logits at all.
            output = (prediction_scores, permutation_logits) + outputs[1:]
            return ((total_loss,) + output) if total_loss is not None else output

        return PermutationMaskedLMOutput(
            loss=total_loss,
            logits=prediction_scores,
            permutation_logits=permutation_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )