from transformers import BertTokenizer, BertModel
from transformers.models.bert.modeling_bert import BertEncoder
import torch
import torch.nn as nn
from model.model import Classifier
from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask_for_sdpa
import copy

__all__ = ['SharedSpecificBERT']

class SharedSpecificBERT(nn.Module):
    """BERT with a shared bottom encoder and two parallel top branches.

    The first ``shared_layers`` transformer layers form a common bottom
    stack; the remaining layers are duplicated into a "shared" and a
    "specific" top encoder, each with its own classifier head. All three
    encoders are initialized from the corresponding layers of a pretrained
    BERT loaded from ``pretrained_dir``.
    """

    def __init__(self, args, pretrained_dir="pretrained/bert-base-uncased", shared_layers=6):
        super().__init__()
        # Load the pretrained BERT model; its embedding layer is reused in
        # forward() and its encoder layers are the weight source for the
        # split encoders built below.
        self.bert = BertModel.from_pretrained(pretrained_dir)
        config = self.bert.config

        # Bottom (shared) stack: the first `shared_layers` layers.
        bottom_config = copy.deepcopy(config)
        bottom_config.num_hidden_layers = shared_layers
        self.bottom_encoder = BertEncoder(bottom_config)

        # Top stacks: the remaining layers, one copy per branch.
        # Work on a deepcopy so the pretrained model's own config is not
        # mutated in place (the previous in-place `-=` left
        # self.bert.config reporting the wrong num_hidden_layers).
        top_config = copy.deepcopy(config)
        top_config.num_hidden_layers = config.num_hidden_layers - shared_layers
        # Shared-branch top encoder.
        self.shared_encoder = BertEncoder(top_config)
        # Specific-branch top encoder.
        self.specific_encoder = BertEncoder(top_config)

        # One classifier head per branch.
        self.shared_classifier = Classifier(args.text_dropout, args.text_out, args.post_dim, args.output_dim)
        self.specific_classifier = Classifier(args.text_dropout, args.text_out, args.post_dim, args.output_dim)

        self._init_from_pretrained(shared_layers)
        # self._freeze_parameters()

    def _init_from_pretrained(self, shared_layers):
        """Copy pretrained BERT layer weights into the three split encoders.

        Bottom encoder gets layers [0, shared_layers); both top encoders get
        identical copies of layers [shared_layers, num_hidden_layers).
        """
        src_layers = self.bert.encoder.layer

        # Bottom stack <- first `shared_layers` pretrained layers.
        for i in range(shared_layers):
            self.bottom_encoder.layer[i].load_state_dict(src_layers[i].state_dict())

        # Both top stacks <- the remaining pretrained layers.
        for encoder in (self.shared_encoder, self.specific_encoder):
            for i in range(shared_layers, len(src_layers)):
                encoder.layer[i - shared_layers].load_state_dict(src_layers[i].state_dict())

    def _freeze_parameters(self):
        """Freeze all three split encoders (classifier heads stay trainable)."""
        for encoder in (self.bottom_encoder, self.shared_encoder, self.specific_encoder):
            for param in encoder.parameters():
                param.requires_grad = False

    def forward(self, text, as_encoder=True, label=None, infer=False):
        """Run the two-branch encoder.

        Args:
            text: sequence of three tensors, consumed as input_ids=text[0],
                token_type_ids=text[1], attention_mask=text[2], each carrying
                an extra singleton dim at position 1 that is squeezed away
                — TODO confirm this ordering against the data loader.
            as_encoder: if True, return the raw hidden states of both
                branches (no classifiers, no loss).
            label: class labels; required only on the loss-computing path.
            infer: if True (and ``as_encoder`` is False), return classifier
                logits without computing a loss.

        Returns:
            ``(shared, specific)`` hidden states when ``as_encoder`` is True;
            ``(shared_logits, specific_logits)`` when ``infer`` is True;
            otherwise ``(loss1, loss2, shared_logits, specific_logits)``.
        """
        input_ids = torch.squeeze(text[0], 1)
        segment_ids = torch.squeeze(text[1], 1)
        input_mask = torch.squeeze(text[2], 1)
        _, seq_length = input_ids.size()

        # 1. Embedding layer of the pretrained BERT.
        embedding_output = self.bert.embeddings(input_ids=input_ids, token_type_ids=segment_ids)
        extended_attention_mask = _prepare_4d_attention_mask_for_sdpa(
            input_mask, embedding_output.dtype, tgt_len=seq_length
        )

        # 2. Shared bottom encoder.
        bottom_outputs = self.bottom_encoder(
            embedding_output, attention_mask=extended_attention_mask, encoder_attention_mask=None
        )[0]

        # 3. Branch processing (clone so each branch owns its activations).
        shared_outputs = self.shared_encoder(
            bottom_outputs.clone(), attention_mask=extended_attention_mask, encoder_attention_mask=None
        )[0]
        specific_outputs = self.specific_encoder(
            bottom_outputs.clone(), attention_mask=extended_attention_mask, encoder_attention_mask=None
        )[0]
        if not as_encoder:
            # Classify from the [CLS] position of each branch.
            shared_outputs = self.shared_classifier(shared_outputs[:, 0, :])
            specific_outputs = self.specific_classifier(specific_outputs[:, 0, :])

        if as_encoder or infer:
            return shared_outputs, specific_outputs

        # Per-branch mean cross-entropy. Default 'mean' reduction is
        # equivalent to the original reduction='none' + torch.mean, and the
        # criterion is now built only on the path that uses it.
        criterion = nn.CrossEntropyLoss()
        loss1 = criterion(shared_outputs, label)
        loss2 = criterion(specific_outputs, label)
        return loss1, loss2, shared_outputs, specific_outputs

    def infer(self, text):
        """Return classifier logits of both branches for ``text`` (no loss)."""
        return self.forward(text, as_encoder=False, infer=True)