from transformers import ViTModel
from transformers.models.vit.modeling_vit import ViTEncoder
import torch
import torch.nn as nn
from model.model import Classifier
import copy

# Public API of this module.
__all__ = ['SharedSpecificViT']

class SharedSpecificViT(nn.Module):
    """A pretrained ViT split into a shared bottom stack and two parallel
    top branches ("shared" and "specific").

    The first ``shared_layers`` transformer layers form a common bottom
    encoder; the remaining layers are duplicated into two independent top
    encoders, each followed by its own LayerNorm and classifier head.
    All encoder and LayerNorm weights are initialized from the same
    pretrained checkpoint.
    """

    def __init__(self, args, pretrained_dir="pretrained/vit-base-patch16-224", shared_layers=6):
        """
        Args:
            args: namespace providing the classifier-head hyperparameters
                ``img_dropout``, ``img_out``, ``post_dim``, ``output_dim``.
            pretrained_dir: path/name of the pretrained ViT checkpoint.
            shared_layers: number of bottom transformer layers shared by
                both branches.
        """
        super().__init__()
        # Load the pretrained ViT; it supplies the patch embeddings used in
        # forward() and the source weights copied below.
        self.vit = ViTModel.from_pretrained(pretrained_dir)
        config = self.vit.config

        # Bottom (shared) stack: the first `shared_layers` layers.
        bottom_config = copy.deepcopy(config)
        bottom_config.num_hidden_layers = shared_layers
        self.bottom_encoder = ViTEncoder(bottom_config)

        # Top stacks: the remaining layers. Work on a deep copy so we do
        # NOT mutate `self.vit.config` in place — the previous code shrank
        # `config.num_hidden_layers` on the live config, leaving the
        # pretrained model's config inconsistent with its actual weights.
        top_config = copy.deepcopy(config)
        top_config.num_hidden_layers = config.num_hidden_layers - shared_layers

        self.shared_encoder = ViTEncoder(top_config)
        self.shared_layernorm = nn.LayerNorm(top_config.hidden_size, eps=top_config.layer_norm_eps)
        self.shared_classifier = Classifier(args.img_dropout, args.img_out, args.post_dim, args.output_dim)

        self.specific_encoder = ViTEncoder(top_config)
        self.specific_layernorm = nn.LayerNorm(top_config.hidden_size, eps=top_config.layer_norm_eps)
        self.specific_classifier = Classifier(args.img_dropout, args.img_out, args.post_dim, args.output_dim)

        self._init_from_pretrained(shared_layers)
        # self._freeze_parameters()

    def _init_from_pretrained(self, shared_layers):
        """Copy pretrained transformer-layer and LayerNorm weights into the
        bottom encoder and both branch encoders."""
        src_layers = self.vit.encoder.layer

        # Bottom encoder gets the first `shared_layers` pretrained layers.
        for i in range(shared_layers):
            self.bottom_encoder.layer[i].load_state_dict(src_layers[i].state_dict())

        # Each branch gets an independent copy of the remaining layers.
        for branch in (self.shared_encoder, self.specific_encoder):
            for i in range(shared_layers, len(src_layers)):
                branch.layer[i - shared_layers].load_state_dict(src_layers[i].state_dict())

        # Both branch LayerNorms start from the pretrained final LayerNorm.
        self.shared_layernorm.load_state_dict(self.vit.layernorm.state_dict())
        self.specific_layernorm.load_state_dict(self.vit.layernorm.state_dict())

    def _freeze_parameters(self):
        """Disable gradients for all encoders and branch LayerNorms
        (classifier heads remain trainable)."""
        frozen_modules = (
            self.bottom_encoder,
            self.shared_encoder,
            self.specific_encoder,
            self.shared_layernorm,
            self.specific_layernorm,
        )
        for module in frozen_modules:
            for param in module.parameters():
                param.requires_grad = False

    def forward(self, pixel_values, as_encoder=True, label=None, infer=False):
        """Run both branches over the input images.

        Args:
            pixel_values: input images; a singleton dim at axis 1 is
                squeezed out (assumes (batch, 1, C, H, W) — TODO confirm
                against callers).
            as_encoder: if True, return the full token sequences from both
                branches instead of classifier logits.
            label: class indices, required only when computing losses.
            infer: if True (and not as_encoder), return logits without loss.

        Returns:
            (shared, specific) features/logits when ``as_encoder`` or
            ``infer``; otherwise ``(loss1, loss2, shared_logits,
            specific_logits)``.
        """
        # 1. Patch embedding via the pretrained ViT.
        pixel_values = torch.squeeze(pixel_values, 1)
        embedding_output = self.vit.embeddings(pixel_values)

        # 2. Shared bottom encoder.
        bottom_outputs = self.bottom_encoder(embedding_output)[0]

        # 3a. Shared branch.
        shared_outputs = self.shared_encoder(bottom_outputs.clone())[0]
        shared_outputs = self.shared_layernorm(shared_outputs)
        if not as_encoder:
            # Classify on the [CLS] token.
            shared_outputs = self.shared_classifier(shared_outputs[:, 0, :])

        # 3b. Specific branch.
        specific_outputs = self.specific_encoder(bottom_outputs.clone())[0]
        specific_outputs = self.specific_layernorm(specific_outputs)
        if not as_encoder:
            specific_outputs = self.specific_classifier(specific_outputs[:, 0, :])

        if as_encoder or infer:
            return shared_outputs, specific_outputs

        # Per-sample CE then mean — equivalent to reduction='mean'; built
        # only on the loss path (the original constructed it every call).
        criterion = torch.nn.CrossEntropyLoss(reduction='none')
        loss1 = torch.mean(criterion(shared_outputs, label))
        loss2 = torch.mean(criterion(specific_outputs, label))
        return loss1, loss2, shared_outputs, specific_outputs

    def infer(self, image=None):
        """Return both branches' classifier logits without computing losses."""
        return self.forward(image, as_encoder=False, label=None, infer=True)
