import torch
import torch.nn as nn
import torch.nn.functional as F

from model.TextEncoder import TextEncoder
from model.ImageEncoder import ImageEncoder

__all__ = ['MMModel']


def xavier_init(m):
    """Initialise a Linear module with Xavier-normal weights and zero bias.

    Intended for use with ``nn.Module.apply``; any non-Linear module is
    left untouched.

    Args:
        m: a submodule handed in by ``apply``.
    """
    # isinstance (rather than ``type(m) == nn.Linear``) also covers
    # subclasses of nn.Linear, which the original comparison excluded.
    if isinstance(m, nn.Linear):
        nn.init.xavier_normal_(m.weight)
        if m.bias is not None:
            # nn.init.zeros_ instead of mutating .data directly.
            nn.init.zeros_(m.bias)

class LinearLayer(nn.Module):
    """A single fully-connected layer with Xavier-normal initialisation.

    The Linear module is kept inside an ``nn.Sequential`` so the
    state-dict keys stay ``clf.0.*``, preserving checkpoint compatibility.
    """

    def __init__(self, in_dim, out_dim):
        super().__init__()
        linear = nn.Linear(in_dim, out_dim)
        self.clf = nn.Sequential(linear)
        self.clf.apply(xavier_init)

    def forward(self, x):
        return self.clf(x)

class MMModel(nn.Module):
    """Multimodal (image/text) classification model.

    Depending on ``data_type`` the model builds an image branch, a text
    branch, or both plus a fusion head:

      - ``'image'``: image encoder + image classifier
      - ``'text'`` : text encoder + text classifier
      - ``'it'``   : both encoders + a classifier over the concatenated
                     first-token features of each modality.

    NOTE: attribute names keep the historical ``classfier`` spelling so
    that existing checkpoints / state_dicts still load.
    """

    def __init__(self, args, data_type='it'):
        super(MMModel, self).__init__()

        self.args = args
        self.data_type = data_type

        if self.data_type != 'text':
            self.image_encoder = ImageEncoder(pretrained_dir=args.pretrained_dir, image_encoder=args.image_encoder)
            self.image_classfier = Classifier(args.img_dropout, args.img_out, args.post_dim, args.output_dim)

        if self.data_type != 'image':
            self.text_encoder = TextEncoder(pretrained_dir=args.pretrained_dir, text_encoder=args.text_encoder)
            self.text_classfier = Classifier(args.text_dropout, args.text_out, args.post_dim, args.output_dim)

        if self.data_type == 'it':
            self.mm_classfier = Classifier(args.mm_dropout, args.text_out + args.img_out, args.post_dim, args.output_dim)

    def forward(self, text=None, image=None, label=None, infer=False):
        """Run the branch(es) selected by ``self.data_type``.

        Args:
            text: text-encoder input (required unless data_type == 'image').
            image: image tensor (required unless data_type == 'text').
            label: class indices for the loss (required when infer is False).
            infer: when True, return logits only and skip the loss.

        Returns:
            Logits when ``infer`` is True, otherwise ``(loss, logits)``.
        """
        if self.data_type != 'image':
            text = self.text_encoder(text=text)

        if self.data_type != 'text':
            # Drop a singleton dim — presumably (B, 1, C, H, W) -> (B, C, H, W)
            # from the data loader; TODO confirm against the dataset code.
            image = torch.squeeze(image, 1)
            image = self.image_encoder(pixel_values=image)

        # [:, 0, :] selects the first token of the encoder output sequence
        # (CLS-style pooling).
        if self.data_type == 'image':
            output = self.image_classfier(image[:, 0, :])
        elif self.data_type == 'text':
            output = self.text_classfier(text[:, 0, :])
        else:
            fusion = torch.cat([text[:, 0, :], image[:, 0, :]], dim=-1)
            output = self.mm_classfier(fusion)

        if infer:
            return output

        # Equivalent to mean(CrossEntropyLoss(reduction='none')(output, label))
        # but avoids re-constructing the criterion on every forward pass.
        loss = F.cross_entropy(output, label)

        return loss, output

    def infer(self, text=None, image=None):
        """Convenience wrapper: forward pass returning logits only."""
        return self.forward(text, image, label=None, infer=True)

    def load_image_params(self, state_dict):
        """Partially load image encoder/classifier weights (strict=False)."""
        print("Loading Image Params")

        # Load the image-encoder parameters.
        missing_keys, unexpected_keys = self.image_encoder.load_state_dict(state_dict, strict=False)
        if missing_keys or unexpected_keys:
            # Fix: the original checked unexpected_keys but never printed them.
            print(f"Image Encoder - Missing keys: {missing_keys}")
            print(f"Image Encoder - Unexpected keys: {unexpected_keys}")

        # Load the image-classifier parameters.
        missing_keys, unexpected_keys = self.image_classfier.load_state_dict(state_dict, strict=False)
        if missing_keys or unexpected_keys:
            print(f"Image Classifier - Missing keys: {missing_keys}")
            print(f"Image Classifier - Unexpected keys: {unexpected_keys}")

    def load_text_params(self, state_dict):
        """Partially load text encoder/classifier weights (strict=False)."""
        print("Loading Text Params")

        # Load the text-encoder parameters.
        missing_keys, unexpected_keys = self.text_encoder.load_state_dict(state_dict, strict=False)
        if missing_keys or unexpected_keys:
            print(f"Text Encoder - Missing keys: {missing_keys}")
            print(f"Text Encoder - Unexpected keys: {unexpected_keys}")

        # Load the text-classifier parameters.
        missing_keys, unexpected_keys = self.text_classfier.load_state_dict(state_dict, strict=False)
        if missing_keys or unexpected_keys:
            print(f"Text Classifier - Missing keys: {missing_keys}")
            print(f"Text Classifier - Unexpected keys: {unexpected_keys}")

class Classifier(nn.Module):
    """Three-layer MLP head: Linear -> ReLU -> Dropout -> Linear -> ReLU -> Linear.

    Attribute names (``post_*``) are preserved for state-dict compatibility.
    """

    def __init__(self, dropout, in_dim, post_dim, out_dim):
        super(Classifier, self).__init__()
        self.post_dropout = nn.Dropout(p=dropout)
        self.post_layer_1 = LinearLayer(in_dim, post_dim)
        self.post_layer_2 = LinearLayer(post_dim, post_dim)
        self.post_layer_3 = LinearLayer(post_dim, out_dim)

    def forward(self, input):
        # First hidden layer with ReLU, then dropout.
        hidden = self.post_dropout(F.relu(self.post_layer_1(input)))
        # Second hidden layer with ReLU; final layer emits raw logits.
        hidden = F.relu(self.post_layer_2(hidden))
        return self.post_layer_3(hidden)






