import torch
import numpy as np
import cv2
import sys
import os
from cave.CVAE import CVAE  # 假设你的模型定义在 CVAE.py 文件中
from dml_torch.dml import TripletModel  # 假设你的模型定义在 TripletModel.py 文件中
from transformers import GPT2Tokenizer, GPT2LMHeadModel
import torch.nn.functional as F

# 加载模型
embedding_dim = 128  # 假设 embedding_dim 是 128
latent_dim = 32  # 假设 latent_dim 是 32

class ModelLoader:
    """Load the CVAE, Triplet, and GPT-2 models and generate a text report
    from a grayscale input image.

    All weights are loaded on CPU and every model is put in eval mode at
    construction time.
    """

    def __init__(self, model_dir='src/model_pth'):
        """Build the model objects and load their trained weights.

        Args:
            model_dir: Directory containing the ``.pth`` checkpoints
                (defaults to the original hard-coded location).
        """
        self.model_dir = model_dir
        self.cvae_model = CVAE(input_dim=embedding_dim, latent_dim=latent_dim)
        self.triplet_model = TripletModel()
        self.tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        self.gpt2_model = GPT2LMHeadModel.from_pretrained('gpt2')
        self.text_decoder_model = GPT2LMHeadModel.from_pretrained('gpt2')

        self._load_models()

    def _load_models(self):
        """Load checkpoint weights for all three models and switch to eval mode."""
        cpu = torch.device('cpu')
        # NOTE(review): weights_only=False unpickles arbitrary Python objects.
        # Only load checkpoints from a trusted source.

        # Load the CVAE weights, keeping only parameters whose name AND shape
        # match the current architecture (tolerates checkpoints saved from a
        # slightly different model definition).
        state_dict = torch.load(os.path.join(self.model_dir, 'cvae_model.pth'),
                                weights_only=False, map_location=cpu)
        model_dict = self.cvae_model.state_dict()
        filtered = {k: v for k, v in state_dict.items()
                    if k in model_dict and model_dict[k].shape == v.shape}
        model_dict.update(filtered)
        self.cvae_model.load_state_dict(model_dict)
        self.cvae_model.eval()

        # Load the Triplet (image-embedding) model.
        self.triplet_model.load_state_dict(
            torch.load(os.path.join(self.model_dir, 'triplet_model_iu_xray.pth'),
                       weights_only=False, map_location=cpu))
        self.triplet_model.eval()

        # Load the fine-tuned GPT-2 text decoder.
        self.text_decoder_model.load_state_dict(
            torch.load(os.path.join(self.model_dir, 'text_decoder_model_new.pth'),
                       weights_only=False, map_location=cpu))
        self.text_decoder_model.eval()

    def adjust_tensor_shape(self, tensor, target_shape):
        """Truncate or right-zero-pad ``tensor`` along dim 1 to ``target_shape``.

        Args:
            tensor: 2-D tensor of shape ``(batch, features)``.
            target_shape: Desired ``(batch, features)`` shape; only the
                feature dimension (dim 1) is adjusted.

        Returns:
            The input tensor itself when shapes already match; otherwise a
            truncated view or a zero-padded copy.
        """
        current_shape = tensor.shape
        if current_shape == target_shape:
            return tensor
        if current_shape[1] > target_shape[1]:
            return tensor[:, :target_shape[1]]
        return F.pad(tensor, (0, target_shape[1] - current_shape[1]),
                     mode='constant', value=0)

    def decode_text_embedding(self, text_embedding):
        """Generate a report with the fine-tuned GPT-2 decoder.

        NOTE(review): ``text_embedding`` is currently unused — generation is
        conditioned only on the fixed textual prompt, so the output does NOT
        depend on the image. Wiring the embedding in (e.g. via
        ``inputs_embeds``) would change the model contract, so it is only
        flagged here.

        Args:
            text_embedding: Embedding produced by the CVAE (ignored; see note).

        Returns:
            The generated report as a plain string.
        """
        prompt = "The following is a medical report based on an image:"
        input_ids = self.tokenizer.encode(prompt, return_tensors='pt')
        # GPT-2's context window is 1024 tokens; truncate defensively.
        max_length = 1024
        if input_ids.size(1) > max_length:
            input_ids = input_ids[:, :max_length]
        attention_mask = torch.ones_like(input_ids)
        # GPT-2 has no pad token; reuse EOS to silence the generate() warning.
        pad_token_id = self.tokenizer.eos_token_id

        with torch.no_grad():
            # Fix: do not pass explicit position_ids. generate() extends the
            # sequence token by token, so a fixed prompt-length position
            # tensor cannot cover the newly generated positions; GPT-2
            # derives correct positions from the attention mask internally.
            outputs = self.text_decoder_model.generate(
                input_ids=input_ids,
                max_length=150,
                num_return_sequences=1,
                attention_mask=attention_mask,
                pad_token_id=pad_token_id,
                do_sample=True,
                top_k=50,
                top_p=0.95,
                temperature=0.7,
            )

        return self.tokenizer.decode(outputs[0], skip_special_tokens=True)

    def generate_text_from_image(self, image_path):
        """End-to-end pipeline: image file -> embedding -> CVAE -> report text.

        Args:
            image_path: Path to the input image file.

        Returns:
            The generated report string.

        Raises:
            FileNotFoundError: If the image cannot be read from ``image_path``.
        """
        image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
        if image is None:
            # cv2.imread silently returns None on a missing/unreadable file,
            # which would otherwise surface as an opaque resize() error.
            raise FileNotFoundError(f"Could not read image: {image_path}")
        image = cv2.resize(image, (32, 32))
        image = image.astype('float32') / 255
        # Add the batch dimension -> (1, 32, 32). (The original code appended
        # a trailing channel axis and then immediately sliced it away again.)
        image_tensor = torch.tensor(image[np.newaxis, ...], dtype=torch.float32)

        with torch.no_grad():
            image_embedding = self.triplet_model(image_tensor)

        # Match the embedding to the CVAE's expected input width.
        adjusted = self.adjust_tensor_shape(image_embedding, (1, embedding_dim))
        # Condition the CVAE on the image embedding itself; take the first
        # element of its output tuple as the new text embedding.
        new_text_embedding = self.cvae_model(adjusted, adjusted)[0]

        return self.decode_text_embedding(new_text_embedding)
