import torch
import numpy as np
import cv2
from cave.CVAE import CVAE  # 假设你的模型定义在 CVAE.py 文件中
from dml_torch.dml import TripletModel  # 假设你的模型定义在 TripletModel.py 文件中
from transformers import GPT2Tokenizer, GPT2LMHeadModel
import torch.nn.functional as F

def English_report_generate():
    """Generate an English medical report for a fixed IU X-ray image.

    Pipeline: grayscale image -> TripletModel image embedding -> CVAE
    "text embedding" -> GPT-2 decoder -> printed report string.

    Returns:
        str: the decoded report text (also printed to stdout).

    NOTE(review): the CVAE-generated text embedding is never actually fed
    into the GPT-2 decoder below — `decode_text_embedding` conditions only
    on a fixed text prompt. Confirm whether the embedding was meant to
    condition generation.
    """
    embedding_dim = 128  # feature size expected by the CVAE input
    latent_dim = 32      # CVAE latent-space size

    cvae_model = CVAE(input_dim=embedding_dim, latent_dim=latent_dim)

    # Load the checkpoint manually so that parameters whose name or shape
    # no longer matches the current model definition are skipped instead of
    # raising inside load_state_dict.
    # SECURITY: weights_only=False unpickles arbitrary objects — only load
    # checkpoints from trusted sources.
    state_dict = torch.load('src/model_pth/cvae_model.pth', weights_only=False, map_location=torch.device('cpu'))
    model_dict = cvae_model.state_dict()

    # Keep only entries present in the model with an identical tensor shape.
    filtered_state_dict = {
        k: v for k, v in state_dict.items()
        if k in model_dict and model_dict[k].shape == v.shape
    }
    model_dict.update(filtered_state_dict)
    cvae_model.load_state_dict(model_dict)
    cvae_model.eval()  # evaluation mode (disable dropout / BN updates)

    triplet_model = TripletModel()
    triplet_model.load_state_dict(torch.load('src/model_pth/triplet_model_iu_xray.pth', weights_only=False, map_location=torch.device('cpu')))
    triplet_model.eval()

    # GPT-2 tokenizer; the fine-tuned decoder weights are loaded further down.
    # (The original code also loaded a second, unused GPT-2 model here — removed.)
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')

    # Load the image and normalize it to a (1, 32, 32) float tensor in [0, 1].
    image_path = 'data/iu_xray/iu_xray/images/CXR10_IM-0002/0.png'  # replace with your image path
    image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    if image is None:
        # cv2.imread returns None on a missing/unreadable file; fail loudly
        # instead of crashing later inside cv2.resize.
        raise FileNotFoundError(f"Could not read image: {image_path}")
    image = cv2.resize(image, (32, 32))
    image = image.astype('float32') / 255  # scale pixel values to [0, 1]
    # Single batch dimension; equivalent to the original's
    # expand_dims(-1) + expand_dims(0) + [:, :, :, 0] round trip.
    image = np.expand_dims(image, axis=0)
    image_tensor = torch.tensor(image, dtype=torch.float32)

    # Produce the image embedding (no gradients needed for inference).
    with torch.no_grad():
        image_embedding = triplet_model(image_tensor)

    def adjust_tensor_shape(tensor, target_shape):
        """Truncate or zero-pad `tensor` along dim 1 to match `target_shape`.

        Assumes a 2-D (batch, features) tensor — TODO confirm against
        TripletModel's actual output shape.
        """
        current_shape = tensor.shape
        if current_shape == target_shape:
            return tensor
        elif current_shape[1] > target_shape[1]:
            return tensor[:, :target_shape[1]]
        else:
            return F.pad(tensor, (0, target_shape[1] - current_shape[1]), mode='constant', value=0)

    # Generate the text embedding from the image embedding via the CVAE.
    with torch.no_grad():
        target_shape = (1, embedding_dim)
        adjusted_image_embedding = adjust_tensor_shape(image_embedding, target_shape)
        # CVAE.forward returns a tuple; element [0] is used as the generated
        # embedding — presumably the reconstruction; verify against CVAE.forward.
        new_text_embedding = cvae_model(adjusted_image_embedding, adjusted_image_embedding)[0]

    # Load the fine-tuned GPT-2 text decoder.
    text_decoder_model = GPT2LMHeadModel.from_pretrained('gpt2')
    text_decoder_model.load_state_dict(torch.load('src/model_pth/text_decoder_model_new.pth', weights_only=False, map_location=torch.device('cpu')))
    text_decoder_model.eval()

    def decode_text_embedding(text_embedding):
        """Generate report text with GPT-2 from a fixed prompt.

        NOTE(review): `text_embedding` is currently unused — generation is
        conditioned only on the prompt.
        """
        prompt = "The following is a medical report based on an image:"
        input_ids = tokenizer.encode(prompt, return_tensors='pt')

        # Truncate the prompt to GPT-2's maximum context length.
        max_length = 1024
        if input_ids.size(1) > max_length:
            input_ids = input_ids[:, :max_length]

        # All prompt tokens are real tokens (no padding), so the mask is all ones.
        attention_mask = torch.ones_like(input_ids)

        # GPT-2 has no pad token; reuse EOS to silence the generate() warning.
        pad_token_id = tokenizer.eos_token_id

        # BUG FIX: do NOT pass a fixed prompt-length `position_ids` to
        # generate() — it is not extended as tokens are appended, which
        # breaks incremental decoding. generate() derives position ids
        # from `attention_mask` automatically.
        with torch.no_grad():
            outputs = text_decoder_model.generate(
                input_ids=input_ids,
                max_length=150,            # maximum generated sequence length
                num_return_sequences=1,
                attention_mask=attention_mask,
                pad_token_id=pad_token_id,
                do_sample=True,            # sampling for more natural text
                top_k=50,
                top_p=0.95,
                temperature=0.7,           # controls output diversity
            )

        return tokenizer.decode(outputs[0], skip_special_tokens=True)

    # BUG FIX: the original defined (but never called) a nested `output()`,
    # so the function silently did nothing. Decode, print, and return.
    decoded_text = decode_text_embedding(new_text_embedding)
    print("Decoded Text:", decoded_text)
    return decoded_text

# NOTE(review): is it possible the model was never trained, and this script
# only runs inference with whatever weights the checkpoints contain?
if __name__ == "__main__":
    # BUG FIX: `output` is a function nested inside English_report_generate,
    # not an attribute of the function object — the original call
    # `English_report_generate.output()` raised AttributeError. Call the
    # pipeline entry point directly instead.
    English_report_generate()