import os
import tempfile

import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image as PILImage
from reportlab.lib.pagesizes import letter
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.pdfgen import canvas
from reportlab.platypus import Paragraph, SimpleDocTemplate, Spacer, Image
from transformers import GPT2Tokenizer, GPT2LMHeadModel

from cave.CVAE import CVAE  # model definition lives in CVAE.py
from dml_torch.dml import TripletModel  # model definition lives in TripletModel.py

# ---- Model loading ----
# NOTE(review): weights_only=False lets torch.load unpickle arbitrary
# objects; only load checkpoints from a trusted source.
embedding_dim = 128  # embedding size fed to the CVAE input layer
latent_dim = 32  # CVAE latent dimension

cvae_model = CVAE(input_dim=embedding_dim, latent_dim=latent_dim)

# Load the checkpoint manually because its parameter set does not fully
# match the current model definition.
state_dict = torch.load('src/model_pth/cvae_model.pth', weights_only=False, map_location=torch.device('cpu'))
model_dict = cvae_model.state_dict()

# Keep only checkpoint entries whose name AND shape match the model.
filtered_state_dict = {k: v for k, v in state_dict.items() if k in model_dict and model_dict[k].shape == v.shape}

# Merge the matching parameters into the model's state dict and load it.
model_dict.update(filtered_state_dict)
cvae_model.load_state_dict(model_dict)
cvae_model.eval()  # inference mode (disables dropout / BN updates)

triplet_model = TripletModel()
triplet_model.load_state_dict(torch.load('src/model_pth/triplet_model_iu_xray.pth', weights_only=False, map_location=torch.device('cpu')))
triplet_model.eval()  # inference mode

# Load the pretrained GPT-2 model and tokenizer.
# NOTE(review): gpt2_model is never used below — generation is done with
# text_decoder_model instead; this looks like leftover code.
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
gpt2_model = GPT2LMHeadModel.from_pretrained('gpt2')
gpt2_model.eval()  # inference mode

# ---- Image loading and preprocessing ----
# Read the X-ray as grayscale, normalize it to [0, 1], add a batch
# dimension, and run it through the triplet model to get an embedding.
image_path = 'data/iu_xray/iu_xray/images/CXR2785_IM-1220/0.png'  # replace with your image path
image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
if image is None:
    # cv2.imread returns None instead of raising; fail early with a clear error.
    raise FileNotFoundError(f"Could not read image at {image_path}")
image = cv2.resize(image, (32, 32))  # model expects 32x32 input
image = image.astype('float32') / 255  # normalize to [0, 1]
# Add the batch dimension only: the original code appended a channel axis
# and immediately stripped it again, so the net shape is (1, 32, 32).
image = np.expand_dims(image, axis=0)
image_tensor = torch.tensor(image, dtype=torch.float32)

# Generate the image embedding.
with torch.no_grad():  # inference only — no gradients needed
    image_embedding = triplet_model(image_tensor)

def adjust_tensor_shape(tensor, target_shape):
    """Fit a 2-D tensor to ``target_shape`` along dimension 1.

    If the tensor already matches the target shape it is returned as-is.
    A wider tensor is truncated on the right; a narrower one is
    zero-padded on the right.
    """
    if tuple(tensor.shape) == tuple(target_shape):
        return tensor
    width, wanted = tensor.shape[1], target_shape[1]
    if width > wanted:
        return tensor[:, :wanted]
    return F.pad(tensor, (0, wanted - width), mode='constant', value=0)

# ---- Text-embedding generation ----
with torch.no_grad():  # inference only — no gradients needed
    # Crop/pad the image embedding to (1, embedding_dim) so it fits the CVAE.
    target_shape = (1, embedding_dim)
    adjusted_image_embedding = adjust_tensor_shape(image_embedding, target_shape)
    # The CVAE receives the image embedding as BOTH arguments and the first
    # element of its output tuple is used as the text embedding.
    # NOTE(review): presumably the second argument is a condition vector —
    # confirm against the CVAE forward() definition.
    new_text_embedding = cvae_model(adjusted_image_embedding, adjusted_image_embedding)[0]

# Load the fine-tuned GPT-2 text decoder.
# NOTE(review): weights_only=False unpickles arbitrary objects; only load
# trusted checkpoints.
text_decoder_model = GPT2LMHeadModel.from_pretrained('gpt2')
text_decoder_model.load_state_dict(torch.load('src/model_pth/text_decoder_model.pth', weights_only=False, map_location=torch.device('cpu')))
text_decoder_model.eval()  # inference mode

# Decode a report via GPT-2 sampling.
def decode_text_embedding(text_embedding):
    """Generate report text with the fine-tuned GPT-2 decoder.

    NOTE: ``text_embedding`` is accepted but not referenced anywhere in
    this function — generation is driven solely by the fixed prompt below.
    """
    prompt = "The following is a medical report based on an image:"
    input_ids = tokenizer.encode(prompt, return_tensors='pt')

    # Keep the prompt within GPT-2's 1024-token context window.
    limit = 1024
    if input_ids.size(1) > limit:
        input_ids = input_ids[:, :limit]

    # Every prompt token is real, so the attention mask is all ones, and
    # EOS doubles as the pad token (GPT-2 has no dedicated pad token).
    attention_mask = torch.ones_like(input_ids)
    eos_id = tokenizer.eos_token_id

    # Explicit position ids 0..len-1 for the prompt.
    position_ids = torch.arange(
        0, input_ids.size(1), dtype=torch.long
    ).unsqueeze(0).expand_as(input_ids)

    # Sample a continuation (top-k + nucleus sampling, temperature 0.7).
    with torch.no_grad():
        generated = text_decoder_model.generate(
            input_ids=input_ids,
            max_length=150,
            num_return_sequences=1,
            attention_mask=attention_mask,
            pad_token_id=eos_id,
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=0.7,
            position_ids=position_ids
        )

    return tokenizer.decode(generated[0], skip_special_tokens=True)

# Run the decoder. NOTE(review): new_text_embedding is passed in but the
# function currently generates from a fixed prompt and ignores it.
decoded_text = decode_text_embedding(new_text_embedding)
print("Decoded Text:", decoded_text)

# Build the PDF report (title, image, decoded text).
def generate_pdf_report(image_path, decoded_text, output_pdf_path):
    """Render a one-page PDF with a title, the input image, and the report text.

    Args:
        image_path: path of the source image to embed in the report.
        decoded_text: report text placed below the image.
        output_pdf_path: destination path for the generated PDF.
    """
    doc = SimpleDocTemplate(output_pdf_path, pagesize=letter)
    styles = getSampleStyleSheet()
    story = [Paragraph("Medical Report", styles['Title']), Spacer(1, 12)]

    # Scale the image to a fixed display width, preserving aspect ratio.
    img = PILImage.open(image_path)
    img_width, img_height = img.size
    new_width = 400
    new_height = int(new_width * (img_height / img_width))
    img = img.resize((new_width, new_height), PILImage.Resampling.LANCZOS)

    # reportlab's Image flowable needs a file path, so write the resized
    # image to a unique temp file and always remove it afterwards (the
    # original wrote a fixed "temp_image.png" into the CWD and leaked it).
    fd, tmp_path = tempfile.mkstemp(suffix=".png")
    os.close(fd)  # PIL reopens the path itself; release the raw descriptor
    try:
        img.save(tmp_path)
        story.append(Image(tmp_path, width=new_width, height=new_height))
        story.append(Spacer(1, 12))
        story.append(Paragraph(decoded_text, styles['Normal']))
        # Lay out the flowables and write the PDF (reads the temp image here).
        doc.build(story)
    finally:
        os.remove(tmp_path)

# Produce the final PDF report from the source image and the decoded text.
output_pdf_path = "src/generate_pdf/medical_report.pdf"
generate_pdf_report(image_path, decoded_text, output_pdf_path)
print(f"PDF report generated at {output_pdf_path}")