import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import json
import numpy as np
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from torch.nn.utils.rnn import pad_sequence
import matplotlib.pyplot as plt

# Select GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f'Using device: {device}')

# Load the precomputed text embeddings for the IU X-Ray dataset.
# Assumed schema (inferred from the extraction loop below — confirm against
# the producer of this file):
#   {"train": [{"report_embedding": [...], "report_text": "..."}, ...], ...}
with open('data/iu_xray/iu_xray/text_embeddings.json', 'r') as f:
    text_embeddings_dict = json.load(f)

# Extract aligned report_embedding / report_text pairs from the training split.
report_embeddings = []
report_texts = []
for item in text_embeddings_dict['train']:
    report_embeddings.append(item['report_embedding'])
    report_texts.append(item['report_text'])

# Convert to a numpy array (one row per report).
report_embeddings = np.array(report_embeddings)

# Load the pretrained GPT-2 tokenizer and language-model head
# (downloads weights on first use).
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2')

# GPT-2 has no pad token by default; reuse EOS as the padding token and
# cache its id for collate_fn below.
tokenizer.pad_token = tokenizer.eos_token
pad_token_id = tokenizer.pad_token_id

# Move the model to the selected device.
model.to(device)

# 定义数据集类
class TextDataset(Dataset):
    """Pairs a precomputed report embedding with its tokenized report text.

    Parameters
    ----------
    embeddings : sequence
        Per-report embedding vectors (indexable; one row per report).
    texts : sequence of str
        Report strings, aligned index-for-index with ``embeddings``.
    text_tokenizer : optional
        Tokenizer exposing a GPT-2-style ``encode(text, return_tensors,
        max_length, truncation)`` method. Defaults to the module-level
        ``tokenizer`` so existing ``TextDataset(embeddings, texts)`` call
        sites keep working unchanged.
    max_length : int
        Maximum token length per report; longer reports are truncated.
        Defaults to 256 (the original hard-coded value).
    """

    def __init__(self, embeddings, texts, text_tokenizer=None, max_length=256):
        self.embeddings = embeddings
        self.texts = texts
        # Fall back to the module-level tokenizer for backward compatibility.
        self.tokenizer = text_tokenizer if text_tokenizer is not None else tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.embeddings)

    def __getitem__(self, idx):
        """Return ``(embedding, input_ids)`` for the report at ``idx``."""
        embedding = self.embeddings[idx]
        text = self.texts[idx]
        # Tokenize lazily per item, truncating to max_length tokens.
        input_ids = self.tokenizer.encode(
            text, return_tensors='pt', max_length=self.max_length, truncation=True
        )
        # squeeze(0) drops the batch dimension added by return_tensors='pt';
        # collate_fn re-batches via pad_sequence.
        return embedding, input_ids.squeeze(0)

# 定义 collate_fn 函数
def collate_fn(batch):
    embeddings, input_ids = zip(*batch)
    embeddings = torch.tensor(embeddings)
    input_ids = pad_sequence(input_ids, batch_first=True, padding_value=float(pad_token_id))
    return embeddings, input_ids

# Build the dataset and a shuffled training loader.
dataset = TextDataset(report_embeddings, report_texts)
train_loader = DataLoader(dataset, batch_size=5, shuffle=True, collate_fn=collate_fn)

# Optimizer for fine-tuning.
# NOTE(review): `criterion` is never used below — GPT2LMHeadModel computes
# its own cross-entropy loss when `labels` is passed. Consider removing.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-4)

# Per-epoch average training loss, collected for plotting after training.
loss_history = []

# Fine-tune GPT-2 as a plain language model on the report texts.
# NOTE(review): the batched report embeddings yielded by collate_fn are never
# fed to the model — training conditions on text only. Confirm whether the
# embeddings were meant to be used (e.g. as a prefix) before relying on this
# decoder.
# NOTE(review): no attention_mask is passed and labels include the EOS-based
# padding, so loss is also computed on pad positions — confirm this is
# acceptable for the intended use.
num_epochs = 20
for epoch in range(num_epochs):
    model.train()
    total_loss = 0
    for batch_idx, (embeddings, input_ids) in enumerate(train_loader):
        optimizer.zero_grad()
        input_ids = input_ids.to(device)  # move batch to the training device
        # With labels=input_ids the model shifts targets internally and
        # returns the cross-entropy loss directly.
        outputs = model(input_ids, labels=input_ids)
        loss = outputs.loss
        loss.backward()
        optimizer.step()
        total_loss += loss.item()

    avg_loss = total_loss / len(train_loader)
    loss_history.append(avg_loss)
    # Reuse avg_loss instead of recomputing total_loss / len(train_loader).
    print(f'Epoch {epoch+1}/{num_epochs}, Loss: {avg_loss:.4f}')

# Persist the fine-tuned weights.
torch.save(model.state_dict(), 'src/model_pth/text_decoder_model_256.pth')

# Plot the per-epoch training loss curve.
plt.plot(loss_history, label='Training Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Training Loss Curve')
plt.legend()
plt.show()