import os
import json
import torch
import numpy as np
from torch.utils.data import DataLoader, Dataset
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import train_test_split

# Dataset wrapper exposing a list of tokenized reports, one sample per report.
class TextDataset(Dataset):
    """Thin ``Dataset`` over a sequence of pre-tokenized texts.

    Each item is returned exactly as stored (a list of word indices here);
    padding/batching is left to the DataLoader's ``collate_fn``.
    """

    def __init__(self, texts):
        # Hold a reference to the samples; no copying or preprocessing.
        self.texts = texts

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        return self.texts[idx]

# 定义文本嵌入模型
class TextEmbeddingModel(nn.Module):
    def __init__(self, input_dim, embedding_dim):
        super(TextEmbeddingModel, self).__init__()
        self.embedding = nn.Embedding(input_dim, embedding_dim)
        self.fc1 = nn.Linear(embedding_dim, 128)
        self.fc2 = nn.Linear(128, 64)
    
    def forward(self, x):
        x = self.embedding(x)
        x = torch.mean(x, dim=1)
        x = torch.relu(self.fc1(x))
        x = self.fc2(x)
        return x

# Load the annotation file that holds the report texts.
# NOTE(review): path is hard-coded and resolved relative to the CWD.
with open('data/iu_xray/iu_xray/annotation.json', 'r') as f:
    annotation = json.load(f)

# Collect the raw report strings from the training split.
texts = [item['report'] for item in annotation['train']]

# Build a whitespace-token vocabulary and convert each report to index lists.
# The vocabulary is sorted so the word -> index mapping is deterministic:
# iterating a bare set depends on PYTHONHASHSEED and would silently produce a
# different mapping on every run, desynchronizing these indices from whatever
# mapping the embedding model was trained with.
# NOTE(review): confirm the training script builds its vocabulary the same
# (sorted) way, otherwise the loaded embeddings are meaningless.
vocab = sorted(set(' '.join(texts).split()))
word_to_idx = {word: idx for idx, word in enumerate(vocab)}
texts_idx = [[word_to_idx[word] for word in text.split()] for text in texts]

# Wrap the tokenized reports in a Dataset for batched iteration.
text_dataset = TextDataset(texts_idx)

# Custom collate function: right-pad every sample to the batch maximum.
def collate_fn(batch):
    """Right-pad each token-index list in *batch* to the longest length.

    Returns a (batch, max_len) LongTensor.
    NOTE(review): the pad value 0 is also a real word index, and the model
    mean-pools over padded positions — confirm this matches how the
    embedding model was trained before changing it.
    """
    longest = max(len(seq) for seq in batch)
    padded = []
    for seq in batch:
        padded.append(seq + [0] * (longest - len(seq)))
    return torch.tensor(padded, dtype=torch.long)

# shuffle=False keeps batch order aligned with `texts`, which the JSON export
# below relies on for index alignment.
text_loader = DataLoader(text_dataset, batch_size=32, shuffle=False, collate_fn=collate_fn)

# Load the trained text-embedding model and switch it to inference mode.
text_model = TextEmbeddingModel(input_dim=len(vocab), embedding_dim=128)
# weights_only=True restricts torch.load to tensors and plain containers,
# avoiding arbitrary pickle code execution; a state dict (which is what
# load_state_dict consumes) loads fine under this restriction.
text_model.load_state_dict(torch.load('src/model_pth/text_embedding_model.pth', weights_only=True))
text_model.eval()

# Run every batch through the model (no gradients needed at inference time)
# and stack the per-batch outputs into one (num_reports, 64) matrix.
with torch.no_grad():
    batch_outputs = [text_model(batch).numpy() for batch in text_loader]

text_embeddings = np.vstack(batch_outputs)

# Assemble the text_embeddings.json payload: one record per training report,
# index-aligned with `texts` (the DataLoader was not shuffled).
text_embeddings_dict = {
    'train': []
}

train_records = text_embeddings_dict['train']
for idx, report in enumerate(texts):
    train_records.append({
        'id': idx,
        'report_embedding': text_embeddings[idx].tolist(),
        'report_text': report,
    })

# Write the embeddings next to the annotation file.
with open('data/iu_xray/iu_xray/text_embeddings.json', 'w') as f:
    json.dump(text_embeddings_dict, f, indent=4)