import os
import json
import torch
import numpy as np
from torch.utils.data import DataLoader, Dataset
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import train_test_split

# 定义文本数据集类
# Dataset of index-encoded texts padded to a fixed length.
class TextDataset(Dataset):
    """Dataset of token-index sequences, padded (or truncated) to a fixed length.

    Args:
        texts: list of texts, each a list of integer token indices.
        max_length: fixed length every returned tensor is padded/truncated to.
    """

    def __init__(self, texts, max_length):
        self.texts = texts
        self.max_length = max_length

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        # Truncate first: the original returned an over-long text at its full
        # length ([0] * negative is an empty list), which would break batch
        # collation if any text exceeded max_length.
        text = self.texts[idx][:self.max_length]
        # Right-pad with 0 (the reserved padding index) up to max_length.
        padded_text = text + [0] * (self.max_length - len(text))
        return torch.tensor(padded_text, dtype=torch.long)

# 定义文本嵌入模型
# Text embedding model: token embedding + mean pooling + two-layer projection.
class TextEmbeddingModel(nn.Module):
    """Maps a batch of token-index sequences to 64-dimensional embeddings.

    Each token is embedded, the sequence is mean-pooled over the time axis,
    and the pooled vector is projected embedding_dim -> 128 -> 64.
    """

    def __init__(self, input_dim, embedding_dim):
        super(TextEmbeddingModel, self).__init__()
        self.embedding = nn.Embedding(input_dim, embedding_dim)
        self.fc1 = nn.Linear(embedding_dim, 128)
        self.fc2 = nn.Linear(128, 64)

    def forward(self, x):
        # (batch, seq) -> (batch, seq, embedding_dim)
        embedded = self.embedding(x)
        # Mean-pool across the sequence axis -> (batch, embedding_dim).
        # NOTE: padding positions are included in the mean.
        pooled = embedded.mean(dim=1)
        hidden = torch.relu(self.fc1(pooled))
        return self.fc2(hidden)

# 定义三元组损失函数
# Standard triplet margin loss on squared Euclidean distances.
def triplet_loss(anchor, positive, negative, alpha=0.2):
    """Return mean(max(0, d(a, p) - d(a, n) + alpha)) over the batch.

    Distances are squared Euclidean, summed over the feature axis (dim=1);
    alpha is the margin.
    """
    d_pos = ((anchor - positive) ** 2).sum(dim=1)
    d_neg = ((anchor - negative) ** 2).sum(dim=1)
    hinge = torch.clamp(d_pos - d_neg + alpha, min=0.0)
    return hinge.mean()

# Load the annotation.json file with the IU X-ray report splits.
with open('data/iu_xray/iu_xray/annotation.json', 'r') as f:
    annotation = json.load(f)

# Extract the report text of every training example.
texts = [item['report'] for item in annotation['train']]

# Build the vocabulary.  Sorting makes the word -> index mapping
# deterministic across runs: iteration order of a set of strings is
# hash-randomized, so previously the mapping changed on every invocation
# and saved weights were unusable without the exact mapping.
vocab = sorted(set(' '.join(texts).split()))
# Reserve index 0 for padding; real words start at 1.  Previously some real
# word was assigned index 0 and became indistinguishable from padding.
word_to_idx = {word: idx + 1 for idx, word in enumerate(vocab)}
texts_idx = [[word_to_idx[word] for word in text.split()] for text in texts]

# Longest report (in tokens); used as the fixed padded length.
max_length = max(len(text) for text in texts_idx)

# Dataset and loader over the index-encoded reports.
text_dataset = TextDataset(texts_idx, max_length)
text_loader = DataLoader(text_dataset, batch_size=32, shuffle=True)

# Model and optimizer.  input_dim is len(vocab) + 1 because index 0 is the
# reserved padding slot.
text_model = TextEmbeddingModel(input_dim=len(vocab) + 1, embedding_dim=128)
text_optimizer = optim.Adam(text_model.parameters(), lr=0.001)

# Train the embedding model with a triplet loss.
#
# NOTE(review): the original loop built anchor/positive/negative from
# item[0]/item[1]/item[2] of each row — i.e. the first three TOKEN COLUMNS
# of the padded sequences, not three texts — which is meaningless as a
# triplet.  With no labels available, triplets are now formed in-batch:
# each report is its own anchor and positive, and a different report (the
# batch rolled by one row) is the negative.  This only pushes distinct
# reports apart; substitute real positive pairs (e.g. augmented views of
# the same report) if they become available.
num_epochs = 10
for epoch in range(num_epochs):
    text_model.train()
    running_loss = 0.0
    for batch in text_loader:
        # batch: LongTensor of shape (batch_size, max_length).
        anchor = batch
        positive = batch
        negative = torch.roll(batch, shifts=1, dims=0)

        text_optimizer.zero_grad()
        anchor_embedding = text_model(anchor)
        positive_embedding = text_model(positive)
        negative_embedding = text_model(negative)
        loss = triplet_loss(anchor_embedding, positive_embedding, negative_embedding)
        loss.backward()
        text_optimizer.step()
        running_loss += loss.item()
    print(f'Text Epoch {epoch+1}/{num_epochs}, Loss: {running_loss/len(text_loader)}')

# Persist the trained weights, plus the vocabulary mapping required to
# encode text for this model later.
os.makedirs('src/model_pth', exist_ok=True)
torch.save(text_model.state_dict(), 'src/model_pth/text_embedding_model.pth')
with open('src/model_pth/text_vocab.json', 'w') as f:
    json.dump(word_to_idx, f)