import os
import json
import torch
import numpy as np
from PIL import Image
from torchvision import transforms
from torch.utils.data import DataLoader, Dataset
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import train_test_split
from dml import TripletModel  # assumes TripletModel is defined in a separate local module
from transformers import BertModel, BertTokenizer
from tqdm import tqdm

# Select the compute device (GPU when available, otherwise CPU).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load the IU X-ray image dataset.
def load_iu_xray_data(data_dir, max_samples=1000):
    """Load up to ``max_samples`` grayscale 32x32 images from an IU X-ray tree.

    Recursively walks ``data_dir``, reading every ``.png`` file, converting it
    to grayscale, resizing to 32x32 and scaling pixel values to [0, 1].  The
    name of the directory containing each file is used as its label.

    Args:
        data_dir: Root directory of the extracted IU X-ray dataset.
        max_samples: Stop after collecting this many images.

    Returns:
        Tuple ``(images, labels)``: ``images`` is a float32 array of shape
        (N, 32, 32); ``labels`` is an array of directory-name strings.
    """
    images = []
    labels = []
    for root, dirs, files in os.walk(data_dir):
        # Sort in place for a deterministic traversal order across platforms;
        # os.walk otherwise yields filesystem-dependent ordering.
        dirs.sort()
        for file in sorted(files):
            # Case-insensitive match so ".PNG" files are not silently skipped.
            if not file.lower().endswith(".png"):
                continue
            img_path = os.path.join(root, file)
            # Context-manage the file so the handle is closed promptly
            # instead of relying on garbage collection.
            with Image.open(img_path) as img:
                img = img.convert('L')       # grayscale
                img = img.resize((32, 32))   # uniform size
                arr = np.array(img).astype('float32') / 255  # scale to [0, 1]
            images.append(arr)
            labels.append(os.path.basename(root))  # folder name acts as the label
            if len(images) >= max_samples:
                return np.array(images), np.array(labels)
    return np.array(images), np.array(labels)

# Image dataset wrapper.
class ImageDataset(Dataset):
    """Expose an array of pre-normalised grayscale images as a torch Dataset.

    Assumes ``images`` is shaped (N, H, W) with float32 values already scaled
    to [0, 1] (as produced by ``load_iu_xray_data``) — TODO confirm callers
    never pass uint8 data.  Each item is returned as a (1, H, W) float tensor.
    """

    def __init__(self, images):
        self.images = images

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        # For a float32 HxW ndarray, transforms.ToTensor() just adds a leading
        # channel dimension without rescaling; reproduce that directly with
        # torch and skip the per-item torchvision Compose overhead.
        img = torch.from_numpy(np.ascontiguousarray(self.images[idx]))
        return img.unsqueeze(0)

# Text dataset wrapper.
class TextDataset(Dataset):
    """Dataset of report strings, tokenised with BERT on access.

    Each item is a ``(input_ids, attention_mask)`` pair of 1-D tensors of
    length 128 (padded/truncated).
    """

    def __init__(self, texts):
        self.texts = texts
        # One shared tokenizer instance for the whole dataset.
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        encoding = self.tokenizer(
            self.texts[idx],
            padding='max_length',
            truncation=True,
            max_length=128,
            return_tensors='pt',
        )
        input_ids = encoding['input_ids'].squeeze(0)
        attention_mask = encoding['attention_mask'].squeeze(0)
        return input_ids, attention_mask

# Text embedding model.
class TextEmbeddingModel(nn.Module):
    """BERT encoder followed by a linear projection to a 64-d embedding."""

    def __init__(self):
        super().__init__()
        self.bert = BertModel.from_pretrained('bert-base-uncased')
        self.fc = nn.Linear(self.bert.config.hidden_size, 64)

    def forward(self, input_ids, attention_mask):
        bert_out = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        # Project the [CLS]-pooled representation down to the embedding size.
        return self.fc(bert_out.pooler_output)

# Margin-based triplet loss.
def triplet_loss(anchor, positive, negative, alpha=0.2):
    """Mean hinge loss over triplets using squared Euclidean distances.

    A triplet contributes ``max(0, d(a, p) - d(a, n) + alpha)``: zero when the
    anchor is closer to the positive than to the negative by at least the
    margin ``alpha``.
    """
    d_pos = (anchor - positive).pow(2).sum(dim=1)
    d_neg = (anchor - negative).pow(2).sum(dim=1)
    # relu(x) == clamp(x, min=0): only margin violations contribute.
    violations = torch.relu(d_pos - d_neg + alpha)
    return violations.mean()

# Load the image data.
data_dir = 'data/iu_xray/iu_xray'  # Path to the extracted IU X-ray dataset.
X_train, y_train = load_iu_xray_data(data_dir, max_samples=1000)

# Load the annotation.json file that ships with the dataset.
with open('data/iu_xray/iu_xray/annotation.json', 'r') as f:
    annotation = json.load(f)

# Collect the free-text reports for the training split.
# NOTE(review): images are capped at max_samples=1000 but *all* train reports
# are collected, so the two lists may differ in length — confirm the intended
# image/report pairing.
texts = []
for item in annotation['train']:
    texts.append(item['report'])

# Build the datasets.
image_dataset = ImageDataset(X_train)
text_dataset = TextDataset(texts)

# Build the data loaders.
# NOTE(review): shuffle=True here also affects the embedding-generation pass
# later in this script, which scrambles any index-based pairing between
# images, reports and y_train — verify this is intentional.
image_loader = DataLoader(image_dataset, batch_size=32, shuffle=True)
text_loader = DataLoader(text_dataset, batch_size=32, shuffle=True)

# Initialise the models and their optimisers.
image_model = TripletModel().to(device)
text_model = TextEmbeddingModel().to(device)
image_optimizer = optim.Adam(image_model.parameters(), lr=0.001)
text_optimizer = optim.Adam(text_model.parameters(), lr=0.001)

# Training hyper-parameters.
num_epochs = 3
batch_size = 64  # NOTE(review): unused — the loaders above hard-code batch_size=32.

# Mixed-precision (CUDA AMP) utilities.
from torch.cuda.amp import autocast, GradScaler
scaler = GradScaler()

# (Removed: a commented-out, non-AMP copy of the text-model training loop that
# duplicated the active mixed-precision loop below.)

# Train the text embedding model with mixed precision.
for epoch in range(num_epochs):
    text_model.train()
    running_loss = 0.0
    progress_bar = tqdm(text_loader, desc=f'Text Epoch {epoch+1}/{num_epochs}', leave=False)
    for step, batch in enumerate(progress_bar):
        input_ids, attention_mask = batch
        input_ids = input_ids.to(device)
        attention_mask = attention_mask.to(device)

        text_optimizer.zero_grad()

        # NOTE(review): anchor, positive and negative all come from the *same*
        # inputs; the three forward passes differ only through dropout, so the
        # positive and negative are statistically interchangeable and the
        # triplet objective is degenerate as written.  Real triplet mining
        # (distinct positive/negative samples) is needed — confirm intent.
        with autocast():
            anchor_embedding = text_model(input_ids, attention_mask)
            positive_embedding = text_model(input_ids, attention_mask)
            negative_embedding = text_model(input_ids, attention_mask)
            loss = triplet_loss(anchor_embedding, positive_embedding, negative_embedding)

        # Scaled backward + optimiser step, per the standard AMP recipe.
        scaler.scale(loss).backward()
        scaler.step(text_optimizer)
        scaler.update()

        running_loss += loss.item()
        # Fix: average over the batches processed so far.  The original divided
        # by len(progress_bar) + 1 (total batch count), so the displayed value
        # was not a running mean.
        progress_bar.set_postfix({'Loss': running_loss / (step + 1)})

    print(f'Text Epoch {epoch+1}/{num_epochs}, Loss: {running_loss/len(text_loader)}')


# Persist the trained text-embedding weights.
torch.save(text_model.state_dict(), 'text_embedding_model.pth')

# Reload the weights into a fresh model instance (round-trip sanity check).
text_model = TextEmbeddingModel().to(device)
# Fix: map_location keeps the load working when the checkpoint was written on
# a different device (e.g. saved on GPU, reloaded on a CPU-only machine).
text_model.load_state_dict(torch.load('text_embedding_model.pth', map_location=device))
text_model.eval()

# Generate embeddings for every image and report.
image_model.eval()
text_model.eval()

# Fix: use dedicated *unshuffled* loaders for inference.  The loaders built
# for training use shuffle=True, which would scramble the order of the
# embeddings on every pass and break the index-based pairing with y_train and
# between images and reports in the export step below.
eval_image_loader = DataLoader(image_dataset, batch_size=32, shuffle=False)
eval_text_loader = DataLoader(text_dataset, batch_size=32, shuffle=False)

image_embeddings = []
text_embeddings = []
with torch.no_grad():
    for batch in tqdm(eval_image_loader, desc='Generating Image Embeddings', leave=False):
        batch = batch.to(device)
        image_embeddings.append(image_model(batch).cpu().numpy())
    for batch in tqdm(eval_text_loader, desc='Generating Text Embeddings', leave=False):
        input_ids, attention_mask = batch
        input_ids = input_ids.to(device)
        attention_mask = attention_mask.to(device)
        text_embeddings.append(text_model(input_ids, attention_mask).cpu().numpy())

image_embeddings = np.vstack(image_embeddings)
text_embeddings = np.vstack(text_embeddings)

# Assemble the embeddings.json payload.
embeddings_dict = {
    'train': []
}

# NOTE(review): zip() silently truncates to the shorter of the two embedding
# lists if the image and report counts differ — confirm they are meant to
# align one-to-one by index.
for i, (img, text) in enumerate(zip(image_embeddings, text_embeddings)):
    embeddings_dict['train'].append({
        'id': i,
        'image_embedding': img.tolist(),
        'report_embedding': text.tolist(),
        # NOTE(review): this path is fabricated from the label and loop index;
        # it is not the actual file path the image was loaded from — verify
        # before anything downstream tries to open it.
        'image_path': f'data/iu_xray/iu_xray/{y_train[i]}/{i}.png'
    })

# Write embeddings.json to disk.
with open('data/iu_xray/iu_xray/embeddings.json', 'w') as f:
    json.dump(embeddings_dict, f, indent=4)
    print("finish")