# ==============================================================================
# 文件: baselines.py
# 作用: 实现所有用于对比的基准方法。
#      每个基准方法都封装成一个独立的函数，方便在主程序中调用。
# ==============================================================================
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
import numpy as np
from transformers import AutoProcessor, AutoModel
from tqdm import tqdm
import os

# 设置Hugging Face的国内镜像，解决国内访问超时问题
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'

import config
from models import SimpleMLP

def run_fully_supervised(p_embeddings, u_embeddings, u_labels):
    """Baseline 1: fully supervised training (ideal upper bound).

    Trains a SimpleMLP on the union of P and U samples, using the TRUE
    labels of the unlabeled set U. This gives an upper-bound reference
    against which PU-learning methods can be compared.

    Args:
        p_embeddings: tensor (n_p, EMBEDDING_DIM) of positive-sample embeddings.
        u_embeddings: tensor (n_u, EMBEDDING_DIM) of unlabeled-sample embeddings.
        u_labels: tensor of the true 0/1 labels of the unlabeled samples
            (any integer/float dtype; cast to float internally).

    Returns:
        The trained SimpleMLP model, located on config.DEVICE.
    """
    print("\n--- 正在运行基准: 全监督学习 (理想上限) ---")
    
    # Merge P and U; P samples are all labeled positive (1), U samples
    # keep their ground-truth labels.
    full_train_embeds = torch.cat([p_embeddings, u_embeddings], dim=0)
    p_labels = torch.ones(p_embeddings.shape[0], 1)
    # Cast to float: BCEWithLogitsLoss requires float targets, and
    # torch.cat would fail on a dtype mismatch if u_labels is an int tensor.
    u_labels_reshaped = u_labels.reshape(-1, 1).float()
    full_train_labels = torch.cat([p_labels, u_labels_reshaped], dim=0)
    
    # Build dataset and loader.
    dataset = TensorDataset(full_train_embeds, full_train_labels)
    dataloader = DataLoader(dataset, batch_size=config.INITIAL_TRAIN_BATCH_SIZE, shuffle=True)
    
    # Initialize model, loss, and optimizer.
    model = SimpleMLP(input_dim=config.EMBEDDING_DIM, hidden_dims=config.HIDDEN_DIMS).to(config.DEVICE)
    criterion = nn.BCEWithLogitsLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=config.INITIAL_TRAIN_LR)
    
    # Standard mini-batch training loop.
    model.train()
    for epoch in range(config.INITIAL_TRAIN_EPOCHS):
        for embeds, labels in dataloader:
            embeds, labels = embeds.to(config.DEVICE), labels.to(config.DEVICE)
            optimizer.zero_grad()
            outputs = model(embeds)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
    
    print("全监督模型训练完成。")
    return model

def run_zero_shot(u_images_dataset, u_labels):
    """Baseline 2: zero-shot classification via a pretrained image-text model.

    Scores every unlabeled image against one prompt per class and predicts
    the class whose prompt has the highest image-text similarity.

    Args:
        u_images_dataset: dataset yielding batches whose first element is
            the image tensor/PIL batch for the processor.
        u_labels: tensor of ground-truth 0/1 labels for the unlabeled set.

    Returns:
        Tuple (predictions, true_labels) as numpy arrays; metric
        computation is left to the evaluator.
    """
    print("\n--- 正在运行基准: 零样本分类 ---")
    
    # Load the pretrained processor and model.
    processor = AutoProcessor.from_pretrained(config.FEATURE_EXTRACTOR_NAME)
    model = AutoModel.from_pretrained(config.FEATURE_EXTRACTOR_NAME).to(config.DEVICE)
    
    # One prompt per class; index 0 is the positive class by construction.
    text_prompts = [f"a photo of a {config.POSITIVE_CLASS_NAME}", f"a photo of a {config.NEGATIVE_CLASS_NAME}"]
    print(f"使用的文本提示: {text_prompts}")
    
    dataloader = DataLoader(u_images_dataset, batch_size=128)
    
    model.eval()
    batch_preds = []
    with torch.no_grad():
        for batch in tqdm(dataloader, desc="零样本预测"):
            images = batch[0]
            # Encode images together with both prompts to obtain
            # per-image similarity scores against each prompt.
            inputs = processor(text=text_prompts, images=images, return_tensors="pt", padding=True).to(config.DEVICE)
            sim_scores = model(**inputs).logits_per_image
            # argmax over the prompt axis gives the winning prompt index.
            # Prompt 0 is the positive class, so index 0 maps to label 1
            # and index 1 maps to label 0.
            winners = sim_scores.argmax(dim=-1)
            batch_preds.append((winners == 0).int().cpu())
            
    predictions = torch.cat(batch_preds).numpy()
    true_labels_np = u_labels.numpy()
    
    # Hand back raw predictions and ground truth for the evaluator.
    return predictions, true_labels_np

def run_naive_sampling(p_embeddings, u_embeddings):
    """Baseline 3: naive negative sampling.

    Randomly draws k samples from the unlabeled set U, treats them as
    negatives, and trains a SimpleMLP on P (positives) vs. the sampled
    pseudo-negatives.

    Args:
        p_embeddings: tensor (n_p, EMBEDDING_DIM) of positive-sample embeddings.
        u_embeddings: tensor (n_u, EMBEDDING_DIM) of unlabeled-sample embeddings.

    Returns:
        The trained SimpleMLP model, located on config.DEVICE.
    """
    print("\n--- 正在运行基准: 天真负采样 ---")
    
    # Sample k pseudo-negatives from U without replacement. Clamp k so
    # np.random.choice does not raise ValueError when U has fewer than
    # config.NAIVE_K_NEGATIVES samples.
    num_u = u_embeddings.shape[0]
    k = min(config.NAIVE_K_NEGATIVES, num_u)
    indices = np.random.choice(num_u, k, replace=False)
    naive_neg_embeds = u_embeddings[indices]
    
    # Train exactly as in the initial-model training: P labeled 1,
    # sampled pseudo-negatives labeled 0.
    train_embeds = torch.cat([p_embeddings, naive_neg_embeds], dim=0)
    train_labels = torch.cat([torch.ones(p_embeddings.shape[0], 1), torch.zeros(naive_neg_embeds.shape[0], 1)], dim=0)
    
    dataset = TensorDataset(train_embeds, train_labels)
    dataloader = DataLoader(dataset, batch_size=config.INITIAL_TRAIN_BATCH_SIZE, shuffle=True)
    
    model = SimpleMLP(input_dim=config.EMBEDDING_DIM, hidden_dims=config.HIDDEN_DIMS).to(config.DEVICE)
    criterion = nn.BCEWithLogitsLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=config.INITIAL_TRAIN_LR)
    
    # Standard mini-batch training loop.
    model.train()
    for epoch in range(config.INITIAL_TRAIN_EPOCHS):
        for embeds, labels in dataloader:
            embeds, labels = embeds.to(config.DEVICE), labels.to(config.DEVICE)
            optimizer.zero_grad()
            outputs = model(embeds)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            
    print("天真采样模型训练完成。")
    return model