import torch
import pandas as pd
import numpy as np
import torch
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, accuracy_score, f1_score
from transformers import (
    BertTokenizer, BertPreTrainedModel, BertModel,
    Trainer, TrainingArguments
)
from transformers import get_linear_schedule_with_warmup
from datasets import Dataset
import os
from torch import nn, optim
from torch.optim import AdamW
import random
from typing import Optional, Tuple
from tqdm import tqdm
import matplotlib.pyplot as plt

# Seed every RNG in play (Python, NumPy, PyTorch CPU + all GPUs) for reproducibility.
def set_seed(seed=42):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Trade cuDNN autotuning speed for deterministic kernel selection.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

def load_dual_relation_data(file_path, neg_samples_ratio=1.0):
    """Build train/validation datasets for the two relation-scoring tasks.

    Reads a CSV with columns ``hobby_label`` / ``attraction_text`` /
    ``attraction_label``. Positive pairs come straight from the rows; negative
    pairs are sampled so that the hobby belongs to a *different* coarse hobby
    category (relation 1: hobby-attraction) or the attraction type differs
    from the true one (relation 2: attraction-type).

    Args:
        file_path: path to the source CSV.
        neg_samples_ratio: negatives generated per positive (1.0 = balanced).

    Returns:
        ``(train_dataset, val_dataset)`` as HuggingFace ``datasets.Dataset``
        objects (80/20 split, fixed seed).

    Raises:
        ValueError: if a required column is missing, or the data lacks the
            category/type diversity needed to draw a negative sample.
    """
    df = pd.read_csv(file_path)

    required_cols = ["hobby_label", "attraction_text", "attraction_label"]
    missing_cols = [col for col in required_cols if col not in df.columns]
    if missing_cols:
        raise ValueError(f"数据集缺少必要列：{missing_cols}，请检查CSV格式")

    # 1. Map each concrete hobby string to a coarse category. Relation-1
    #    negatives are drawn from a different category than the positive's.
    category_mapping = {
        # 1. History & culture
        '历史研究': '历史文化类',
        '古代文物鉴赏': '历史文化类',
        '历史文化书籍阅读': '历史文化类',
        '古建筑绘画': '历史文化类',

        # 2. Nature & outdoors
        '自然风光摄影': '自然户外类',
        '徒步旅行': '自然户外类',
        '登山运动': '自然户外类',
        '滑雪体验': '自然户外类',
        '自然写生': '自然户外类',

        # 3. Parent-child activities
        '亲子手工制作': '亲子互动类',
        '亲子户外探险': '亲子互动类',
        '亲子摄影': '亲子互动类',

        # 4. Amusement experiences
        '去游乐园': '游乐体验类',
        '去游乐场': '游乐体验类',

        # 5. Ocean & seaside
        '看海': '海洋海滨类',
        '海边玩': '海洋海滨类',

        # 6. Animals & science outreach
        '看动物': '动物科普类',
        '动物科普': '动物科普类',
        '逛动物园': '动物科普类',

        # 7. Insect study
        '昆虫研究': '昆虫研究类',
        '观察昆虫': '昆虫研究类',
        '养小蚂蚁': '昆虫研究类',
        '养昆虫': '昆虫研究类',

        # 8. Food-related
        '特色美食品尝': '美食相关类',
        '美食烹饪学习': '美食相关类',
        '美食摄影': '美食相关类',

        # 9. Painting & drawing
        '风景绘画': '绘画创作类',
        '人物绘画': '绘画创作类',
        '写生创作': '绘画创作类',

        # 10. Music-related
        '乐器演奏': '音乐相关类',
        '音乐创作': '音乐相关类',
        '音乐欣赏': '音乐相关类',

        # 11. Reading
        '文学书籍阅读': '读书阅读类',
        '历史书籍阅读': '读书阅读类',
        '科普书籍阅读': '读书阅读类',

        # 12. Shopping
        '特色商品购物': '购物消费类',
        '古董购物': '购物消费类',
        '时尚购物': '购物消费类',

        # 13. Pet keeping
        '宠物饲养': '宠物饲养类',
        '宠物摄影': '宠物饲养类',
        '宠物知识学习': '宠物饲养类',
        '养宠物': '宠物饲养类',

        # 14. Old towns & architecture
        '古建筑研究': '古镇建筑类',
        '逛历史文化街区': '古镇建筑类',
        '游玩古镇': '古镇建筑类',

        # 15. Scenery photography
        '风景摄影': '景色摄影类',
        '景色拍摄': '景色摄影类',
        '旅行摄影': '景色摄影类'
    }

    # 2. Tag every row with its coarse hobby category.
    def get_hobby_category(hobby_label):
        # Strip the "爱好是" ("my hobby is") prefix before the lookup.
        pure_hobby = hobby_label.replace("爱好是", "")
        return category_mapping.get(pure_hobby, "其他类")

    df["hobby_category"] = df["hobby_label"].apply(get_hobby_category)

    # 3. Basic counts.
    all_types = df["attraction_label"].unique().tolist()
    num_samples = len(df)
    num_neg_samples = int(num_samples * neg_samples_ratio)

    # 4. Relation-1 samples (hobby - attraction).
    # 4.1 Positives: the original (hobby, attraction) rows.
    hobby_attraction_pos = pd.DataFrame({
        "text1": df["hobby_label"],
        "text2": df["attraction_text"],
        "rel1_score": 1.0,
        "hobby_category": df["hobby_category"]  # kept so negatives can exclude same-category hobbies
    })

    # 4.2 Negatives: hobby must come from a different category than the
    # reference positive. Precompute category -> hobbies-outside-category once
    # instead of re-filtering the whole DataFrame on every iteration
    # (was O(rows) per negative sample, i.e. O(n^2) overall).
    hobbies_outside_category = {
        cat: df.loc[df["hobby_category"] != cat, "hobby_label"].tolist()
        for cat in df["hobby_category"].unique()
    }
    neg_hobbies = []
    # Sample positives (with replacement) to serve as references for negatives.
    reference_pos_samples = hobby_attraction_pos.sample(n=num_neg_samples, replace=True)

    for _, ref_row in tqdm(reference_pos_samples.iterrows(), desc="生成关系1负样本", total=num_neg_samples):
        excluded_category = ref_row["hobby_category"]
        candidate_hobbies = hobbies_outside_category[excluded_category]
        if not candidate_hobbies:
            raise ValueError(f"类别 {excluded_category} 之外没有可用的爱好样本，请检查数据多样性")
        neg_hobbies.append(random.choice(candidate_hobbies))

    # Attraction texts for the negatives are chosen uniformly at random.
    neg_attractions = df["attraction_text"].sample(n=num_neg_samples, replace=True).tolist()
    hobby_attraction_neg = pd.DataFrame({
        "text1": neg_hobbies,
        "text2": neg_attractions,
        "rel1_score": 0.0
    })

    # Merge relation-1 positives and negatives.
    rel1_samples = pd.concat([hobby_attraction_pos.drop("hobby_category", axis=1),
                             hobby_attraction_neg], ignore_index=True)

    # 5. Relation-2 samples (attraction - type).
    # 5.1 Positives: the original (attraction, type) rows.
    attraction_type_pos = pd.DataFrame({
        "text2": df["attraction_text"],
        "text3": df["attraction_label"],
        "rel2_score": 1.0
    })

    # 5.2 Negatives: type must differ from the reference row's true type.
    # Precompute type -> all-other-types once (same hoisting as above).
    types_excluding = {t: [u for u in all_types if u != t] for t in all_types}
    neg_types = []
    ref_type_samples = df.sample(n=num_neg_samples, replace=True)

    for _, ref_row in tqdm(ref_type_samples.iterrows(), desc="生成关系2负样本", total=num_neg_samples):
        excluded_type = ref_row["attraction_label"]
        other_types = types_excluding[excluded_type]
        if not other_types:
            raise ValueError(f"类型 {excluded_type} 之外没有可用的类型样本，请检查数据多样性")
        neg_types.append(random.choice(other_types))

    neg_attractions2 = df["attraction_text"].sample(n=num_neg_samples, replace=True).tolist()
    attraction_type_neg = pd.DataFrame({
        "text2": neg_attractions2,
        "text3": neg_types,
        "rel2_score": 0.0
    })

    # Merge relation-2 positives and negatives.
    rel2_samples = pd.concat([attraction_type_pos, attraction_type_neg], ignore_index=True)

    # 6. Align both relation tables to the same length, shuffle with a fixed
    #    seed, then zip them row-wise into one combined table.
    #    NOTE(review): rows of rel1 and rel2 are paired arbitrarily after
    #    independent shuffles — the attraction in "attraction" and the type in
    #    "type" of one combined row need not describe the same place.
    min_len = min(len(rel1_samples), len(rel2_samples))
    rel1_samples = rel1_samples.sample(min_len, random_state=42).reset_index(drop=True)
    rel2_samples = rel2_samples.sample(min_len, random_state=42).reset_index(drop=True)

    combined_df = pd.DataFrame({
        "hobby": rel1_samples["text1"],
        "attraction": rel1_samples["text2"],
        "type": rel2_samples["text3"],
        "rel1_score": rel1_samples["rel1_score"],
        "rel2_score": rel2_samples["rel2_score"]
    })

    train_df, val_df = train_test_split(combined_df, test_size=0.2, random_state=42)

    # 7. Summary statistics (including a spot-check of negative categories).
    print(f"数据生成完成：总样本{len(combined_df)}条（含正负样本）")
    print(f"关系1（爱好-景点）正样本比例：{rel1_samples['rel1_score'].mean():.2%}")
    print(f"关系2（景点-类型）正样本比例：{rel2_samples['rel2_score'].mean():.2%}")

    # Spot-check up to 10 random negatives to confirm category remapping.
    sample_neg_hobbies = hobby_attraction_neg["text1"].sample(min(10, num_neg_samples)).tolist()
    sample_neg_categories = [category_mapping.get(h.replace("爱好是", ""), "其他类")
                            for h in sample_neg_hobbies]
    print(f"\n随机抽查10个负样本的类别：{sample_neg_categories}")

    return Dataset.from_pandas(train_df), Dataset.from_pandas(val_df)


# -------------------------- 2. Data preprocessing --------------------------
def preprocess_dual_relation(examples, tokenizer, max_length=128):
    """Tokenize the hobby/attraction/type texts and pass the two labels through.

    Each field is encoded independently with fixed-length padding so the three
    encodings can be fed to the shared BERT encoder separately.
    """
    def encode(texts):
        return tokenizer(texts, truncation=True, padding="max_length", max_length=max_length)

    hobby_enc = encode(examples["hobby"])
    attraction_enc = encode(examples["attraction"])
    type_enc = encode(examples["type"])

    return {
        # hobby encoding
        "hobby_input_ids": hobby_enc["input_ids"],
        "hobby_attention_mask": hobby_enc["attention_mask"],
        # attraction encoding
        "attraction_input_ids": attraction_enc["input_ids"],
        "attraction_attention_mask": attraction_enc["attention_mask"],
        # type encoding
        "type_input_ids": type_enc["input_ids"],
        "type_attention_mask": type_enc["attention_mask"],
        # regression labels for the two relations
        "rel1_labels": examples["rel1_score"],
        "rel2_labels": examples["rel2_score"],
    }


# -------------------------- 3. Dual-relation joint learning model --------------------------
class BertForDualRelation(BertPreTrainedModel):
    """BERT with a shared encoder and two relation-scoring heads.

    Relation 1 (hobby -> attraction) and relation 2 (attraction -> type) each
    get a small MLP head that maps the concatenation of two pooled [CLS]
    embeddings to a sigmoid score in [0, 1]. The joint loss is a weighted MSE
    over whichever labels are provided.
    """

    # Task weights for the joint loss; relation 1 is weighted higher.
    REL1_WEIGHT = 0.6
    REL2_WEIGHT = 0.4

    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)  # shared BERT encoder for all three texts

        # Relation-1 head: two hidden layers, LayerNorm + GELU + dropout.
        self.rel1_head = nn.Sequential(
            nn.Linear(config.hidden_size * 2, config.hidden_size),
            nn.LayerNorm(config.hidden_size),
            nn.GELU(),
            nn.Dropout(0.4),
            nn.Linear(config.hidden_size, config.hidden_size),
            nn.LayerNorm(config.hidden_size),
            nn.GELU(),
            nn.Dropout(0.4),
            nn.Linear(config.hidden_size, 1),
            nn.Sigmoid()
        )

        # Relation-2 head: Tanh variant with a narrowing hidden layer.
        self.rel2_head = nn.Sequential(
            nn.Linear(config.hidden_size * 2, config.hidden_size),
            nn.LayerNorm(config.hidden_size),
            nn.Tanh(),
            nn.Dropout(0.5),
            nn.Linear(config.hidden_size, config.hidden_size // 2),
            nn.LayerNorm(config.hidden_size // 2),
            nn.Tanh(),
            nn.Dropout(0.5),
            nn.Linear(config.hidden_size // 2, 1),
            nn.Sigmoid()
        )

        # Initialize weights per the pretrained-model convention.
        self.init_weights()

    def forward(
            self,
            hobby_input_ids: Optional[torch.Tensor] = None,
            hobby_attention_mask: Optional[torch.Tensor] = None,
            attraction_input_ids: Optional[torch.Tensor] = None,
            attraction_attention_mask: Optional[torch.Tensor] = None,
            type_input_ids: Optional[torch.Tensor] = None,
            type_attention_mask: Optional[torch.Tensor] = None,
            rel1_labels: Optional[torch.Tensor] = None,
            rel2_labels: Optional[torch.Tensor] = None,
    ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """Score whichever relations the provided inputs allow.

        Returns:
            (total_loss, rel1_score, rel2_score) — any element is None when
            the corresponding inputs/labels were not supplied.
        """
        total_loss = None
        rel1_score = None
        rel2_score = None

        # Encode each text with the shared encoder; index 1 of the BERT
        # output tuple is the pooled [CLS] representation.
        hobby_embedding = None
        attraction_embedding = None
        type_embedding = None

        if hobby_input_ids is not None:
            hobby_outputs = self.bert(hobby_input_ids, attention_mask=hobby_attention_mask)
            hobby_embedding = hobby_outputs[1]

        if attraction_input_ids is not None:
            attraction_outputs = self.bert(attraction_input_ids, attention_mask=attraction_attention_mask)
            attraction_embedding = attraction_outputs[1]

        if type_input_ids is not None:
            type_outputs = self.bert(type_input_ids, attention_mask=type_attention_mask)
            type_embedding = type_outputs[1]

        # Relation-1 score (hobby - attraction): concat then head.
        if hobby_embedding is not None and attraction_embedding is not None:
            rel1_input = torch.cat([hobby_embedding, attraction_embedding], dim=1)
            rel1_score = self.rel1_head(rel1_input).squeeze(-1)

        # Relation-2 score (attraction - type): concat then head.
        if attraction_embedding is not None and type_embedding is not None:
            rel2_input = torch.cat([attraction_embedding, type_embedding], dim=1)
            rel2_score = self.rel2_head(rel2_input).squeeze(-1)

        # Weighted MSE over whichever tasks have labels. BUGFIX: the
        # normalizer previously accumulated 0.7/0.3 while the losses were
        # weighted 0.6/0.4, silently rescaling single-task losses; now we
        # divide by the sum of the weights actually applied.
        if rel1_labels is not None or rel2_labels is not None:
            loss_fct = nn.MSELoss()
            current_loss = 0.0
            weight_sum = 0.0

            if rel1_labels is not None and rel1_score is not None:
                rel1_loss = loss_fct(rel1_score, rel1_labels.float())
                current_loss += rel1_loss * self.REL1_WEIGHT
                weight_sum += self.REL1_WEIGHT

            if rel2_labels is not None and rel2_score is not None:
                rel2_loss = loss_fct(rel2_score, rel2_labels.float())
                current_loss += rel2_loss * self.REL2_WEIGHT
                weight_sum += self.REL2_WEIGHT

            if weight_sum > 0:
                total_loss = current_loss / weight_sum

        return (total_loss, rel1_score, rel2_score)


# -------------------------- 4. Evaluation metrics --------------------------
def compute_dual_metrics(eval_pred):
    """Compute regression (MSE/RMSE) and thresholded classification (acc/F1)
    metrics for both relations.

    ``eval_pred`` is ((rel1_preds, rel2_preds), (rel1_labels, rel2_labels)).
    """
    (rel1_preds, rel2_preds), (rel1_labels, rel2_labels) = eval_pred

    metrics = {}
    for prefix, preds, labels in (
        ("rel1", rel1_preds, rel1_labels),
        ("rel2", rel2_preds, rel2_labels),
    ):
        mse = mean_squared_error(labels, preds)
        # Binarize at 0.5 for the classification-style metrics.
        preds_binary = (preds > 0.5).astype(int)

        metrics[f"{prefix}_mse"] = mse
        metrics[f"{prefix}_rmse"] = np.sqrt(mse)
        metrics[f"{prefix}_acc"] = accuracy_score(labels, preds_binary)
        metrics[f"{prefix}_f1"] = f1_score(labels, preds_binary, average='weighted')

    return metrics

# -------------------------- 5. Custom data collator --------------------------
class DualRelationDataCollator:
    """Collate features for the dual-relation model.

    Pads the three tokenized text fields (hobby / attraction / type)
    independently via the tokenizer, and stacks the two relation labels into
    float32 tensors.
    """

    def __init__(self, tokenizer):
        self.tokenizer = tokenizer

    def __call__(self, features):
        batch = {}

        # Pad each text field on its own; they may have different lengths.
        for field in ("hobby", "attraction", "type"):
            padded = self.tokenizer.pad(
                {
                    "input_ids": [feat[f"{field}_input_ids"] for feat in features],
                    "attention_mask": [feat[f"{field}_attention_mask"] for feat in features],
                },
                return_tensors="pt",
            )
            batch[f"{field}_input_ids"] = padded["input_ids"]
            batch[f"{field}_attention_mask"] = padded["attention_mask"]

        # Labels are plain floats per example; stack into 1-D float tensors.
        batch["rel1_labels"] = torch.tensor(
            [feat["rel1_labels"] for feat in features], dtype=torch.float32
        )
        batch["rel2_labels"] = torch.tensor(
            [feat["rel2_labels"] for feat in features], dtype=torch.float32
        )

        return batch


# -------------------------- 6. Training function --------------------------
def train_dual_relation_model(
        model_name="bert-base-chinese",
        data_path="./user_profile_attraction_preference.csv",
        output_dir="./optimized_dual_relation_bert",
        epochs=20,
        batch_size=16,
        learning_rate=2e-5,
        max_length=128,
        neg_samples_ratio=1.0,
        weight_decay=0.005,
        warmup_ratio=0.1
):
    """Train the dual-relation model end to end and save it to ``output_dir``.

    Loads/generates the data, tokenizes it, runs a HuggingFace ``Trainer``
    with a custom AdamW optimizer + linear warmup schedule, saves the model
    and tokenizer, and prints final evaluation metrics.

    Returns:
        (model, tokenizer) after training.
    """
    # Reproducibility.
    set_seed()

    # Make sure the output directory exists.
    os.makedirs(output_dir, exist_ok=True)

    # Build train/validation splits (negatives generated on the fly).
    print("开始加载和处理数据...")
    train_dataset, val_dataset = load_dual_relation_data(
        data_path, neg_samples_ratio=neg_samples_ratio
    )

    # Load tokenizer and model from the pretrained checkpoint.
    print(f"加载预训练模型：{model_name}")
    tokenizer = BertTokenizer.from_pretrained(model_name)
    model = BertForDualRelation.from_pretrained(model_name)

    # NOTE: mixed precision is delegated entirely to Trainer via fp16 below.
    # A manually-created GradScaler used to be built here but was never used;
    # it has been removed as dead code.

    # Tokenize the three text fields for every example.
    print("开始处理双关系文本对...")
    tokenized_train = train_dataset.map(
        lambda x: preprocess_dual_relation(x, tokenizer, max_length),
        batched=True,
        remove_columns=train_dataset.column_names
    )
    tokenized_val = val_dataset.map(
        lambda x: preprocess_dual_relation(x, tokenizer, max_length),
        batched=True,
        remove_columns=val_dataset.column_names
    )

    # Expose the tokenized columns as PyTorch tensors.
    feature_columns = ["hobby_input_ids", "hobby_attention_mask",
                       "attraction_input_ids", "attraction_attention_mask",
                       "type_input_ids", "type_attention_mask",
                       "rel1_labels", "rel2_labels"]
    tokenized_train.set_format("torch", columns=feature_columns)
    tokenized_val.set_format("torch", columns=feature_columns)

    # Schedule lengths. Clamp to >=1 so tiny datasets (len < batch_size)
    # don't produce eval_steps/save_steps/num_training_steps of 0.
    steps_per_epoch = max(1, len(tokenized_train) // batch_size)
    total_steps = steps_per_epoch * epochs
    warmup_steps = int(total_steps * warmup_ratio)
    half_epoch_steps = max(1, steps_per_epoch // 2)  # evaluate/save twice per epoch

    # Trainer configuration.
    # NOTE(review): with some transformers versions, step-based evaluation
    # also requires evaluation_strategy="steps" — confirm against the pinned
    # version if eval never fires.
    training_args = TrainingArguments(
        output_dir=output_dir,
        overwrite_output_dir=True,
        num_train_epochs=epochs,
        per_device_train_batch_size=batch_size,
        per_device_eval_batch_size=batch_size * 2,  # larger eval batches are safe (no grads)
        learning_rate=learning_rate,
        weight_decay=weight_decay,
        logging_dir="./logs",
        logging_steps=50,
        do_eval=True,
        eval_steps=half_epoch_steps,
        save_steps=half_epoch_steps,
        save_total_limit=3,  # keep only the 3 most recent checkpoints
        metric_for_best_model="rel1_rmse",
        greater_is_better=False,
        # load_best_model_at_end intentionally omitted for compatibility with
        # the transformers version in use.
        fp16=torch.cuda.is_available(),
        gradient_accumulation_steps=1,
        gradient_checkpointing=False,
        optim="adamw_torch",
        lr_scheduler_type="linear",
    )

    # Custom collator for the three-text batch layout.
    data_collator = DualRelationDataCollator(tokenizer)

    # Explicit optimizer + linear warmup schedule (overrides Trainer defaults).
    optimizer = AdamW(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_train,
        eval_dataset=tokenized_val,
        data_collator=data_collator,
        compute_metrics=compute_dual_metrics,
        optimizers=(optimizer, scheduler)
    )

    # Train.
    print("开始双关系联合学习...")
    trainer.train()

    # Persist the final model and tokenizer.
    model.save_pretrained(output_dir)
    tokenizer.save_pretrained(output_dir)
    print(f"模型保存至：{output_dir}")

    # Final evaluation summary.
    eval_results = trainer.evaluate()
    print(f"\n=== 最终评估结果 ===")
    print(f"关系1（爱好-景点）：MSE={eval_results['eval_rel1_mse']:.4f}, RMSE={eval_results['eval_rel1_rmse']:.4f}")
    print(f"                    准确率={eval_results['eval_rel1_acc']:.2%}, F1分数={eval_results['eval_rel1_f1']:.4f}")
    print(f"关系2（景点-类型）：MSE={eval_results['eval_rel2_mse']:.4f}, RMSE={eval_results['eval_rel2_rmse']:.4f}")
    print(f"                    准确率={eval_results['eval_rel2_acc']:.2%}, F1分数={eval_results['eval_rel2_f1']:.4f}")

    return model, tokenizer


# -------------------------- 7. Model application class --------------------------
class DualRelationScorer:
    """Inference wrapper around a trained ``BertForDualRelation`` checkpoint.

    Loads the model/tokenizer from ``model_dir``, moves the model to GPU when
    available, and exposes single-pair and batched scoring helpers.
    """

    def __init__(self, model_dir, max_length=128):
        self.tokenizer = BertTokenizer.from_pretrained(model_dir)
        self.model = BertForDualRelation.from_pretrained(model_dir)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model.to(self.device)
        self.model.eval()
        self.max_length = max_length

    def _get_embedding(self, text):
        """Encode one text and return its pooled [CLS] embedding."""
        encoded = self.tokenizer(
            text,
            return_tensors="pt",
            truncation=True,
            padding="max_length",
            max_length=self.max_length,
        ).to(self.device)
        with torch.no_grad():
            return self.model.bert(**encoded)[1]

    def _score_pair(self, emb_a, emb_b, head):
        """Concatenate two embeddings and push them through a relation head."""
        with torch.no_grad():
            return head(torch.cat([emb_a, emb_b], dim=1)).squeeze(-1)

    def score_hobby_attraction(self, hobby_text, attraction_text):
        """Relation-1 score: relevance of a hobby to an attraction (0..1)."""
        score = self._score_pair(
            self._get_embedding(hobby_text),
            self._get_embedding(attraction_text),
            self.model.rel1_head,
        )
        return score.item()

    def score_attraction_type(self, attraction_text, type_text):
        """Relation-2 score: relevance of an attraction to a type label (0..1)."""
        score = self._score_pair(
            self._get_embedding(attraction_text),
            self._get_embedding(type_text),
            self.model.rel2_head,
        )
        return score.item()

    def batch_score_hobby_attraction(self, hobby_texts, attraction_texts):
        """Batched relation-1 scoring (chunks of 32 pairs) for efficiency."""
        chunk = 32
        scores = []
        for start in range(0, len(hobby_texts), chunk):
            hobby_enc = self.tokenizer(
                hobby_texts[start:start + chunk],
                return_tensors="pt", truncation=True,
                padding="max_length", max_length=self.max_length,
            ).to(self.device)
            attraction_enc = self.tokenizer(
                attraction_texts[start:start + chunk],
                return_tensors="pt", truncation=True,
                padding="max_length", max_length=self.max_length,
            ).to(self.device)

            with torch.no_grad():
                hobby_emb = self.model.bert(**hobby_enc)[1]
                attraction_emb = self.model.bert(**attraction_enc)[1]
                chunk_scores = self.model.rel1_head(
                    torch.cat([hobby_emb, attraction_emb], dim=1)
                ).squeeze(-1)

            scores.extend(chunk_scores.cpu().numpy().tolist())

        return scores


# -------------------------- Demo run --------------------------
if __name__ == "__main__":
    # Train the model end to end.
    model, tokenizer = train_dual_relation_model(
        data_path="./user_profile_attraction_preference.csv",
        output_dir="./optimized_dual_relation_bert_25epochs",
        epochs=25,
        batch_size=16,
        neg_samples_ratio=1.0,
        learning_rate=1.5e-5,
        weight_decay=0.005
    )

    # Smoke-test inference on a few hand-written (hobby, attraction, type) cases.
    scorer = DualRelationScorer("./optimized_dual_relation_bert_25epochs")
    test_cases = [
        (
            "爱好是古建筑绘画",
            "北京故宫博物院是明清两代皇家宫殿，有太和殿、乾清宫等古建筑",
            "历史古迹"
        ),
        (
            "爱好是户外徒步",
            "上海迪士尼乐园有过山车和卡通人物表演",
            "主题乐园"
        ),
        (
            "爱好是亲子手工制作",
            "广州长隆欢乐世界有垂直过山车、十环过山车等刺激游乐设施，还有儿童专属的开心乐园区域",
            "亲子乐园"
        ),
        (
            "爱好是自然风光摄影",
            "张家界国家森林公园有壮观的石英砂岩峰林地貌，如哈利路亚山",
            "自然景观"
        )
    ]

    # Per-case scoring.
    print("\n=== 双关系评分测试结果 ===")
    for idx, (hobby, attraction, type_label) in enumerate(test_cases, 1):
        rel1 = scorer.score_hobby_attraction(hobby, attraction)
        rel2 = scorer.score_attraction_type(attraction, type_label)
        # Truncate long attraction descriptions for display.
        shown_attraction = f"{attraction[:50]}..." if len(attraction) > 50 else attraction
        print(f"测试用例 {idx}：")
        print(f"  爱好：{hobby}")
        print(f"  景点：{shown_attraction}")
        print(f"  类型：{type_label}")
        print(f"  爱好-景点关联度：{rel1:.4f}（越高越匹配）")
        print(f"  景点-类型关联度：{rel2:.4f}（越高越匹配）")
        print("-" * 80)

    # Batched inference over the same cases.
    print("\n=== 批量推理测试 ===")
    hobbies = [hobby for hobby, _, _ in test_cases]
    attractions = [attraction for _, attraction, _ in test_cases]
    batch_scores = scorer.batch_score_hobby_attraction(hobbies, attractions)
    print(f"批量计算的爱好-景点关联度：{[f'{score:.4f}' for score in batch_scores]}")