import torch
from transformers import BertTokenizer, BertPreTrainedModel, BertModel
from typing import List, Tuple, Union
from optimized_finetune import BertForDualRelation
class DualRelationProcessor:
    """Dual-relation processor for hobby–attraction and attraction–type matching.

    Wraps a fine-tuned ``BertForDualRelation`` model and exposes helpers to
    score the two relations, extract text embeddings, and compute cosine
    similarity between embeddings.
    """

    def __init__(self, model_dir: str, max_length: int = 128):
        """Load tokenizer and model from *model_dir*; move model to GPU if available.

        Args:
            model_dir: Directory containing the saved model and tokenizer.
            max_length: Maximum token length used when encoding text.
        """
        self.tokenizer = BertTokenizer.from_pretrained(model_dir)
        # Reuses the model class defined in optimized_finetune.
        self.model = BertForDualRelation.from_pretrained(model_dir)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model.to(self.device)
        self.model.eval()  # inference only — disables dropout etc.
        self.max_length = max_length

    # -------------------------- 1. Model loading --------------------------
    @classmethod
    def from_pretrained(cls, model_dir: str, max_length: int = 128) -> "DualRelationProcessor":
        """Alternate constructor: build a processor from a saved model directory."""
        return cls(model_dir, max_length)

    # -------------------------- 2. Hobby-attraction relation --------------------------
    def get_hobby_attraction_score(self, hobby_text: str, attraction_text: str) -> float:
        """Score how well a hobby matches an attraction.

        Args:
            hobby_text: Hobby description (e.g. "爱好是古建筑绘画").
            attraction_text: Attraction description text.

        Returns:
            Match score; higher means a better match. (Assumed to lie in [0, 1]
            per the original docs — depends on rel1_head in optimized_finetune.)
        """
        hobby_emb = self.get_hobby_embedding(hobby_text)
        attraction_emb = self.get_attraction_embedding(attraction_text)
        return self.get_hobby_attraction_score_from_embeddings(hobby_emb, attraction_emb)

    def get_hobby_embedding(self, hobby_text: str) -> torch.Tensor:
        """Return the embedding vector for a hobby text."""
        return self._get_text_embedding(hobby_text)

    def get_attraction_embedding(self, attraction_text: str) -> torch.Tensor:
        """Return the embedding vector for an attraction text."""
        return self._get_text_embedding(attraction_text)

    def get_hobby_attraction_score_from_embeddings(
            self, hobby_emb: torch.Tensor, attraction_emb: torch.Tensor
    ) -> float:
        """Score the hobby-attraction relation from precomputed embeddings.

        Args:
            hobby_emb: Embedding of the hobby text, shape (1, hidden).
            attraction_emb: Embedding of the attraction text, shape (1, hidden).

        Returns:
            Match score as a Python float.
        """
        return self._score_from_embeddings(hobby_emb, attraction_emb, self.model.rel1_head)

    # -------------------------- 3. Attraction-type relation --------------------------
    def get_attraction_type_score(self, attraction_text: str, type_text: str) -> float:
        """Score how well an attraction matches a type label.

        Args:
            attraction_text: Attraction description text.
            type_text: Type label text (e.g. "历史古迹").

        Returns:
            Match score; higher means a better match.
        """
        attraction_emb = self.get_attraction_embedding(attraction_text)
        type_emb = self.get_type_embedding(type_text)
        return self.get_attraction_type_score_from_embeddings(attraction_emb, type_emb)

    def get_type_embedding(self, type_text: str) -> torch.Tensor:
        """Return the embedding vector for a type text."""
        return self._get_text_embedding(type_text)

    def get_attraction_type_score_from_embeddings(
            self, attraction_emb: torch.Tensor, type_emb: torch.Tensor
    ) -> float:
        """Score the attraction-type relation from precomputed embeddings.

        Args:
            attraction_emb: Embedding of the attraction text, shape (1, hidden).
            type_emb: Embedding of the type text, shape (1, hidden).

        Returns:
            Match score as a Python float.
        """
        return self._score_from_embeddings(attraction_emb, type_emb, self.model.rel2_head)

    # -------------------------- 4. Batch scoring --------------------------
    def batch_get_hobby_attraction_scores(
            self, hobby_texts: List[str], attraction_texts: List[str]
    ) -> List[float]:
        """Batch-score hobby-attraction pairs (element-wise over the two lists)."""
        if len(hobby_texts) != len(attraction_texts):
            raise ValueError("爱好文本列表与景点文本列表长度必须一致")

        return self._batch_get_scores(
            texts1=hobby_texts,
            texts2=attraction_texts,
            score_head=self.model.rel1_head
        )

    def batch_get_attraction_type_scores(
            self, attraction_texts: List[str], type_texts: List[str]
    ) -> List[float]:
        """Batch-score attraction-type pairs (element-wise over the two lists)."""
        if len(attraction_texts) != len(type_texts):
            raise ValueError("景点文本列表与类型文本列表长度必须一致")

        return self._batch_get_scores(
            texts1=attraction_texts,
            texts2=type_texts,
            score_head=self.model.rel2_head
        )

    # -------------------------- 5. Cosine similarity --------------------------
    def get_cosine_similarity(self, emb1: torch.Tensor, emb2: torch.Tensor) -> float:
        """Compute cosine similarity between two embedding vectors.

        Args:
            emb1: First embedding, shape (hidden,) or (1, hidden).
            emb2: Second embedding, shape (hidden,) or (1, hidden).

        Returns:
            Cosine similarity in [-1, 1]; higher means more similar.
        """
        with torch.no_grad():
            # Make sure both tensors live on the same device as the model.
            emb1 = emb1.to(self.device)
            emb2 = emb2.to(self.device)

            # Accept 1-D inputs by promoting them to a batch of one.
            if emb1.dim() == 1:
                emb1 = emb1.unsqueeze(0)
            if emb2.dim() == 1:
                emb2 = emb2.unsqueeze(0)

            # BUG FIX: the original called a bare `cosine_similarity` that was
            # never imported, raising NameError at runtime.
            similarity = torch.nn.functional.cosine_similarity(emb1, emb2, dim=1).item()
        return similarity

    # -------------------------- Internal helpers --------------------------
    def _encode(self, texts: Union[str, List[str]]):
        """Tokenize one text or a list of texts and move the tensors to the device."""
        return self.tokenizer(
            texts,
            return_tensors="pt",
            truncation=True,
            padding="max_length",
            max_length=self.max_length
        ).to(self.device)

    def _get_text_embedding(self, text: str) -> torch.Tensor:
        """Return the BERT sentence embedding for a single text, shape (1, hidden)."""
        inputs = self._encode(text)
        with torch.no_grad():
            outputs = self.model.bert(**inputs)
        # outputs[1] is the pooler output ([CLS] passed through the pooler layer).
        return outputs[1]

    def _score_from_embeddings(
            self, emb_a: torch.Tensor, emb_b: torch.Tensor, score_head
    ) -> float:
        """Concatenate two embeddings and run them through a relation head."""
        with torch.no_grad():
            emb_a = emb_a.to(self.device)
            emb_b = emb_b.to(self.device)
            score = score_head(torch.cat([emb_a, emb_b], dim=1)).squeeze(-1)
        return score.item()

    def _batch_get_scores(
            self, texts1: List[str], texts2: List[str], score_head,
            batch_size: int = 32
    ) -> List[float]:
        """Batch-score paired text lists with the given relation head.

        Args:
            texts1: Left-hand texts of each pair.
            texts2: Right-hand texts of each pair (same length as *texts1*).
            score_head: Relation head module (rel1_head or rel2_head).
            batch_size: Number of pairs encoded per forward pass.

        Returns:
            One score per pair, in input order.
        """
        scores: List[float] = []

        for i in range(0, len(texts1), batch_size):
            inputs1 = self._encode(texts1[i:i + batch_size])
            inputs2 = self._encode(texts2[i:i + batch_size])

            with torch.no_grad():
                # Pooler outputs for each side of the pair.
                emb1 = self.model.bert(**inputs1)[1]
                emb2 = self.model.bert(**inputs2)[1]

                batch_scores = score_head(torch.cat([emb1, emb2], dim=1)).squeeze(-1)
                scores.extend(batch_scores.cpu().numpy().tolist())

        return scores


# Usage example
def _demo() -> None:
    """Exercise each public API of DualRelationProcessor once and print results."""
    # Build the processor from the saved checkpoint.
    processor = DualRelationProcessor.from_pretrained(
        model_dir="./optimized_dual_relation_bert",
        max_length=128,
    )

    # Sample inputs.
    hobby = "爱好是自然风光"
    attraction = "北京故宫博物院是明清两代皇家宫殿，有太和殿、乾清宫等古建筑"
    attraction_type = "历史古迹"

    # Hobby-attraction score straight from text.
    rel1_score = processor.get_hobby_attraction_score(hobby, attraction)
    print(f"爱好-景点匹配度: {rel1_score:.4f}")

    # Embeddings for hobby and attraction.
    hobby_emb = processor.get_hobby_embedding(hobby)
    attraction_emb = processor.get_attraction_embedding(attraction)
    print(f"爱好嵌入形状: {hobby_emb.shape}")
    print(f"景点嵌入形状: {attraction_emb.shape}")

    # Same hobby-attraction score, recomputed from the embeddings.
    rel1_score_from_emb = processor.get_hobby_attraction_score_from_embeddings(
        hobby_emb, attraction_emb
    )
    print(f"嵌入计算的爱好-景点匹配度: {rel1_score_from_emb:.4f}")

    # Attraction-type scoring, both from text and from embeddings.
    rel2_score = processor.get_attraction_type_score(attraction, attraction_type)
    print(f"景点-类型匹配度: {rel2_score:.4f}")

    type_emb = processor.get_type_embedding(attraction_type)
    rel2_score_from_emb = processor.get_attraction_type_score_from_embeddings(
        attraction_emb, type_emb
    )
    print(f"嵌入计算的景点-类型匹配度: {rel2_score_from_emb:.4f}")

    # Batch scoring over two hobby/attraction pairs.
    batch_hobbies = [hobby, "爱好是户外徒步"]
    batch_attractions = [attraction, "黄山风景区以奇松、怪石、云海著称"]
    batch_scores = processor.batch_get_hobby_attraction_scores(batch_hobbies, batch_attractions)
    print(f"批量爱好-景点匹配度: {[f'{s:.4f}' for s in batch_scores]}")


if __name__ == "__main__":
    _demo()