import os
import torch
import logging
import numpy as np
from typing import List, Dict, Any, Optional
from transformers import AutoTokenizer, AutoModel
from sklearn.metrics.pairwise import cosine_similarity

class RobertaRelevanceClassifier:
    """Score and filter retrieved documents by relevance using a RoBERTa model.

    The query and each candidate document are embedded with mean-pooled
    RoBERTa hidden states; documents whose cosine similarity to the query
    falls below ``threshold`` are discarded, and the survivors are returned
    sorted by similarity (highest first).
    """

    def __init__(
        self,
        model_name_or_path: str = "hfl/chinese-roberta-wwm-ext",
        threshold: float = 0.65,
        cache_dir: Optional[str] = None
    ):
        """Initialize the RoBERTa relevance classifier.

        Args:
            model_name_or_path: RoBERTa model path or Hugging Face hub name.
            threshold: Relevance threshold; documents scoring below it are
                filtered out of the results.
            cache_dir: Optional directory for the model/tokenizer cache.

        Raises:
            RuntimeError: If the model or tokenizer fails to load.
        """
        self.threshold = threshold
        self.logger = logging.getLogger(__name__)
        # Lazy %-style args: the message is only formatted if the record is emitted.
        self.logger.info("初始化RoBERTa相关性分类器，使用模型: %s", model_name_or_path)

        try:
            self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, cache_dir=cache_dir)
            self.model = AutoModel.from_pretrained(model_name_or_path, cache_dir=cache_dir)
            # Inference-only usage: explicitly disable dropout etc. so scores
            # are deterministic regardless of how the model was loaded.
            self.model.eval()
            self.logger.info("RoBERTa模型和分词器加载完成")
        except Exception as e:
            self.logger.error("RoBERTa模型加载失败: %s", e)
            # Chain the original exception so the root cause is preserved.
            raise RuntimeError(f"无法加载RoBERTa模型: {str(e)}") from e

    def _mean_pooling(self, model_output, attention_mask):
        """Mean-pool token embeddings into one sentence vector per input.

        Padding positions are masked out via ``attention_mask`` so that only
        real tokens contribute to the average.

        Args:
            model_output: Model forward output exposing ``last_hidden_state``
                of shape (batch, seq_len, hidden_size).
            attention_mask: Attention mask of shape (batch, seq_len); 1 for
                real tokens, 0 for padding.

        Returns:
            Tensor of shape (batch, hidden_size) holding sentence embeddings.
        """
        token_embeddings = model_output.last_hidden_state
        input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
        # Clamp the denominator to avoid division by zero for all-padding rows.
        return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

    def compute_relevance(self, query: str, documents: List[Dict[str, Any]], top_k: int = 5) -> List[Dict[str, Any]]:
        """Compute query/document relevance and return the most relevant docs.

        Args:
            query: The user query.
            documents: Candidate documents; each should carry a 'text' field.
            top_k: Maximum number of documents to return.

        Returns:
            Documents with cosine similarity >= ``self.threshold``, sorted by
            score descending and truncated to ``top_k``. Each returned dict is
            a copy of the input with 'roberta_score' (float) and
            'roberta_score_percentage' (int, truncated) added.
        """
        if not documents:
            self.logger.warning("没有提供候选文档，无法计算相关性")
            return []

        self.logger.info("计算查询与%d个文档的相关性", len(documents))

        # Extract the document texts; missing 'text' fields become "".
        doc_texts = [doc.get("text", "") for doc in documents]
        if not any(doc_texts):
            self.logger.warning("所有候选文档都没有文本内容")
            return []

        # Encode the query together with all documents in one batch.
        inputs = self.tokenizer(
            [query] + doc_texts,
            padding=True,
            truncation=True,
            max_length=512,
            return_tensors="pt"
        )

        # Forward pass without gradient tracking (inference only).
        with torch.no_grad():
            outputs = self.model(**inputs)
            embeddings = self._mean_pooling(outputs, inputs['attention_mask'])

            # L2-normalize so cosine similarity reduces to a dot product.
            embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)

            # Row 0 is the query; the rest are the documents.
            query_embedding = embeddings[0].unsqueeze(0).numpy()
            doc_embeddings = embeddings[1:].numpy()

            similarities = cosine_similarity(query_embedding, doc_embeddings)[0]

        # Keep only documents that clear the relevance threshold.
        scored_documents = []
        for doc, score in zip(documents, similarities):
            if score >= self.threshold:
                doc_copy = doc.copy()
                doc_copy["roberta_score"] = float(score)
                doc_copy["roberta_score_percentage"] = int(score * 100)
                scored_documents.append(doc_copy)

        # Sort by relevance, highest first.
        scored_documents.sort(key=lambda x: x["roberta_score"], reverse=True)

        self.logger.info("共有%d个文档通过相关性阈值过滤", len(scored_documents))

        return scored_documents[:top_k]