import numpy as np
import requests
import os
import warnings
from scipy.spatial.distance import jaccard

# Import the optional local Hugging Face configuration module first, so any
# settings it applies take effect before sentence_transformers is imported.
try:
    import hf_config
    print("🔧 已导入Hugging Face配置模块")
except ImportError:
    # hf_config is optional; setup_hf_mirror() below supplies the defaults.
    print("⚠️ 未找到hf_config模块，使用默认配置")

from sentence_transformers import SentenceTransformer

# 配置Hugging Face镜像源
def setup_hf_mirror():
    """Configure Hugging Face mirror endpoints and cache directories.

    Routes hub traffic through a China-accessible mirror, lays out the
    cache tree under ``~/.cache/huggingface``, disables telemetry and
    implicit token usage, and creates the cache directories.
    """
    # HF_ENDPOINT is the variable huggingface_hub reads; HF_HUB_URL is
    # kept for older tooling.
    mirror = 'https://hf-mirror.com'
    os.environ['HF_ENDPOINT'] = mirror
    os.environ['HF_HUB_URL'] = mirror

    # Cache layout under the user's home directory.
    cache_dir = os.path.expanduser('~/.cache/huggingface')
    transformers_cache = os.path.join(cache_dir, 'transformers')
    datasets_cache = os.path.join(cache_dir, 'datasets')
    os.environ['HF_HOME'] = cache_dir
    os.environ['TRANSFORMERS_CACHE'] = transformers_cache
    os.environ['HF_DATASETS_CACHE'] = datasets_cache

    # Opt out of telemetry and implicit token lookup.
    os.environ['HF_HUB_DISABLE_TELEMETRY'] = '1'
    os.environ['HF_HUB_DISABLE_IMPLICIT_TOKEN'] = '1'

    print(f"🔧 配置Hugging Face镜像源: {os.environ['HF_ENDPOINT']}")
    print(f"📁 缓存目录: {cache_dir}")

    # Make sure every cache directory exists.
    for path in (cache_dir, transformers_cache, datasets_cache):
        os.makedirs(path, exist_ok=True)

# 获取文本嵌入模型：按优先级依次尝试候选模型，全部失败时回退到本地哈希模型
def get_model():
    """Return a sentence-embedding model, falling back through candidates.

    Configures the Hugging Face mirror first, then tries each candidate
    model in priority order. If every load fails (or the candidate list
    is empty), returns the local hash-based fallback model so the rest
    of the pipeline keeps working.

    Returns:
        A SentenceTransformer instance, or a FallbackModel exposing the
        same ``encode(texts, convert_to_tensor=...)`` interface.
    """
    # Configure the mirror before any hub access.
    setup_hf_mirror()

    # Candidate models, highest expected availability (in China) first.
    model_options = [
        {
            "name": "shibing624/text2vec-base-chinese",
            "description": "中文文本向量模型（推荐）"
        },
        {
            "name": "GanymedeNil/text2vec-large-chinese",
            "description": "中文大模型文本向量"
        },
        {
            "name": "paraphrase-multilingual-MiniLM-L12-v2",
            "description": "多语言文本向量模型"
        },
        {
            "name": "all-MiniLM-L6-v2",
            "description": "英文文本向量模型（备选）"
        }
    ]

    for i, model_info in enumerate(model_options):
        try:
            print(f"🔍 尝试加载模型 {i+1}/{len(model_options)}: {model_info['name']}")
            print(f"📝 描述: {model_info['description']}")

            model = SentenceTransformer(
                model_info['name'],
                cache_folder=os.environ.get('TRANSFORMERS_CACHE'),
                device='cpu'  # force CPU to avoid GPU/driver issues
            )

            print(f"✅ 成功加载模型: {model_info['name']}")
            return model

        except Exception as e:
            print(f"❌ 加载模型失败 {model_info['name']}: {e}")
            if i < len(model_options) - 1:
                print("🔄 尝试下一个模型...")

    # All candidates failed (or the list was empty). Fix: the fallback
    # used to be returned inside the loop's last iteration, so an empty
    # candidate list would implicitly return None.
    print("⚠️ 所有模型加载失败，使用基础备选方案")
    return create_fallback_model()

def create_fallback_model():
    """Create a deterministic hash-based embedding model.

    Used as a last resort when no Hugging Face model can be loaded. The
    embeddings are NOT semantically meaningful — they only provide a
    stable, text-dependent unit vector so downstream code keeps working.
    """
    print("🔧 创建备选文本向量化模型...")

    class FallbackModel:
        def __init__(self):
            # Dimension matching common sentence-transformer models.
            self.dimension = 384
            print(f"✅ 备选模型创建成功，向量维度: {self.dimension}")

        def encode(self, texts, convert_to_tensor=False):
            """Encode text(s) into deterministic L2-normalized vectors.

            Args:
                texts: a single string or a list of strings.
                convert_to_tensor: if True, return a torch.FloatTensor.

            Returns:
                Array/tensor of shape (len(texts), dimension).
            """
            if isinstance(texts, str):
                texts = [texts]

            import hashlib
            vectors = []
            for text in texts:
                # Fix: the original derived only the 16 MD5 digest bytes
                # (repeating each byte 4x and zero-padding the rest), so
                # just 64 of the 384 components carried information.
                # Chain counter-salted digests until the full dimension
                # is covered, so every component is text-dependent.
                raw = bytearray()
                counter = 0
                text_bytes = text.encode('utf-8')
                while len(raw) < self.dimension:
                    digest = hashlib.md5(b"%d:" % counter + text_bytes).digest()
                    raw.extend(digest)
                    counter += 1

                # Map each byte from [0, 255] into [-1, 1].
                vector = [(b / 255.0) * 2 - 1 for b in raw[:self.dimension]]

                # L2-normalize (norm can only be zero in a degenerate
                # all-midpoint case; keep the guard anyway).
                norm = np.linalg.norm(vector)
                if norm > 0:
                    vector = [v / norm for v in vector]

                vectors.append(vector)

            vectors = np.array(vectors)

            if convert_to_tensor:
                import torch
                return torch.tensor(vectors, dtype=torch.float32)

            return vectors

    return FallbackModel()

# Initialize the shared embedding model at import time (falls back to the
# hash-based FallbackModel if no Hugging Face model can be loaded).
print("🚀 初始化文本嵌入模型...")
model = get_model()

def embedding_filter(queries: list[str], recall_results: list, threshold: float = 0.6):
    """Filter recall results by cosine similarity to the given queries.

    Args:
        queries: query strings (non-string items are coerced with str()).
        recall_results: list of dicts, each carrying a "content" key.
        threshold: minimum cosine similarity required to keep an item.

    Returns:
        De-duplicated items (first occurrence by "content" wins), grouped
        per query in descending similarity order. On any failure the
        original recall_results are returned unchanged (best-effort).
    """
    try:
        # Embed all documents once, up front.
        data_embeddings = model.encode(
            [item["content"] for item in recall_results], convert_to_tensor=True
        )

        all_results = []

        for query in queries:
            # Fix: the original used `type(query) == int` and only coerced
            # exact ints; coerce any non-string query before encoding.
            if not isinstance(query, str):
                query = str(query)
            query_embedding = model.encode(query, convert_to_tensor=True)

            # Cosine similarity between the query and each document.
            # Hoist the query vector/norm out of the per-document loop.
            q = query_embedding.cpu()
            q_norm = np.linalg.norm(q)
            similarities = np.array([
                np.dot(q, emb.cpu()) / (q_norm * np.linalg.norm(emb.cpu()))
                for emb in data_embeddings
            ])

            # Keep items at/above threshold, most similar first.
            filtered_indices = np.where(similarities >= threshold)[0]
            sorted_indices = filtered_indices[np.argsort(-similarities[filtered_indices])]
            all_results += [recall_results[i] for i in sorted_indices]

        # De-duplicate by content, preserving first-seen order.
        filtered_results = []
        seen_contents = set()
        for item in all_results:
            if item["content"] not in seen_contents:
                seen_contents.add(item["content"])
                filtered_results.append(item)

        return filtered_results

    except Exception as e:
        print(f"❌ 文本嵌入过滤失败: {e}")
        # Deliberate best-effort: fall back to the unfiltered results.
        return recall_results

def get_embedding_dimension():
    """Return the embedding dimension by probing the model with a sample text.

    Falls back to 384 (the fallback model's dimension) if encoding fails.
    """
    try:
        # Encode one short text and measure the resulting vector.
        test_text = "测试文本"
        embedding = model.encode(test_text)
        return len(embedding)
    except Exception:
        # Fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        return 384  # default dimension
