"""
词向量模块 - 处理文本嵌入和相似度搜索
"""

import streamlit as st
import jieba
import numpy as np
import faiss
import json
import os
# from paddlenlp.embeddings import TokenEmbedding

from modules.config import VECTOR_INDEX_FILE, DOCS_INFO_FILE

@st.cache_resource
def load_embedding_model():
    """Load the word-embedding model.

    This is a lightweight stand-in for the original PaddleNLP
    ``TokenEmbedding`` (see the commented-out import at the top of the
    file): every word is mapped to a pseudo-random 300-dim vector.

    Vectors are seeded deterministically from the word itself. The
    previous unseeded ``np.random.randn`` gave the same word a different
    vector in every session, so query embeddings could never match the
    vectors stored in a FAISS index persisted by an earlier run.

    Returns:
        An object exposing ``search(word) -> np.ndarray`` and a ``dim``
        attribute, or ``None`` if initialization fails.
    """
    try:
        import zlib  # local import: only needed for deterministic seeding

        class RandomEmbedding:
            def __init__(self, dim=300):
                self.dim = dim
                # Cache so repeated lookups of a word return the same array.
                self.word_dict = {}
                print("初始化随机词嵌入模型")

            def search(self, word):
                if word not in self.word_dict:
                    # Seed from a stable checksum of the word bytes.
                    # (Python's hash() is randomized per process and
                    # would break cross-session reproducibility.)
                    seed = zlib.crc32(word.encode("utf-8"))
                    rng = np.random.default_rng(seed)
                    self.word_dict[word] = rng.standard_normal(self.dim)
                return self.word_dict[word]

        model = RandomEmbedding()
        print("随机词嵌入模型加载成功")
        return model
    except Exception as e:
        st.error(f"⚠️ 加载词嵌入模型失败: {str(e)}")
        return None

@st.cache_resource
def load_vector_db():
    """Load the vector database (FAISS index + document info JSON).

    Cached with ``st.cache_resource`` rather than ``st.cache_data``:
    ``cache_data`` pickles/copies the return value, which fails for a
    ``faiss.Index`` (an unserializable native resource), while
    ``cache_resource`` stores and returns the object itself — the
    documented choice for DB connections, indexes and models.

    Returns:
        Tuple ``(index, documents)`` on success, or ``(None, None)``
        when the files are missing or cannot be read.
    """
    if os.path.exists(VECTOR_INDEX_FILE) and os.path.exists(DOCS_INFO_FILE):
        try:
            index = faiss.read_index(VECTOR_INDEX_FILE)
            with open(DOCS_INFO_FILE, 'r', encoding='utf-8') as f:
                documents = json.load(f)
            return index, documents
        except Exception as e:
            st.error(f"⚠️ 加载向量数据库失败: {str(e)}")
    return None, None

def embed_text(text, embedding_model):
    """Convert text into a single vector: the mean of its word vectors.

    Args:
        text: Input string; segmented into words with jieba.
        embedding_model: Object exposing ``search(word)`` and ``dim``
            (as returned by ``load_embedding_model``), or ``None``.

    Returns:
        ``np.ndarray`` of shape ``(dim,)``. A zero vector is returned
        when the model is missing, the text yields no words, or the
        embedding step raises.
    """
    # Honour the model's declared dimensionality instead of hard-coding
    # 300 in three places, so zero-vector fallbacks stay consistent with
    # real embeddings if the model is built with a different dim.
    dim = getattr(embedding_model, "dim", 300)

    if embedding_model is None:
        return np.zeros(dim)

    try:
        words = list(jieba.cut(text))
        word_embeddings = [embedding_model.search(word) for word in words]

        if word_embeddings:
            return np.mean(word_embeddings, axis=0)
        return np.zeros(dim)
    except Exception as e:
        st.error(f"⚠️ 文本嵌入过程出错: {str(e)}")
        return np.zeros(dim)

def search_similar_docs(query_embedding, index, documents, top_k=5):
    """Return the documents closest to a query embedding.

    Args:
        query_embedding: 1-D vector for the query text.
        index: FAISS index to search, or ``None``.
        documents: List of ``{"content", "metadata"}`` dicts aligned
            with the index rows, or ``None``.
        top_k: Maximum number of hits to request from the index.

    Returns:
        List of ``{"content", "metadata", "score"}`` dicts; empty when
        the database is unavailable or the search fails.
    """
    if index is None or documents is None:
        return []

    try:
        query = np.array([query_embedding]).astype('float32')
        distances, indices = index.search(query, top_k)

        hits = []
        doc_count = len(documents)
        for rank, doc_idx in enumerate(indices[0]):
            # FAISS pads missing results with -1; also guard against an
            # index that has more rows than the documents list.
            if doc_idx == -1 or doc_idx >= doc_count:
                continue
            # Map distance to a (0, 1] similarity score.
            score = 1.0 / (1.0 + float(distances[0][rank]))
            hits.append({
                "content": documents[doc_idx]["content"],
                "metadata": documents[doc_idx]["metadata"],
                "score": score,
            })
        return hits
    except Exception as e:
        st.error(f"⚠️ 向量搜索过程出错: {str(e)}")
        return []

def get_document_for_artifact(artifact_name, documents):
    """Return the content of the first document related to an artifact.

    A document matches when its source filename starts with the
    artifact name, or when the artifact name appears anywhere in the
    document body.

    Args:
        artifact_name: Name of the artifact to look up.
        documents: List of ``{"content", "metadata"}`` dicts, or a
            falsy value when no database is loaded.

    Returns:
        The matching document's content string, or ``None``.
    """
    if not documents:
        return None

    for record in documents:
        source_name = os.path.basename(record["metadata"]["source"])
        name_match = source_name.startswith(artifact_name)
        if name_match or artifact_name in record["content"]:
            return record["content"]

    return None