import os
import json
import numpy as np
import faiss
import requests
from typing import List, Dict, Tuple
import time
# Existing text-to-embedding helper
def get_embedding(text: str) -> np.ndarray:
    """
    Fetch an embedding vector for *text* from the SiliconFlow online API.

    Args:
        text: The text to embed (model: BAAI/bge-m3).

    Returns:
        A 1-D numpy array holding the embedding.

    Raises:
        RuntimeError: If the SILICONFLOW_API_KEY environment variable is unset.
        requests.HTTPError: If the API responds with a non-2xx status.
    """
    # SECURITY: the API key used to be hard-coded in this file; it now comes
    # from the environment so the secret never lives in source control.
    api_key = os.environ.get("SILICONFLOW_API_KEY")
    if not api_key:
        raise RuntimeError("SILICONFLOW_API_KEY environment variable is not set")

    url = "https://api.siliconflow.cn/v1/embeddings"

    payload = {
        "model": "BAAI/bge-m3",
        "input": text,
        "encoding_format": "float",
    }
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
    }

    # Fail fast on transport errors / bad status instead of crashing later
    # with a confusing KeyError on a malformed JSON body.
    response = requests.post(url, json=payload, headers=headers, timeout=30)
    response.raise_for_status()

    return np.array(response.json()["data"][0]["embedding"])

class DatabaseBuilder:
    """Builds a FAISS L2 vector index from a folder of markdown documents."""

    def __init__(self, docs_folder: str, index_file: str = "faiss_index.faiss", metadata_file: str = "metadata.json"):
        """
        Initialize the database builder.

        Args:
            docs_folder: Path to the folder containing markdown documents.
            index_file: Output path for the FAISS index file.
            metadata_file: Output path for the JSON metadata file.
        """
        self.docs_folder = docs_folder
        self.index_file = index_file
        self.metadata_file = metadata_file
        self.index = None  # faiss.IndexFlatL2, created in build()
        self.metadata: List[Dict[str, str]] = []

    def _collect_documents(self) -> Dict[str, str]:
        """
        Collect the content of every ``.md`` file under ``docs_folder``.

        Returns:
            Mapping of {file path: content}, where content is truncated at the
            first '# Source code' heading and stripped of surrounding whitespace.
        """
        documents = {}

        for root, _, files in os.walk(self.docs_folder):
            for file in files:
                if not file.endswith('.md'):
                    continue
                file_path = os.path.join(root, file)
                try:
                    with open(file_path, 'r', encoding='utf-8') as f:
                        # Drop the '# Source code' section and everything after it.
                        content = f.read().split('# Source code')[0].strip()
                    documents[file_path] = content
                except Exception as e:
                    print(f"读取文件 {file_path} 失败: {e}")

        return documents

    def build(self):
        """
        Embed every collected document, build the FAISS index, and save both
        the index and the per-document metadata to disk.
        """
        documents = self._collect_documents()

        embeddings = []
        self.metadata = []

        for doc_path, text in documents.items():
            if not text:
                continue  # nothing to embed
            time.sleep(0.5)  # throttle calls to the embedding API
            print(text)
            try:
                embedding = get_embedding(text)
                embeddings.append(embedding)
                self.metadata.append({
                    "doc_path": doc_path,
                    "text": text
                })
            except Exception as e:
                print(f"向量化文档 {doc_path} 失败: {e}")

        # Guard: np.array([]) has shape (0,), so reading shape[1] below would
        # raise IndexError when no document embedded successfully.
        if not embeddings:
            print("没有可用的向量，索引未构建")
            return

        embeddings_array = np.array(embeddings, dtype='float32')

        dimension = embeddings_array.shape[1]  # e.g. 1024 for bge-m3
        self.index = faiss.IndexFlatL2(dimension)  # exact L2-distance index
        self.index.add(embeddings_array)

        print(f"构建完成，共添加 {self.index.ntotal} 个向量")

        self._save_index()

    def _save_index(self):
        """
        Persist the FAISS index and the metadata list to their files.
        No-op when the index has not been built yet.
        """
        if self.index is not None:
            faiss.write_index(self.index, self.index_file)
            with open(self.metadata_file, 'w', encoding='utf-8') as f:
                json.dump(self.metadata, f, ensure_ascii=False, indent=2)
            print(f"索引已保存到 {self.index_file}")
            print(f"元数据已保存到 {self.metadata_file}")

if __name__ == "__main__":
    # Build the vector database from the "docs" folder and persist it.
    DatabaseBuilder(docs_folder="docs").build()