import os
import json
from langchain_text_splitters import RecursiveCharacterTextSplitter
import pandas as pd
from typing import List, Dict, Any
from langchain.vectorstores import FAISS
from langchain_community.document_loaders import CSVLoader
from langchain.schema import Document
from configs.config import config
import asyncio
from concurrent.futures import ThreadPoolExecutor
from langchain_core.embeddings import Embeddings

class KnowledgeManager:
    """Build and persist a FAISS vector store from CSV knowledge files.

    CSV layout (assumed from the column access in ``_read_csv_file``):
    column 0 holds the query text, column 1 holds the associated tool name.
    Blocking pandas/FAISS work is off-loaded to a thread pool so the
    event loop stays responsive.
    """

    def __init__(self, csv_dir: str, faiss_dir: str, max_workers: int = 4):
        """
        Args:
            csv_dir: directory containing the source ``*.csv`` files.
            faiss_dir: directory where the FAISS index is saved.
            max_workers: size of the thread pool used for blocking work.
        """
        self.csv_dir = csv_dir
        self.faiss_dir = faiss_dir
        self.max_workers = max_workers
        self.executor = ThreadPoolExecutor(max_workers=max_workers)

    async def csv_loader_path(self) -> List[Document]:
        """Load every CSV file under ``csv_dir`` concurrently.

        Returns:
            One ``Document`` per CSV row, aggregated across all files.
        """
        # Collect all CSV file paths.
        csv_files = [
            os.path.join(self.csv_dir, file)
            for file in os.listdir(self.csv_dir)
            if file.endswith('.csv')
        ]

        # get_running_loop() is the correct API inside a coroutine;
        # get_event_loop() is deprecated for this use since Python 3.10.
        loop = asyncio.get_running_loop()

        # pandas CSV parsing is blocking; run each file in the pool and
        # fan the reads out concurrently.
        tasks = [
            loop.run_in_executor(self.executor, self._read_csv_file, file_path)
            for file_path in csv_files
        ]
        results = await asyncio.gather(*tasks)

        # Flatten the per-file lists into a single list.
        docs_list: List[Document] = []
        for file_docs in results:
            docs_list.extend(file_docs)

        return docs_list

    def _read_csv_file(self, file_path: str) -> List[Document]:
        """Synchronously read one CSV file into a list of Documents.

        Column 0 becomes the page content (the query); column 1 is stored
        in metadata as ``tool``. Assumes at least two columns per row —
        TODO confirm against the actual CSV schema.
        """
        df = pd.read_csv(file_path, encoding='utf-8')
        file_docs = []
        for index, row in df.iterrows():
            query = str(row.iloc[0])
            tool = str(row.iloc[1])
            doc = Document(
                page_content=query,
                metadata={
                    'source': file_path,
                    'row': index + 1,  # 1-based row number for humans
                    'tool': tool
                }
            )
            file_docs.append(doc)
        return file_docs

    async def faiss_loader_path(self, docs: List[Document], embedding_model: Embeddings) -> None:
        """Split ``docs``, embed them in batches, and persist a FAISS index.

        Args:
            docs: documents to index.
            embedding_model: embedding backend handed to FAISS.

        Raises:
            ValueError: if ``docs`` produces no chunks after splitting
                (``FAISS.from_documents`` would otherwise fail with an
                opaque error on an empty batch).
        """
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=100,
            chunk_overlap=0,
            length_function=len,
            separators=["\n\n", "\n"]
        )

        loop = asyncio.get_running_loop()
        # Document splitting is CPU work; keep it off the event loop.
        all_split = await loop.run_in_executor(
            self.executor,
            text_splitter.split_documents,
            docs
        )

        # BUGFIX: fail early with a clear message on empty input instead
        # of letting FAISS.from_documents([]) blow up downstream.
        if not all_split:
            raise ValueError("no documents to index after splitting")

        batch_size = 32

        # Initialise the FAISS store with the first batch.
        db = await loop.run_in_executor(
            self.executor,
            FAISS.from_documents,
            all_split[:batch_size],
            embedding_model
        )

        # BUGFIX: add the remaining batches sequentially. The previous
        # version gathered db.add_documents calls concurrently across
        # pool threads, but FAISS index mutation is not thread-safe and
        # concurrent adds can corrupt the index. Each batch still runs
        # in the pool so the event loop is never blocked.
        for i in range(batch_size, len(all_split), batch_size):
            await loop.run_in_executor(
                self.executor,
                db.add_documents,
                all_split[i:i + batch_size]
            )

        # Persist the index to disk.
        await loop.run_in_executor(
            self.executor,
            db.save_local,
            self.faiss_dir
        )

        print(f"FAISS save success, processed {len(all_split)} documents")

    def __del__(self):
        """Best-effort thread-pool cleanup on garbage collection."""
        if hasattr(self, 'executor'):
            # wait=False: __del__ can run during interpreter shutdown,
            # where blocking on worker threads risks a hang.
            self.executor.shutdown(wait=False)

# Module-level singleton wired up from application config.
_csv_path = config.knowledge.csv_path
_faiss_path = config.knowledge.faiss_path

km = KnowledgeManager(
    csv_dir=_csv_path,
    faiss_dir=_faiss_path,
    max_workers=4,  # tune to the number of CPU cores
)

