import pandas as pd
from haystack.document_stores import FAISSDocumentStore
from haystack.nodes import EmbeddingRetriever
from haystack.schema import Document
import logging
from logging.handlers import RotatingFileHandler
import os
from datetime import datetime
from tqdm import tqdm
import threading
import mysql.connector
from mysql.connector import Error

# Logging configuration
def setup_logger():
    """Configure root logging to a rotating file plus the console.

    Idempotent: handlers are attached only if the root logger has none yet,
    so repeated calls (or module re-imports) no longer stack duplicate
    handlers and double every log line.

    Returns:
        logging.Logger: the module-level logger.
    """
    root_logger = logging.getLogger()

    # Only wire up handlers once; the original added a new pair per call.
    if not root_logger.handlers:
        log_dir = 'logs'
        os.makedirs(log_dir, exist_ok=True)

        current_date = datetime.now().strftime('%Y%m%d')
        log_file = os.path.join(log_dir, f'faiss_sync_{current_date}.log')

        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )

        # 200 MB per file, 10 rotated backups.
        file_handler = RotatingFileHandler(
            log_file,
            maxBytes=200 * 1024 * 1024,
            backupCount=10,
            encoding='utf-8'
        )
        file_handler.setFormatter(formatter)

        console_handler = logging.StreamHandler()
        console_handler.setFormatter(formatter)

        root_logger.addHandler(file_handler)
        root_logger.addHandler(console_handler)

    root_logger.setLevel(logging.INFO)

    return logging.getLogger(__name__)

logger = setup_logger()

# MySQL connection settings. Each credential can be overridden via an
# environment variable so secrets need not live in source control.
# NOTE(review): hard-coded production credentials were committed here; the
# defaults below preserve current behavior, but they should be rotated and
# removed from the repository.
MYSQL_CONFIG = {
    'host': os.environ.get('RAG_MYSQL_HOST', '47.104.188.246'),
    'port': int(os.environ.get('RAG_MYSQL_PORT', '13307')),
    'database': os.environ.get('RAG_MYSQL_DATABASE', 'rag'),
    'user': os.environ.get('RAG_MYSQL_USER', 'root'),
    'password': os.environ.get('RAG_MYSQL_PASSWORD', 'G@lMmX8rVCLcegOH'),
    'charset': 'utf8mb4'
}

# Load dataset configuration from the database
def load_dataset_mapping():
    """Fetch the dataset ``code -> path`` mapping from ``dataset_config``.

    Returns:
        dict: mapping of dataset code to its JSON file path; empty dict on
        any database error (best-effort, the error is logged).
    """
    connection = None
    cursor = None
    try:
        connection = mysql.connector.connect(**MYSQL_CONFIG)
        cursor = connection.cursor(dictionary=True)

        query = "SELECT code, path FROM dataset_config order by id asc"
        cursor.execute(query)
        dataset_configs = cursor.fetchall()

        return {row['code']: row['path'] for row in dataset_configs}

    except Error as e:
        logger.error(f"Error connecting to MySQL database: {e}")
        return {}
    finally:
        # Close resources even when the query raises; the original only
        # closed them on the success path, leaking connections on error.
        if cursor is not None:
            cursor.close()
        if connection is not None:
            connection.close()

# In-process sync state, keyed by dataset code.
sync_progress = {}  # dataset code -> percent complete (0-100; -1 means failed)
sync_locks = {}  # dataset code -> threading.Lock guarding that dataset's sync

class VectorSyncManager:
    """Synchronizes dataset QA pairs into per-dataset FAISS document stores.

    Progress is published to the module-level ``sync_progress`` dict and each
    dataset's sync is serialized by a lock held in ``sync_locks``.
    """

    def __init__(self):
        # Mapping of dataset code -> JSON file path, loaded from MySQL.
        self.dataset_mapping = load_dataset_mapping()

    def get_sync_progress(self, dataset_name):
        """Return the last recorded progress: 0-100, -1 on failure, 0 if unknown."""
        return sync_progress.get(dataset_name, 0)

    def start_sync(self, dataset_name):
        """Start a background sync thread for ``dataset_name``.

        Args:
            dataset_name: dataset code present in ``self.dataset_mapping``.

        Returns:
            dict: ``{"status": "started"|"running", "message": ...}``.

        Raises:
            ValueError: if the dataset code is unknown.
        """
        if dataset_name not in self.dataset_mapping:
            raise ValueError(f"Dataset {dataset_name} not found")

        lock = sync_locks.setdefault(dataset_name, threading.Lock())

        # Non-blocking acquire closes the check-then-act race the original
        # had between testing ``.locked()`` and spawning the worker, and it
        # also removes the implicit ``None`` return when the lock was held.
        # The worker thread releases the lock when it finishes.
        if not lock.acquire(blocking=False):
            return {"status": "running", "message": "Sync already in progress"}

        thread = threading.Thread(
            target=self._sync_vectors,
            args=(dataset_name,),
            daemon=True,  # don't keep the interpreter alive for a sync
        )
        thread.start()
        return {"status": "started", "message": "Vector sync started"}

    def _sync_vectors(self, dataset_name):
        """Worker body: embed and store any rows missing from the FAISS store.

        Expects the dataset's lock in ``sync_locks`` to already be held by
        ``start_sync``; always releases it on exit.
        """
        try:
            # Initialize progress
            sync_progress[dataset_name] = 0

            # Per-dataset file locations.
            faiss_index_path = f"faiss_index_{dataset_name}.faiss"
            sql_url = f"sqlite:///{dataset_name}_document_store.db"
            dataset_path = self.dataset_mapping[dataset_name]

            # Load the existing FAISS index and point it at this dataset's
            # SQLite metadata store.
            doc_store = FAISSDocumentStore.load(
                index_path=faiss_index_path
            )
            doc_store.sql_url = sql_url

            existing_count = doc_store.get_document_count()

            # Read the full dataset.
            df = pd.read_json(dataset_path)
            total_docs = len(df)

            # Already fully synced: nothing to do.
            if existing_count >= total_docs:
                sync_progress[dataset_name] = 100
                return

            # Collect already-stored content so a resumed sync never writes
            # duplicate documents.
            seen_content = {doc.content for doc in doc_store.get_all_documents()}

            documents = []
            remaining = df.iloc[existing_count:]
            for offset, (idx, row) in enumerate(
                tqdm(remaining.iterrows(), total=len(remaining))
            ):
                try:
                    q = row.get('input', row.get('Question', ''))
                    a = row.get('output', row.get('Response', ''))
                    if q and a:
                        content = f"问题：{q} 答案：{a}"
                        if content not in seen_content:
                            seen_content.add(content)
                            documents.append(
                                Document(
                                    content=content,
                                    meta={
                                        "type": "qa_pair",
                                        "question": q,
                                        "answer_length": len(a),
                                        "id": f"{dataset_name}_{len(documents) + existing_count}"
                                    }
                                )
                            )

                    # enumerate() gives a reliable sequential position even if
                    # the DataFrame index is not a 0-based RangeIndex.
                    progress = (existing_count + offset + 1) / total_docs * 100
                    sync_progress[dataset_name] = round(progress, 2)

                except Exception as e:
                    logger.error(f"Error processing row {idx}: {e}")
                    continue

            if documents:
                retriever = EmbeddingRetriever(
                    document_store=doc_store,
                    # Fixed model name: the original was missing the '/'
                    # ("sentence-transformersall-MiniLM-L6-v2"), which cannot
                    # resolve to the intended sentence-transformers model.
                    embedding_model="sentence-transformers/all-MiniLM-L6-v2",
                    top_k=5,
                    batch_size=32
                )

                # Write and embed in batches. Embedding only documents that
                # lack embeddings avoids re-embedding the whole store on every
                # batch (the original did O(n^2) total embedding work).
                batch_size = 500
                for i in range(0, len(documents), batch_size):
                    batch = documents[i:i + batch_size]
                    doc_store.write_documents(batch)
                    doc_store.update_embeddings(
                        retriever, update_existing_embeddings=False
                    )

                    progress = (i + len(batch)) / len(documents) * 100
                    sync_progress[dataset_name] = round(progress, 2)

                # Persist the updated index.
                doc_store.save(faiss_index_path)

            sync_progress[dataset_name] = 100
            logger.info(f"Vector sync completed for dataset {dataset_name}")

        except Exception as e:
            logger.error(f"Error during vector sync for dataset {dataset_name}: {e}")
            sync_progress[dataset_name] = -1  # marks the sync as failed
        finally:
            # Release the lock acquired by start_sync(). The original swapped
            # in a brand-new Lock object here, which could strand any thread
            # observing the old one.
            lock = sync_locks.get(dataset_name)
            if lock is not None and lock.locked():
                lock.release()

# Global manager instance shared by the module-level helper functions below.
# NOTE: instantiating here triggers a MySQL query at import time.
vector_sync_manager = VectorSyncManager()

# Public module-level API.
def start_vector_sync(dataset_name):
    """Kick off a background vector sync for *dataset_name*.

    Delegates to the shared :class:`VectorSyncManager` instance and returns
    its status dict.
    """
    manager = vector_sync_manager
    return manager.start_sync(dataset_name)

def get_sync_progress(dataset_name):
    """Report the current sync state for *dataset_name*.

    Returns a dict with the dataset name, its numeric progress, and a status
    string: "failed" (-1), "completed" (100) or "in_progress" (anything else).
    """
    progress = vector_sync_manager.get_sync_progress(dataset_name)
    if progress == -1:
        status = "failed"
    elif progress == 100:
        status = "completed"
    else:
        status = "in_progress"
    return {
        "dataset": dataset_name,
        "progress": progress,
        "status": status,
    }