# -*- coding: utf-8 -*-
"""
批量上传和解析文件脚本
功能：模拟question_answer_router中的upload_file接口，批量处理文件夹中的所有文件
"""
import sys
import os
import uuid
import asyncio
import aiofiles
import traceback
from typing import List, Dict, Optional
from pathlib import Path

from langchain_elasticsearch import ElasticsearchStore
from langchain_core.documents import Document
from elasticsearch import Elasticsearch

# 添加项目根目录到Python路径
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

os.environ['EMBED_MODEL_PATH'] = "/home/dev/models/bge-m3"

from loguru import logger

from configs import UPLOAD_FILE_DIR
from configs.kb_config import ES_URL, ES_USER, ES_PASSWORD, ES_KB_INDEX_NAME
from utils.file_utils import convert_docx_to_pdf
from utils.hash_utils import cal_md5
from db.repository.qa_file import (
    db_file_add_asso,
    db_get_qa_file_by_name, 
    db_add_qa_file, 
    db_update_qa_file_delete_state
)
from db.repository.attributes import db_get_all_knowledge_types, db_get_element_classification_by_name

from utils.documents_utils import create_documents_from_pdf
from controller.files.utils import process_attachments
from db.repository.qa_file import db_update_qa_file_scope_gb
from service.embeddings import embeddings
from service.es_service import ESConnection


def add_documents_to_es(store, documents: List[Document]):
    """Index *documents* into Elasticsearch via *store*, each under a fresh UUID.

    Returns whatever ``store.add_documents`` returns (the list of inserted ids).
    """
    doc_ids = [str(uuid.uuid4()) for _ in documents]
    return store.add_documents(documents=documents, ids=doc_ids)


def delete_es_docs_by_file_id(es_client: Elasticsearch, file_id: str, es_index_names: List[str]):
    """Delete every ES document whose ``metadata.file_id`` matches *file_id*.

    Args:
        es_client: Elasticsearch client.
        file_id: File id as a string (a ``uuid.UUID`` is converted first).
        es_index_names: ES index names to purge.
    """
    try:
        file_id = str(file_id) if isinstance(file_id, uuid.UUID) else file_id

        delete_query = {
            "query": {
                "term": {"metadata.file_id.keyword": file_id}
            }
        }

        connection = ESConnection()
        connection.conn(es_client)

        # Purge matching documents from each configured index; a failure on
        # one index does not stop the others.
        for idx in es_index_names:
            try:
                connection.delete_doc_by_query(delete_query, idxnm=idx)
                logger.info(f"已删除文件 {file_id} 在ES索引 {idx} 中的记录")
            except Exception as e:
                logger.warning(f"删除ES索引 {idx} 中的文件记录时出错: {str(e)}")

    except Exception as e:
        logger.error(f"删除ES文件记录失败: {str(e)}")


def check_es_file_exists(es_client: Elasticsearch, file_id: str, es_index_names: List[str]) -> bool:
    """Check whether any ES index holds a document for *file_id*.

    Args:
        es_client: Elasticsearch client.
        file_id: File id as a string (a ``uuid.UUID`` is converted first).
        es_index_names: ES index names to inspect.

    Returns:
        bool: True as soon as one index contains a matching record,
        False otherwise (including on errors).
    """
    try:
        if isinstance(file_id, uuid.UUID):
            file_id = str(file_id)

        # size=1: existence check only, no payload needed.
        search_body = {
            "query": {
                "term": {
                    "metadata.file_id.keyword": file_id
                }
            },
            "size": 1
        }

        connection = ESConnection()
        connection.conn(es_client)

        for idx in es_index_names:
            try:
                resp = connection.search(q=search_body, idxnm=idx)
                if not resp or 'hits' not in resp:
                    continue
                hits = resp['hits']
                # ES 6.x reports total as an int, 7.x as {"value": ...}.
                total = hits.get('total', {})
                if isinstance(total, dict):
                    total_value = total.get('value', 0)
                elif isinstance(total, int):
                    total_value = total
                else:
                    total_value = 0

                if total_value > 0 or hits.get('hits', []):
                    logger.debug(f"文件 {file_id} 在ES索引 {idx} 中存在 ({total_value} 条记录)")
                    return True
            except Exception as e:
                logger.warning(f"检查ES索引 {idx} 时出错: {str(e)}")

        logger.debug(f"文件 {file_id} 在所有ES索引中都不存在")
        return False

    except Exception as e:
        logger.error(f"检查ES文件记录失败: {str(e)}")
        return False


def find_attribute_by_name(attributes, name: str):
    """Return the ``id`` of the first attribute whose ``name`` equals *name*, else None."""
    match = next((attr for attr in attributes if attr.name == name), None)
    return match.id if match is not None else None


def get_or_create_element_classification(category: str):
    """Resolve *category* to an element-classification id.

    NOTE(review): despite the name, this only performs a lookup — nothing
    is created when the lookup misses; it simply returns None.

    Returns the classification id, or None for an empty category or a miss.
    """
    if not category:
        return None

    found = db_get_element_classification_by_name(category)
    return found.id if found else None


async def parse_kb_pdf_background(kb_vector_store, file_obj, file_type, pdf_save_path: str, is_delete: bool):
    """Parse a PDF and index its content into Elasticsearch.

    Loads the PDF with ``GbPdfLoaderV2`` for "gb" files or ``CommonPdfLoader``
    otherwise, records attribute associations for the file in the database,
    processes extracted attachments, and adds the resulting documents to the
    ES vector store.

    Args:
        kb_vector_store: ElasticsearchStore the documents are written to.
        file_obj: DB file record; its ``id`` keys the associations.
        file_type: "gb" selects the GB-standard loader, anything else the
            common loader.
        pdf_save_path: Path of the PDF to parse.
        is_delete: Not used by this function.
    """
    if file_type == "gb":
        from document_loader import GbPdfLoaderV2
        pdf_loader = GbPdfLoaderV2(pdf_save_path, delete_old_file=True)
    else:
        from document_loader import CommonPdfLoader
        pdf_loader = CommonPdfLoader(pdf_save_path, delete_old_file=True)
    pdf_metadata = pdf_loader.load()
    file_name = pdf_metadata.get("filename", None)
    scope = pdf_metadata.get("scope", None)
    category = pdf_metadata.get("category", None)
    gb_num = pdf_metadata.get("gb_number", None)

    # Tag every file with the fixed "标准" (standard) knowledge type.
    # NOTE(review): if that type is missing, knowledge_type_id is None and is
    # still passed to db_file_add_asso — confirm the repository tolerates it.
    knowledge_types = db_get_all_knowledge_types()
    knowledge_type_id = find_attribute_by_name(knowledge_types, "标准")
    db_file_add_asso(file_obj.id, "knowledge_type", [knowledge_type_id])

    # Persist parsed scope / GB-number metadata and, when present, link the
    # category to an element classification.
    if scope or gb_num or file_name:
        file_obj = db_update_qa_file_scope_gb(file_obj.id, file_name, gb_num, scope)
        if category:
            classification_id = get_or_create_element_classification(category)
            if classification_id:
                db_file_add_asso(file_obj.id, "element_classification", [classification_id])

    # Attachments are saved next to the PDF.
    save_dir = os.path.dirname(pdf_save_path)
    processed_attachments = process_attachments(pdf_metadata.get("attachments", []), file_obj, save_dir)

    # Convert parsed metadata into Document objects.
    documents = create_documents_from_pdf(pdf_metadata, file_obj, processed_attachments)

    # Persist to ES.
    logger.info(f"Adding {len(documents)} documents from {file_obj.name} to ES")
    insert_ids = add_documents_to_es(kb_vector_store, documents)
    logger.info(f"Added {len(insert_ids)} documents to ES")
    logger.info(f"Finished parsing gb pdf: {pdf_save_path}")


class BatchFileUploader:
    """Batch file uploader.

    Mirrors the core logic of the ``upload_file`` endpoint in
    ``question_answer_router``: for every supported file under a directory it
    deduplicates against MySQL, the upload directory and Elasticsearch, saves
    the file, registers it in the database, converts Word documents to PDF,
    and runs the parsing pipeline that indexes the content into ES.
    """

    def __init__(self,
                 es_index: str,
                 es_url: str = ES_URL,
                 es_user: str = ES_USER,
                 es_password: str = ES_PASSWORD,
                 upload_dir: str = UPLOAD_FILE_DIR,
                 es_index_names: Optional[List[str]] = None):
        """
        Initialize the batch uploader.

        Args:
            es_index: Primary index name (used by the ElasticsearchStore).
            es_url: Elasticsearch URL.
            es_user: Elasticsearch user name.
            es_password: Elasticsearch password.
            upload_dir: Directory uploaded files are stored under.
            es_index_names: ES indices checked during deduplication;
                defaults to ``[es_index]`` when not given.
        """
        self.upload_dir = upload_dir

        # Supported file extensions (lower-case, without the dot).
        self.allowed_extensions = {'pdf', 'doc', 'docx'}

        self.kb_vector_store = ElasticsearchStore(
            es_index, embedding=embeddings, es_url=es_url, es_user=es_user, es_password=es_password
        )

        # Raw ES client reused for duplicate checks and cleanup queries.
        self.es_client = self.kb_vector_store.client
        # Indices inspected during deduplication (default: primary index only).
        self.es_index_names = es_index_names if es_index_names is not None else [es_index]

        # Run statistics. These are also reset at the start of every
        # batch_upload_files call so a reused instance reports correct numbers.
        self.total_files = 0
        self.success_count = 0
        self.failed_files = []

        logger.info(f"初始化批量上传器:")
        logger.info(f"  查重索引: {self.es_index_names}")
        logger.info(f"  上传目录: {upload_dir}")

    def get_files_from_directory(self, directory_path: str) -> List[tuple]:
        """
        Collect all supported files under a directory, recursively.

        Args:
            directory_path: Directory to scan.

        Returns:
            List[tuple]: [(file name, file path), ...]
        """
        files = []
        directory = Path(directory_path)

        if not directory.exists():
            logger.error(f"目录不存在: {directory_path}")
            return files

        if not directory.is_dir():
            logger.error(f"路径不是目录: {directory_path}")
            return files

        logger.info(f"扫描目录: {directory_path}")

        # Walk the whole tree, skipping hidden files and unsupported extensions.
        for file_path in directory.rglob('*'):
            if file_path.is_file() and not file_path.name.startswith('.'):
                suffix = file_path.suffix.lower().lstrip('.')
                if suffix in self.allowed_extensions:
                    files.append((file_path.name, str(file_path)))

        logger.info(f"找到 {len(files)} 个支持的文件")
        return files

    async def process_single_file(self, 
                                 file_name: str, 
                                 file_path: str,
                                 file_type: str = "common") -> bool:
        """
        Process a single file, mirroring the core logic of the upload_file endpoint.

        Args:
            file_name: File name.
            file_path: File path.
            file_type: File kind ("gb" or "common").

        Returns:
            bool: True when the file was fully processed; False when it was
                skipped (unsupported type, already complete) or failed.
        """
        try:
            logger.info(f"开始处理文件: {file_name}")

            # 1. Validate the extension.
            file_suffix = file_name.rsplit('.', 1)[-1].lower() if '.' in file_name else ""

            if file_suffix not in self.allowed_extensions:
                logger.warning(f"不支持的文件类型: {file_suffix}，跳过文件: {file_name}")
                return False

            # 2. Read the content and compute its MD5.
            try:
                async with aiofiles.open(file_path, "rb") as f:
                    file_content = await f.read()

                if not file_content:
                    logger.error(f"文件内容为空: {file_name}")
                    return False

                file_md5 = cal_md5(file_content)
                file_hash_name = f"{file_md5}.{file_suffix}"
                file_size = len(file_content)

            except Exception as e:
                logger.error(f"读取文件失败 {file_name}: {str(e)}")
                return False

            # 3. Triple dedup check: MySQL record, file on disk, ES documents.
            #    If any of the three is missing the file is reprocessed.
            file_obj = db_get_qa_file_by_name(file_hash_name)
            mysql_exists = file_obj is not None

            dir_path = os.path.join(self.upload_dir, file_md5)
            file_hash_path = os.path.join(dir_path, file_hash_name)
            file_path_exists = os.path.exists(file_hash_path)

            # ES is only consulted when a MySQL record exists: without one
            # there cannot be any ES documents keyed to this file's id.
            es_exists = False
            if file_obj:
                es_exists = check_es_file_exists(
                    self.es_client, 
                    str(file_obj.id), 
                    self.es_index_names
                )

            if mysql_exists and file_path_exists and es_exists:
                logger.info(f"文件已完整存在（MySQL✓ 文件路径✓ ES✓），跳过: {file_name}")
                return False

            missing_parts = []
            if not mysql_exists:
                missing_parts.append("MySQL记录")
            if not file_path_exists:
                missing_parts.append("文件路径")
            if file_obj and not es_exists:
                missing_parts.append("ES记录")
            logger.info(f"文件不完整（缺失: {', '.join(missing_parts)}），需要重新处理: {file_name}")

            # Purge stale ES documents before re-indexing to avoid duplicates.
            if file_obj and (not es_exists or not file_path_exists):
                try:
                    logger.info(f"清理文件 {file_name} (ID: {file_obj.id}) 在ES中的旧数据")
                    delete_es_docs_by_file_id(self.es_client, str(file_obj.id), self.es_index_names)
                except Exception as e:
                    logger.warning(f"清理ES旧数据失败（将继续处理）: {str(e)}")

            # 4. Ensure the target directory exists and save the file if needed.
            #    exist_ok=True avoids the check-then-create race of the previous
            #    os.path.exists() + os.makedirs() pair.
            os.makedirs(dir_path, exist_ok=True)

            if not file_path_exists:
                try:
                    async with aiofiles.open(file_hash_path, "wb") as f:
                        await f.write(file_content)
                    logger.debug(f"文件已保存: {file_hash_path}")
                except IOError as e:
                    logger.error(f"文件保存失败 {file_name}: {str(e)}")
                    return False
            else:
                logger.debug(f"文件路径已存在，跳过保存: {file_hash_path}")

            # 5. Register the file in the database.
            #    NOTE(review): the leading literal 1 looks like a hard-coded
            #    user/owner id — confirm against db_add_qa_file's signature.
            try:
                file_obj, is_delete = db_add_qa_file(1, file_hash_name, file_name, file_hash_path, file_suffix, file_size)
            except Exception as e:
                logger.error(f"保存文件信息到数据库失败 {file_name}: {str(e)}")
                return False

            # 6. Convert Word documents to PDF, reusing an existing conversion.
            #    file_suffix is already lower-case (step 1), no need to re-lower.
            if file_suffix in ('doc', 'docx'):
                pdf_path_in_target = os.path.join(dir_path, f"{file_md5}.pdf")
                if not os.path.exists(pdf_path_in_target):
                    success = convert_docx_to_pdf(file_hash_path, pdf_path_in_target)
                    if success:
                        file_pdf_path = pdf_path_in_target
                        logger.debug(f"Word转PDF成功: {pdf_path_in_target}")
                    else:
                        logger.error(f"Word转PDF失败: {file_hash_path}")
                        return False
                else:
                    file_pdf_path = pdf_path_in_target
                    logger.debug(f"PDF已存在，使用: {pdf_path_in_target}")
            else:
                file_pdf_path = file_hash_path

            # 7. Run the parsing/indexing pipeline.
            try:
                await parse_kb_pdf_background(self.kb_vector_store, file_obj, file_type, file_pdf_path, is_delete)
                logger.info(f"文件处理完成: {file_name}, 文件ID: {file_obj.id}")
                return True

            except Exception as e:
                logger.error(f"后台解析任务失败 {file_name}: {str(e)}")
                # Mark the DB record deleted so the half-processed file can be retried.
                try:
                    db_update_qa_file_delete_state(file_obj.id, delete=True)
                except Exception as cleanup_error:
                    logger.error(f"清理文件状态失败: {cleanup_error}")
                return False

        except Exception as e:
            logger.error(f"处理文件出错 {file_name}: {e}\n{traceback.format_exc()}")
            return False

    async def batch_upload_files(self, 
                                directory_path: str,
                                file_type: str = "gb") -> Dict:
        """
        Upload every supported file under a directory.

        Args:
            directory_path: Directory to process.
            file_type: File kind ("gb" or "common").

        Returns:
            Dict: {"total", "success", "failed", "failed_files"} for this run.
        """
        logger.info(f"开始批量上传文件，目录: {directory_path}")

        # Reset per-run statistics: previously only total_files was set here,
        # so reusing the instance accumulated success_count / failed_files and
        # produced wrong (even negative) failure counts on later runs.
        self.total_files = 0
        self.success_count = 0
        self.failed_files = []

        files = self.get_files_from_directory(directory_path)
        self.total_files = len(files)

        if self.total_files == 0:
            logger.warning("未找到任何支持的文件")
            return {
                "total": 0,
                "success": 0,
                "failed": 0,
                "failed_files": []
            }

        logger.info(f"找到 {self.total_files} 个文件待处理")

        # Process the files sequentially.
        for idx, (file_name, file_path) in enumerate(files, 1):
            logger.info(f"处理文件 {idx}/{self.total_files}: {file_name}")

            try:
                success = await self.process_single_file(
                    file_name=file_name,
                    file_path=file_path,
                    file_type=file_type,
                )

                if success:
                    self.success_count += 1
                    logger.info(f"✓ 文件处理成功: {file_name}")
                else:
                    self.failed_files.append(file_name)
                    logger.error(f"✗ 文件处理失败: {file_name}")

            except Exception as e:
                self.failed_files.append(file_name)
                logger.error(f"✗ 处理文件异常 {file_name}: {str(e)}")

        # Summarize the run.
        failed_count = self.total_files - self.success_count
        logger.info(f"\n=== 批量上传完成 ===")
        logger.info(f"总文件数: {self.total_files}")
        logger.info(f"成功处理: {self.success_count}")
        logger.info(f"失败处理: {failed_count}")

        if self.failed_files:
            logger.info(f"失败文件列表:")
            for failed_file in self.failed_files:
                logger.info(f"  - {failed_file}")

        return {
            "total": self.total_files,
            "success": self.success_count,
            "failed": failed_count,
            "failed_files": self.failed_files
        }


async def main():
    """CLI entry point: parse arguments, run the batch upload, print a summary."""
    import argparse

    parser = argparse.ArgumentParser(description="批量上传和解析文件脚本")
    parser.add_argument("directory", help="要处理的文件夹路径")
    parser.add_argument("--file-type", choices=["gb", "common"], default="common", help="文件类型: gb(国标) 或 common(通用)")
    args = parser.parse_args()

    # Build the uploader from the project-wide ES / storage configuration.
    uploader = BatchFileUploader(
        es_url=ES_URL,
        es_user=ES_USER,
        es_password=ES_PASSWORD,
        es_index=ES_KB_INDEX_NAME,
        upload_dir=UPLOAD_FILE_DIR
    )

    summary = await uploader.batch_upload_files(
        directory_path=args.directory,
        file_type=args.file_type
    )

    # Final report on stdout.
    print(f"\n处理完成!")
    print(f"总文件数: {summary['total']}")
    print(f"成功处理: {summary['success']}")
    print(f"失败处理: {summary['failed']}")

    failures = summary['failed_files']
    if failures:
        print(f"失败文件:")
        for name in failures:
            print(f"  - {name}")


if __name__ == "__main__":
    # Configure logging: replace the default sink with a colored INFO-level
    # stdout sink.
    logger.remove()
    logger.add(sys.stdout, level="INFO", format="<green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>")
    
    # Run the async entry point.
    asyncio.run(main())


# python scripts/batch_upload_files.py /path/to/your/files --file-type gb

# nohup python scripts/batch_upload_files.py "/home/yoka-new/GB汇总 2025-10-28" --file-type gb
# nohup python scripts/batch_upload_files.py /home/yoka-new/JT标准补充 --file-type gb
