# -*- coding: utf-8 -*-
"""
Re-embed files already stored in the database and write them into ES.
"""
import sys
import os

# Add the project root directory to the Python path so sibling packages resolve.
current_dir = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.dirname(current_dir)
sys.path.append(project_root)

# Local environment configuration.
# SECURITY NOTE(review): database credentials are hard-coded in source here;
# they should come from the environment / a secrets manager, not be committed.
os.environ["SQLALCHEMY_DATABASE_URI"] = "mysql+pymysql://root:CowcMjJIDOahClHdW6@39.106.94.192:3306/yoka-tang"
# os.environ['EMBED_MODEL_PATH'] = "/Users/wzq/Desktop/portAI/models/bge-m3"
os.environ['EMBED_MODEL_PATH'] = "/home/dev/models/bge-m3"

import re
import argparse
import signal
from uuid import UUID, uuid4
from typing import Dict, List, Optional, Tuple, Union

from loguru import logger
from langchain_core.documents import Document
from langchain_elasticsearch import ElasticsearchStore
from elasticsearch import Elasticsearch

from db.models.files import QAFile as File
from db.repository.qa_file import db_get_qa_files, db_get_qa_file_by_id, db_update_qa_file_scope_gb, db_add_qa_attach, db_list_qa_attaches_by_file_id, db_update_qa_attach
from document_loader.pdf_loader_kb import GbPdfLoader, CommonPdfLoader
from service.embeddings import embeddings
from service.es_service.es_conn import ESConnection
from utils.email_utils import send_email
from utils.file_utils import download_attachment
from utils.documents_utils import create_documents_from_pdf

def get_files(file_id_values: Optional[List[str]] = None) -> List[File]:
    """Fetch the file records to process.

    Args:
        file_id_values: None, or raw file-ID arguments (repeated flags and
            comma-separated values are both accepted and mixed freely).

    Returns:
        A list of file objects; all files when no IDs were given.
    """
    if not file_id_values:
        return db_get_qa_files()

    # Flatten repeated arguments and comma-separated values into one list.
    normalized_ids: List[str] = [
        part.strip()
        for value in file_id_values
        if value
        for part in value.split(',')
        if part.strip()
    ]

    files: List[File] = []
    for file_id_str in normalized_ids:
        try:
            file_obj = db_get_qa_file_by_id(UUID(file_id_str))
            if file_obj:
                logger.info(f"找到文件: {file_obj.name}")
                files.append(file_obj)
            else:
                logger.warning(f"未找到ID为 {file_id_str} 的文件")
        except ValueError as e:
            logger.error(f"无效的文件ID格式: {file_id_str}, 错误: {e}")
        except Exception as e:
            logger.error(f"查找文件时发生错误: {e}")

    return files


def send_error_notification(error: Exception, file_info: Dict):
    """Email a failure notice describing the error and the affected file."""
    try:
        subject = f"文件处理失败: {file_info.get('file_name', '未知文件')}"

        # Assemble the detailed report line by line.
        lines = [
            f"错误类型: {type(error).__name__}",
            f"错误信息: {str(error)}",
            "\n文件信息:",
            f"文件名: {file_info.get('file_name', '未知')}",
            f"文件路径: {file_info.get('file_path', '未知')}",
            f"文件哈希: {file_info.get('file_hash', '未知')}",
        ]

        # Append extra context that may have been attached to the exception.
        if hasattr(error, 'file_info'):
            lines.append("\n错误上下文:")
            lines.extend(f"{k}: {v}" for k, v in error.file_info.items())

        send_email(subject, "\n".join(lines))
    except Exception as e:
        # Never let the notifier itself crash the pipeline.
        logger.error(f"Failed to send error notification: {str(e)}")


def delete_es_docs_by_file_id(es_client, es_index_name, file_id):
    """Remove every ES document whose metadata.file_id matches the given id.

    Re-raises on failure so callers can decide whether deletion is fatal.
    """
    try:
        # The index stores the id as a string, so normalize UUIDs first.
        if isinstance(file_id, UUID):
            file_id = str(file_id)
        connection = ESConnection()
        connection.conn(es_client)
        connection.delete_doc_by_query(
            {"query": {"term": {"metadata.file_id.keyword": file_id}}},
            idxnm=es_index_name,
        )
        logger.info(f"Successfully deleted docs for file_id={file_id} from index={es_index_name}")
    except Exception as e:
        logger.error(f"Failed to delete ES docs for file_id={file_id} from index={es_index_name}: {e}")
        raise


def process_item(file_obj: File, es_client: Elasticsearch, gb_vector_store: ElasticsearchStore,
                 emergency_vector_store: ElasticsearchStore, *, reparse: bool = False) -> bool:
    """Re-parse a single file and refresh its documents in Elasticsearch.

    Args:
        file_obj: Database file record to process.
        es_client: Low-level ES client used for deleting stale documents.
        gb_vector_store: Vector store backing the GB-standard index.
        emergency_vector_store: Vector store backing the emergency index.
        reparse: When True, force a full re-parse (old parse output deleted).

    Returns:
        True when documents were successfully (re)indexed, False otherwise.
    """
    try:
        logger.info(f"Processing {file_obj.name} started")

        # The source PDF must still exist on disk.
        if not os.path.exists(file_obj.location):
            logger.error(f"File not found: {file_obj.location}")
            return False

        # Files carrying a GB number go to the GB index via GbPdfLoader;
        # everything else goes to the emergency index via CommonPdfLoader.
        if file_obj.gb_num:
            logger.info(f"Using GbPdfLoader for file with GB number: {file_obj.gb_num}")
            pdf_loader = GbPdfLoader(file_obj.location)
            vector_store = gb_vector_store
            es_index_name = "yoka_kb_index_tang"
        else:
            logger.info("Using CommonPdfLoader for file without GB number")
            pdf_loader = CommonPdfLoader(file_obj.location)
            vector_store = emergency_vector_store
            es_index_name = "yoka_kb_index_tang_emergency"

        # Re-parse the PDF; `reparse` also clears previously cached output.
        if reparse:
            pdf_metadata = pdf_loader.load(delete_old_file=True)
        else:
            pdf_metadata = pdf_loader.load()

        # Best-effort removal of stale ES documents for this file.
        try:
            delete_es_docs_by_file_id(es_client, es_index_name, file_obj.id)
            logger.info(f"Deleted existing ES docs for file_id: {file_obj.id} from {es_index_name}")
        except Exception as e:
            logger.warning(f"Failed to delete existing ES docs: {e}")

        # Reconcile attachments between parse metadata, local disk, and the
        # database (downloading missing images when possible).
        processed_attachments = []
        attachments_meta = pdf_metadata.get("attachments", [])
        file_dir = os.path.dirname(file_obj.location) if file_obj.location else ""

        # 1) Walk attachments found in the parse metadata and make the local
        #    files and database rows consistent.
        for attachment in attachments_meta:
            atype = attachment.get("type", "")
            # Only image-like attachments are handled here.
            # (The original code re-checked this membership inside the loop
            # body; that check was redundant after this guard and is removed.)
            if atype not in ("image", "image_in_table"):
                continue

            image_url = attachment.get("image_url", "")
            local_save_path = attachment.get("local_save_path", "")
            origin_name = os.path.basename(image_url) if image_url else attachment.get("id", "")

            if local_save_path and os.path.exists(local_save_path):
                # A. Present locally, possibly absent in DB -> backfill DB.
                file_size = os.path.getsize(local_save_path)
                try:
                    db_add_qa_attach(origin_name, file_size, local_save_path, file_obj.id, image_url)
                except Exception as e:
                    logger.warning(f"补写数据库附件失败: {origin_name}, err={e}")
            elif image_url and file_dir:
                # B. DB row with location but file missing locally -> rely on
                #    db_add_qa_attach dedup / repair scripts.
                # C. Missing everywhere -> download from image_url.
                status, img_path = download_attachment(image_url, file_dir)
                if status and img_path:
                    local_save_path = img_path
                    file_size = os.path.getsize(img_path)
                    try:
                        db_add_qa_attach(origin_name, file_size, local_save_path, file_obj.id, image_url)
                    except Exception as e:
                        logger.warning(f"下载后写入数据库失败: {origin_name}, err={e}")

            # Build the processed_attachments entry used when constructing
            # Document objects below.
            built = {**attachment}
            if local_save_path:
                built["local_save_path"] = local_save_path
                built["id"] = os.path.basename(local_save_path)
            processed_attachments.append(built)

        # 2) Walk attachment rows in the database and fill in missing
        #    download links / restore missing local files.
        try:
            db_attaches = db_list_qa_attaches_by_file_id(file_obj.id)
            for a in db_attaches:
                new_location = None
                new_download = None
                # Row exists, local file is gone, and we have a download
                # link -> re-download to restore the file.
                if a.location and not os.path.exists(a.location) and a.download_link:
                    status, img_path = download_attachment(a.download_link, file_dir)
                    if status and img_path:
                        new_location = img_path
                # download_link is empty -> try to match one from the parse
                # metadata by file name.
                if not getattr(a, 'download_link', None):
                    for m in attachments_meta:
                        m_url = m.get("image_url", "")
                        if not m_url:
                            continue
                        m_name = os.path.basename(m_url)
                        if m_name and (m_name == a.origin_name or m_name == os.path.basename(a.location or "")):
                            new_download = m_url
                            break
                if new_location is not None or new_download is not None:
                    db_update_qa_attach(a.id, location=new_location or a.location, download_link=new_download or a.download_link)
        except Exception as e:
            logger.warning(f"遍历数据库附件并修复失败: {e}")

        # Build Document objects for indexing.
        documents: List[Document] = create_documents_from_pdf(pdf_metadata, file_obj, processed_attachments)

        if not documents:
            logger.warning(f"No documents created for file: {file_obj.name}")
            return False

        # Index the new documents under fresh UUIDs.
        try:
            uuids = [str(uuid4()) for _ in documents]
            vector_store.add_documents(documents=documents, ids=uuids)
            logger.info(f"Added {len(documents)} documents to {es_index_name} for file: {file_obj.name}")
            return True
        except Exception as e:
            logger.error(f"Failed to add documents to {es_index_name}: {e}")
            return False

    except Exception as e:
        logger.error(f"Error processing file {file_obj.name}: {str(e)}")
        return False


def main(es_client: Elasticsearch, gb_vector_store: ElasticsearchStore, emergency_vector_store: ElasticsearchStore,
         file_ids: Optional[List[str]] = None, *, reparse: bool = False):
    """Process the given files (or all files) and email a summary report.

    Args:
        es_client: Low-level ES client for deleting stale documents.
        gb_vector_store: Vector store backing the GB-standard index.
        emergency_vector_store: Vector store backing the emergency index.
        file_ids: Optional raw --file-id values; None processes all files.
        reparse: Force re-parsing (old ES data is deleted first).
    """
    files: List[File] = get_files(file_ids)
    total_files = len(files)
    success_count = 0
    failed_files = []

    if file_ids:
        logger.info(f"指定文件ID列表: {file_ids}")
        if total_files == 0:
            logger.error("未找到指定的文件，请检查文件ID是否正确")
            return
        logger.info(f"找到 {total_files} 个文件需要处理")
    else:
        logger.info("未指定文件ID，将处理所有文件")
        logger.info(f"Found {total_files} files to process")

    for idx, file_obj in enumerate(files, 1):
        # Stop between files if a shutdown signal was received.
        # BUG FIX: STOP_REQUESTED is only defined inside the __main__ guard,
        # so referencing it directly would raise NameError when this module
        # is imported; look it up defensively instead.
        if globals().get("STOP_REQUESTED", False):
            logger.warning("Stop requested by signal. Exiting after finishing previous file.")
            break
        logger.info(f"Processing file {idx}/{total_files}: {file_obj.name}")
        try:
            if process_item(file_obj, es_client, gb_vector_store, emergency_vector_store, reparse=reparse):
                success_count += 1
            else:
                failed_files.append(file_obj.name)
        except Exception as e:
            logger.error(f"Unexpected error processing file {file_obj.name}: {str(e)}")
            failed_files.append(file_obj.name)
            send_error_notification(e, {"file_name": file_obj.name, "file_path": file_obj.location})

    # Send the final summary report by email.
    if file_ids:
        summary = (
            "单文件处理完成报告\n\n"
            f"文件ID: {', '.join(file_ids)}\n"
            f"总文件数: {total_files}\n"
            f"成功处理: {success_count}\n"
            f"失败处理: {total_files - success_count}\n"
        )
        email_subject = "单/多文件PDF处理完成报告"
    else:
        summary = (
            "批量处理完成报告\n\n"
            f"总文件数: {total_files}\n"
            f"成功处理: {success_count}\n"
            f"失败处理: {total_files - success_count}\n"
        )
        email_subject = "批量PDF处理完成报告"

    if failed_files:
        summary += "\n失败文件列表:\n" + "\n".join(failed_files)

    send_email(email_subject, summary)
    logger.info(f"Processing completed. Success: {success_count}, Failed: {total_files - success_count}")


def re_match_gb(files):
    """Backfill missing GB standard numbers by extracting them from file names.

    Files that already carry a gb_num are left untouched; for the rest the
    name is scanned for a GB/T-style pattern and the database is updated.
    """
    gb_pattern = re.compile(r'GB[/T\s]*\s*\d+(?:\.\d+)*-\d{4}')

    for file in files:
        print(f"处理文件: {file.name}")

        # Already tagged -> nothing to do.
        if file.gb_num:
            print(f"  已有GB编号: {file.gb_num}")
            continue

        match = gb_pattern.search(file.name)
        if match is None:
            print(f"  未找到GB编号")
            continue

        gb_number = match.group()
        print(f"  找到GB编号: {gb_number}")
        try:
            db_update_qa_file_scope_gb(file.id, gb_num=gb_number)
            print(f"  更新成功: {file.id}")
        except Exception as e:
            print(f"  更新失败: {e}")


if __name__ == '__main__':
    # Graceful-shutdown signal handling: the first Ctrl+C / SIGTERM lets the
    # current task finish before exiting; a second signal forces exit.
    STOP_REQUESTED = False


    def _signal_handler(sig, frame):
        global STOP_REQUESTED
        if not STOP_REQUESTED:
            STOP_REQUESTED = True
            logger.warning(f"Received signal {sig}. Will stop after current task finishes.")
        else:
            logger.error("Second signal received. Forcing immediate exit.")
            os._exit(130)

    try:
        signal.signal(signal.SIGINT, _signal_handler)
        signal.signal(signal.SIGTERM, _signal_handler)
    except Exception:
        # Some platforms may not support every signal.
        pass

    # BUG FIX: loguru's logger.level("INFO") only *looks up* the level record;
    # it never changes the sink threshold. Reconfigure the default sink with
    # the desired minimum level instead.
    logger.remove()
    logger.add(sys.stderr, level="INFO")

    parser = argparse.ArgumentParser(description="重新解析已存在的PDF文件并更新ES索引")
    # File arguments: repeated flags and comma-separated lists are accepted.
    parser.add_argument("--file-id", dest="file_id", action="append", help="指定要重新解析的文件ID（可多次传参或逗号分隔；为空则处理所有文件）")
    # Force re-parse flag (kept aligned with upload_files).
    parser.add_argument("--reparse", dest="reparse", action="store_true", help="是否强制重新解析（会先删除ES中的旧数据）")

    # Elasticsearch connection arguments.
    parser.add_argument("--es-url", dest="es_url", type=str, default="http://localhost:9200", help="Elasticsearch URL")
    parser.add_argument("--es-user", dest="es_user", type=str, default="elastic", help="Elasticsearch用户名")
    parser.add_argument("--es-password", dest="es_password", type=str, default="changeme", help="Elasticsearch密码")

    args = parser.parse_args()

    try:
        # Vector store backed by the GB-standard index.
        gb_vector_store = ElasticsearchStore(
            "yoka_kb_index_tang",
            embedding=embeddings,
            es_url=args.es_url,
            es_user=args.es_user,
            es_password=args.es_password
        )

        # Vector store backed by the emergency index.
        emergency_vector_store = ElasticsearchStore(
            "yoka_kb_index_tang_emergency",
            embedding=embeddings,
            es_url=args.es_url,
            es_user=args.es_user,
            es_password=args.es_password
        )

        es_client = Elasticsearch(args.es_url, basic_auth=(args.es_user, args.es_password))

        main(es_client, gb_vector_store, emergency_vector_store, args.file_id, reparse=args.reparse)
    except Exception as e:
        logger.error(f"Application failed: {str(e)}")
        send_error_notification(e, {"error": "全局应用程序错误"})
        sys.exit(1)

# 使用示例：
# 1. 处理所有文件：
#    nohup python re-embed-exist-data.py > re-embedding.log 2>&1 &
#
# 2. 处理指定文件（通过文件ID）：
#    nohup python re-embed-exist-data.py --file-id "your-file-uuid" > re-embedding.log 2>&1 &
#
# 3. 处理指定文件并指定ES连接：
#    nohup python re-embed-exist-data.py --file-id "your-file-uuid" --es-url "http://your-es:9200" --es-user "elastic" --es-password "your-password" > re-embedding.log 2>&1 &
#
# 注意：file-id参数为可选，如果不指定则处理所有文件
