# -*- coding: utf-8 -*-
"""
按 V2 解析重处理：扫描指定目录，找到同名的 hash.pdf 与 hash.md 成对文件，
对每一对文件使用 GbPdfLoaderV2 重新解析（基于现有 markdown，必要时会重建中间产物）。

用法示例：
  python scripts/reprocess_existing_data.py \
    --root "/path/to/data" \
    --delete-old

参数说明：
  --root:        根目录，递归扫描文件
  --delete-old:  是否强制重建中间产物（*_cleaned.md、*_chunks.json、*_results.json 等）
"""
import argparse
import json
import os
from pathlib import Path
from typing import List, Tuple

# 兼容直接运行
import sys
PROJECT_ROOT = Path(__file__).resolve().parents[1]
if str(PROJECT_ROOT) not in sys.path:
    sys.path.append(str(PROJECT_ROOT))

from document_loader.pdf_loader_kb import GbPdfLoaderV2  # noqa: E402
from utils.file_utils import download_attachment, to_rel_path  # noqa: E402
from utils.documents_utils import create_documents_from_pdf  # noqa: E402
from service.embeddings import embeddings  # noqa: E402
from db.repository.qa_file import (
    db_get_qa_file_by_name,
    db_list_qa_attaches_by_file_id,
    db_add_qa_attach,
    db_update_qa_attach,
)  # noqa: E402


def _find_pdf_md_pairs(root: Path) -> List[Tuple[Path, Path]]:
    """查找同名的 (hash.pdf, hash.md) 文件对。

    返回列表项为 (pdf_path, md_path)
    """
    md_index: dict[str, Path] = {}
    for md_path in root.rglob('*.md'):
        if md_path.is_file():
            md_index[md_path.stem] = md_path

    pairs: List[Tuple[Path, Path]] = []
    for pdf_path in root.rglob('*.pdf'):
        if not pdf_path.is_file():
            continue
        stem = pdf_path.stem
        md_path = md_index.get(stem)
        if md_path and md_path.exists():
            pairs.append((pdf_path, md_path))

    return pairs


def _looks_like_gb(file_path: Path) -> bool:
    """保留启发式判断，供日志统计使用（不影响处理）。"""
    try:
        name = file_path.stem.upper()
        if 'GB' in name:
            return True
        for parent in file_path.parents:
            if parent == file_path.anchor:
                break
            if 'GB' in parent.name.upper():
                return True
        if file_path.suffix.lower() == '.md':
            with open(file_path, 'r', encoding='utf-8') as f:
                head_lines = []
                for _ in range(30):
                    try:
                        head_lines.append(next(f))
                    except StopIteration:
                        break
                head = ''.join(head_lines)
            if 'GB ' in head or 'GB/T' in head or 'GB\t' in head:
                return True
    except Exception:
        return False
    return False


def _process_pair(pdf_path: Path, md_path: Path, delete_old: bool) -> dict | None:
    """Run GbPdfLoaderV2 for one (pdf, md) pair, driven by the markdown path.

    The pdf path is part of the pair but parsing itself is based on the
    markdown file. Returns the loader's result metadata, or None if the
    loader raises.
    """
    try:
        loader = GbPdfLoaderV2(str(md_path), delete_old_file=delete_old)
        outcome = loader.load(delete_old_file=delete_old)
    except Exception as exc:
        print(f"FAIL: {md_path} | {exc}")
        return None
    return outcome


def _download_images_from_metadata(pdf_metadata: dict, save_dir: Path, *, file_id=None, max_retries: int = 3) -> List[dict]:
    """Download the images listed in ``pdf_metadata['attachments']``.

    Attachments already recorded in the database (matched by download link
    or original file name) are not downloaded again; their stored location
    is reused. Newly downloaded images are registered in the database when
    *file_id* is provided.

    Returns the attachment dicts enriched with ``local_save_path``, used
    later to build ES documents. Non-image attachment types are skipped.
    """
    processed: List[dict] = []
    attachments = pdf_metadata.get("attachments", []) or []
    if not attachments:
        return processed

    # Load existing attachment records from the DB (only when a file_id is given),
    # indexed two ways so either the URL or the original name can match.
    existing_by_link = {}
    existing_by_name = {}
    if file_id is not None:
        try:
            db_atts = db_list_qa_attaches_by_file_id(file_id)
            for a in db_atts:
                if getattr(a, 'download_link', None):
                    existing_by_link[a.download_link] = a
                if getattr(a, 'origin_name', None):
                    existing_by_name[a.origin_name] = a
        except Exception:
            # Best effort: a DB read failure only disables the skip-if-present path.
            pass

    for att in attachments:
        atype = att.get("type")
        if atype not in ("image", "image_in_table"):
            continue
        image_url = att.get("image_url") or ""
        origin_name = os.path.basename(image_url) if image_url else att.get("id", "")

        # Work on a copy so the caller's metadata dicts are not mutated.
        new_att = dict(att)

        # 1) If the DB already holds this attachment, reuse its local path and skip the download.
        db_hit = None
        if image_url and image_url in existing_by_link:
            db_hit = existing_by_link[image_url]
        elif origin_name and origin_name in existing_by_name:
            db_hit = existing_by_name[origin_name]

        if db_hit:
            loc = getattr(db_hit, 'location', '')
            if loc:
                new_att["local_save_path"] = loc  # already stored as a relative path
            new_att["id"] = getattr(db_hit, 'origin_name', origin_name) or origin_name
            processed.append(new_att)
            continue

        # 2) Not in the DB: download it (requires a URL).
        if not image_url:
            continue
        status, img_path = download_attachment(image_url, str(save_dir), max_retries=max_retries)
        if status and img_path:
            rel_path = to_rel_path(img_path)
            new_att["id"] = os.path.basename(img_path)
            new_att["local_save_path"] = rel_path

            # 3) Register the freshly downloaded image in the DB.
            if file_id is not None:
                try:
                    file_size = os.path.getsize(img_path)
                    db_add_qa_attach(new_att["id"], file_size, rel_path, file_id, image_url)
                except Exception:
                    # Best effort: a failed DB insert does not invalidate the download.
                    pass

            processed.append(new_att)

    return processed


def reprocess_pairs(root: Path, delete_old: bool, *, download_images: bool = False,
                    es_url: str | None = None, es_user: str | None = None,
                    es_password: str | None = None, es_index: str | None = None,
                    max_download_retries: int = 3) -> Tuple[int, int, int]:
    """Re-parse every (pdf, md) pair under *root* with GbPdfLoaderV2.

    Optionally downloads each result's images and indexes the generated
    documents into Elasticsearch. ES indexing is only enabled when the
    full set of connection parameters (url, user, password, index) is
    supplied. Per-pair failures are printed and counted, never raised.

    Returns a ``(total, success, failed)`` tuple of pair counts.
    """
    pairs = _find_pdf_md_pairs(root)
    total = len(pairs)
    success = 0
    failed = 0

    vector_store = None
    if es_index and es_url and es_user is not None and es_password is not None:
        # Import lazily so the ES dependency is only required when indexing is requested.
        from langchain_elasticsearch import ElasticsearchStore  # type: ignore
        vector_store = ElasticsearchStore(
            es_index,
            embedding=embeddings,
            es_url=es_url,
            es_user=es_user,
            es_password=es_password,
        )

    for idx, (pdf_path, md_path) in enumerate(pairs, start=1):
        # gb_hint is a log-only label; it does not change processing.
        gb_hint = "GB" if _looks_like_gb(md_path) else "COMMON"
        print(f"[{idx}/{total}] Processing ({gb_hint}): {md_path.name}")
        result = _process_pair(pdf_path, md_path, delete_old)
        if isinstance(result, dict) and result.get("success"):
            stats = result.get('chunk_stats', {})
            print(f" -> OK | chunks: {sum(stats.values()) if stats else 'N/A'}")
            processed_attachments: List[dict] = []
            if download_images or vector_store is not None:
                try:
                    # Look up the DB file record by its hash name (the md file stem).
                    file_obj = None
                    try:
                        file_obj = db_get_qa_file_by_name(md_path.stem)
                    except Exception:
                        file_obj = None
                    file_id = getattr(file_obj, 'id', None) if file_obj else None
                    processed_attachments = _download_images_from_metadata(
                        result, md_path.parent, file_id=file_id, max_retries=max_download_retries
                    )
                    print(f"    downloaded images: {len(processed_attachments)}")
                except Exception as e:
                    print(f"    image download failed: {e}")

            if vector_store is not None:
                try:
                    documents = create_documents_from_pdf(result, None, processed_attachments)
                    if documents:
                        # Stable ids derived from the file hash plus a 1-based chunk index.
                        ids = [f"{md_path.stem}:{i:04d}" for i in range(1, len(documents) + 1)]
                        vector_store.add_documents(documents=documents, ids=ids)
                        print(f"    indexed to ES: {len(documents)} docs -> {es_index}")
                except Exception as e:
                    print(f"    ES index failed: {e}")
            success += 1
        else:
            print(" -> FAIL")
            failed += 1

    return total, success, failed


def main():
    """CLI entry point: scan ``--root`` for hash.pdf/hash.md pairs and reprocess them.

    ES connection settings are read from the environment (ES_URL, ES_USER,
    ES_PASSWORD, ES_INDEX), falling back to the previous built-in values so
    existing deployments keep working.
    """
    parser = argparse.ArgumentParser(description="扫描并重处理同名 hash.pdf 与 hash.md 的文件对，使用 GbPdfLoaderV2 解析")
    parser.add_argument('--root', type=str, required=True, help='根目录')
    parser.add_argument('--delete-old', action='store_true', help='强制重建所有中间产物')
    # Optional: download images and index into ES.
    parser.add_argument('--download-images', action='store_true', help='下载解析结果中的图片到同目录')
    parser.add_argument('--max-download-retries', type=int, default=3, help='图片下载最大重试次数')
    args = parser.parse_args()

    root = Path(args.root).expanduser().resolve()
    if not root.exists():
        raise SystemExit(f"根目录不存在: {root}")

    # SECURITY: these values were previously hard-coded in source, including a
    # real-looking password. Prefer environment variables; the old literals
    # remain only as backward-compatible defaults.
    # TODO: rotate the exposed default password and drop these fallbacks.
    es_url = os.environ.get('ES_URL', 'http://localhost:9200')
    es_user = os.environ.get('ES_USER', 'elastic')
    es_password = os.environ.get('ES_PASSWORD', 'pZG=M81j2q8f=BGjkkQm')
    es_index = os.environ.get('ES_INDEX', 'yoka_kb_index_new')

    total, success, failed = reprocess_pairs(
        root,
        args.delete_old,
        download_images=args.download_images,
        es_url=es_url,
        es_user=es_user,
        es_password=es_password,
        es_index=es_index,
        max_download_retries=args.max_download_retries,
    )
    print("\n统计：")
    print(f"  总计:   {total}")
    print(f"  成功:   {success}")
    print(f"  失败:   {failed}")


# Standard script guard: run the CLI only when executed directly.
if __name__ == '__main__':
    main()


