"""
从 job.json 加载职位数据和白皮书

使用方法:
    # 加载所有职位数据（包括创建职位和加载白皮书）
    python scripts/load_job_data.py

    # 只加载指定职位
    python scripts/load_job_data.py --only ai-agent-python-v1

    # 跳过已存在的职位
    python scripts/load_job_data.py --skip-existing

    # 只创建职位，不加载白皮书
    python scripts/load_job_data.py --positions-only

    # 只加载白皮书，不创建职位
    python scripts/load_job_data.py --whitepapers-only
"""

import argparse
import json
import sys
from pathlib import Path

# 添加项目根目录到 Python 路径
sys.path.insert(0, str(Path(__file__).parent.parent))

from langchain_community.document_loaders import PyPDFLoader, TextLoader, Docx2txtLoader
from langchain_text_splitters import (
    RecursiveCharacterTextSplitter,
    MarkdownHeaderTextSplitter,
)
from langchain_postgres import PGVector

from app.config import get_settings
from app.database import SessionLocal
from app.embeddings import get_embeddings
from app.models import Position

settings = get_settings()


def load_job_config(json_path: str = "data/job.json"):
    """
    Load position configurations from a JSON file.

    Args:
        json_path: Path to the JSON configuration file.

    Returns:
        List[dict]: List of position configuration dicts.

    Raises:
        FileNotFoundError: If the configuration file does not exist.
    """
    path = Path(json_path)
    if not path.exists():
        raise FileNotFoundError(f"配置文件不存在: {json_path}")

    configs = json.loads(path.read_text(encoding="utf-8"))

    print(f"✅ 成功加载配置文件: {json_path}")
    print(f"   职位数量: {len(configs)}")

    return configs


def create_position(db, config: dict):
    """
    Create a Position row for the given config, unless one already exists.

    Existence is checked by ``white_paper_tag`` matching the config's
    ``position_tag``.

    Args:
        db: SQLAlchemy database session.
        config: Position configuration dict (needs "name", "description",
            "position_tag").

    Returns:
        Position or None: The newly created position, or None if a position
        with the same tag already exists.
    """
    tag = config["position_tag"]
    name = config["name"]

    duplicate = db.query(Position).filter(Position.white_paper_tag == tag).first()
    if duplicate is not None:
        print(f"   ⚠️  职位已存在，跳过: {name} ({tag})")
        return None

    position = Position(title=name, jd_text=config["description"], white_paper_tag=tag)
    db.add(position)
    db.commit()
    db.refresh(position)  # pick up DB-generated fields (e.g. primary key)

    print(f"   ✅ 职位创建成功: {name} ({tag})")
    return position


def load_document(file_path: str):
    """
    Load a document with the loader matching its file extension.

    Supported extensions: .pdf, .txt, .md, .docx.

    Args:
        file_path: Path of the file to load.

    Returns:
        List[Document]: Loaded documents.

    Raises:
        ValueError: If the file extension is not supported.
    """
    suffix = Path(file_path).suffix.lower()

    if suffix in (".txt", ".md"):
        loader = TextLoader(file_path, encoding="utf-8")
    elif suffix == ".pdf":
        loader = PyPDFLoader(file_path)
    elif suffix == ".docx":
        loader = Docx2txtLoader(file_path)
    else:
        raise ValueError(f"不支持的文件类型: {suffix}")

    return loader.load()


def split_documents(documents, chunk_size=1000, chunk_overlap=200, file_path=None):
    """
    Split documents into chunks, using a header-aware strategy for Markdown.

    Markdown files are first split on headings (h1-h4) so chunks follow the
    document structure; sections longer than ``chunk_size`` are then re-split
    recursively. Every other format uses plain recursive character splitting
    with CJK-aware separators.

    Args:
        documents: List of langchain Documents to split.
        chunk_size: Maximum characters per chunk.
        chunk_overlap: Character overlap between adjacent chunks.
        file_path: Source file path; only used to detect the ".md" extension.

    Returns:
        List[Document]: The resulting document chunks.
    """
    file_ext = Path(file_path).suffix.lower() if file_path else None

    if file_ext == ".md":
        # Header-aware split; strip_headers=False keeps the heading text
        # inside each section so context is preserved for retrieval.
        markdown_splitter = MarkdownHeaderTextSplitter(
            headers_to_split_on=[
                ("#", "h1"),
                ("##", "h2"),
                ("###", "h3"),
                ("####", "h4"),
            ],
            strip_headers=False,
        )

        # Built once (loop-invariant — previously recreated per document);
        # only used to re-split sections that exceed chunk_size.
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap,
            separators=[
                "\n\n",
                "\n",
                "。",
                "！",
                "？",
                "；",
                ".",
                "!",
                "?",
                ";",
                " ",
                "",
            ],
        )

        all_chunks = []
        for doc in documents:
            for chunk in markdown_splitter.split_text(doc.page_content):
                # Merge file-level metadata with the section's header metadata
                # (header keys win on collision).
                chunk_metadata = {**doc.metadata, **chunk.metadata}

                if len(chunk.page_content) > chunk_size:
                    all_chunks.extend(
                        text_splitter.create_documents(
                            [chunk.page_content], metadatas=[chunk_metadata]
                        )
                    )
                else:
                    chunk.metadata = chunk_metadata
                    all_chunks.append(chunk)

        return all_chunks

    # Non-Markdown: recursive splitting on CJK and ASCII punctuation.
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
        separators=[
            "\n\n",
            "\n",
            "。",
            "！",
            "？",
            "；",
            "，",
            ".",
            "!",
            "?",
            ";",
            ",",
            " ",
            "",
        ],
        keep_separator=True,
    )
    return text_splitter.split_documents(documents)


def add_metadata(chunks, position_tag: str):
    """
    Attach retrieval metadata to every chunk (mutates chunks in place).

    Each chunk gets the position tag, its index within the list, its
    character length, and a fixed "whitepaper" doc type.

    Args:
        chunks: List of document chunks.
        position_tag: Tag identifying the position the chunks belong to.

    Returns:
        List[Document]: The same list, with metadata enriched.
    """
    for index, chunk in enumerate(chunks):
        chunk.metadata.update(
            position_tag=position_tag,
            chunk_index=index,
            chunk_length=len(chunk.page_content),
            doc_type="whitepaper",
        )
    return chunks


def load_to_pgvector(chunks):
    """
    Persist document chunks into the pgvector "knowledge_base" collection.

    Chunks are embedded with the configured embedding model and appended to
    the existing collection (nothing is deleted first).

    Args:
        chunks: List of document chunks to store.
    """
    PGVector.from_documents(
        documents=chunks,
        embedding=get_embeddings(),
        collection_name="knowledge_base",
        # NOTE(review): get_database_url is accessed without calling it —
        # presumably a property on settings; confirm.
        connection=settings.get_database_url,
        pre_delete_collection=False,  # append mode: keep existing vectors
    )


def load_whitepaper(config: dict, chunk_size: int = 1000, chunk_overlap: int = 200):
    """
    Load one whitepaper file into the vector store.

    Pipeline: load the document, split it into chunks, tag the chunks with
    position metadata, then embed and store them in pgvector.

    Args:
        config: Position configuration (reads "file" and "position_tag").
        chunk_size: Maximum characters per chunk.
        chunk_overlap: Character overlap between adjacent chunks.

    Returns:
        bool: True on success; False if the file is missing or any step failed.
    """
    file_path = config.get("file")
    if not file_path or not Path(file_path).exists():
        print(f"   ⚠️  白皮书文件不存在，跳过: {file_path}")
        return False

    try:
        documents = load_document(file_path)
        print(f"   ✅ 文档加载成功，文档数量: {len(documents)}")

        chunks = split_documents(
            documents,
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap,
            file_path=file_path,
        )
        print(f"   ✅ 文档分割完成，分块数量: {len(chunks)}")

        chunks = add_metadata(chunks, config["position_tag"])
        print("   ✅ 元数据添加完成")

        load_to_pgvector(chunks)
        print("   ✅ 向量存储完成")
        return True
    except Exception as e:
        # Best-effort: report the failure and let the caller move on to
        # the remaining jobs.
        print(f"   ❌ 加载失败: {str(e)}")
        return False


def process_single_job(config: dict, db, args):
    """
    Run the full pipeline for one job: create the position, load its whitepaper.

    Args:
        config: Position configuration dict.
        db: Database session.
        args: Parsed CLI arguments; consults the whitepapers_only,
            positions_only and skip_existing flags.

    Returns:
        dict: Boolean flags describing what happened for this job.
    """
    result = dict.fromkeys(
        (
            "position_created",
            "position_skipped",
            "whitepaper_loaded",
            "whitepaper_skipped",
            "whitepaper_failed",
        ),
        False,
    )

    banner = "=" * 80
    print("\n" + banner)
    print(f"📋 职位: {config['name']}")
    print(banner)
    print(f"   职位标签: {config['position_tag']}")
    print(f"   白皮书文件: {config.get('file', 'N/A')}")

    # Step 1: create the position record (skipped in whitepapers-only mode).
    if not args.whitepapers_only:
        if create_position(db, config):
            result["position_created"] = True
        else:
            result["position_skipped"] = True
            if args.skip_existing:
                # Position already existed: skip its whitepaper as well.
                return result

    # Step 2: load the whitepaper (skipped in positions-only mode).
    if not args.positions_only:
        if not config.get("file"):
            print("   ⚠️  未配置白皮书文件，跳过")
            result["whitepaper_skipped"] = True
        elif load_whitepaper(
            config,
            chunk_size=config.get("chunk_size", 1000),
            chunk_overlap=config.get("chunk_overlap", 200),
        ):
            result["whitepaper_loaded"] = True
        else:
            result["whitepaper_failed"] = True

    return result


def main():
    """
    CLI entry point: load configs, create positions, and ingest whitepapers.

    Exits with status 1 on any error (missing config, no matching position
    tag, or a failure during processing).
    """
    parser = argparse.ArgumentParser(description="从 job.json 加载职位数据和白皮书")
    parser.add_argument(
        "--json-path",
        type=str,
        # Matches the module docstring, load_job_config's default, and the
        # help text below (was inconsistently "白皮书/job.json").
        default="data/job.json",
        help="JSON 配置文件路径（默认: data/job.json）",
    )
    parser.add_argument(
        "--only",
        type=str,
        help="只处理指定的职位（通过 position_tag 指定）",
    )
    parser.add_argument(
        "--skip-existing",
        action="store_true",
        help="跳过已存在的职位（不加载其白皮书）",
    )
    parser.add_argument(
        "--positions-only",
        action="store_true",
        help="只创建职位，不加载白皮书",
    )
    parser.add_argument(
        "--whitepapers-only",
        action="store_true",
        help="只加载白皮书，不创建职位",
    )

    args = parser.parse_args()

    print("=" * 80)
    print("🚀 职位数据加载工具")
    print("=" * 80)

    db = None
    try:
        # 1. Load the configuration file.
        configs = load_job_config(args.json_path)

        # 2. Optionally filter down to a single position tag.
        if args.only:
            configs = [c for c in configs if c.get("position_tag") == args.only]
            if not configs:
                print(f"❌ 未找到标签为 '{args.only}' 的职位配置")
                sys.exit(1)

        # 3. Open the database session.
        db = SessionLocal()

        # 4. Aggregate counters across all jobs.
        total = len(configs)
        stats = {
            "positions_created": 0,
            "positions_skipped": 0,
            "whitepapers_loaded": 0,
            "whitepapers_skipped": 0,
            "whitepapers_failed": 0,
        }

        # 5. Process each position.
        for config in configs:
            result = process_single_job(config, db, args)

            if result["position_created"]:
                stats["positions_created"] += 1
            if result["position_skipped"]:
                stats["positions_skipped"] += 1
            if result["whitepaper_loaded"]:
                stats["whitepapers_loaded"] += 1
            if result["whitepaper_skipped"]:
                stats["whitepapers_skipped"] += 1
            if result["whitepaper_failed"]:
                stats["whitepapers_failed"] += 1

        # 6. Summary.
        print("\n" + "=" * 80)
        print("📊 处理结果汇总")
        print("=" * 80)
        print(f"   总计: {total} 个职位")

        if not args.whitepapers_only:
            print("\n   职位创建:")
            print(f"   - ✅ 新建: {stats['positions_created']}")
            print(f"   - ⚠️  跳过: {stats['positions_skipped']} (已存在)")

        if not args.positions_only:
            print("\n   白皮书加载:")
            print(f"   - ✅ 成功: {stats['whitepapers_loaded']}")
            print(f"   - ⚠️  跳过: {stats['whitepapers_skipped']} (文件不存在或未配置)")
            print(f"   - ❌ 失败: {stats['whitepapers_failed']}")

        print("=" * 80)

        # 7. Suggest next steps.
        if stats["positions_created"] > 0 or stats["whitepapers_loaded"] > 0:
            print("\n✅ 数据加载完成！")
            print("\n💡 下一步:")
            print("   1. 运行测试脚本验证检索效果:")
            print("      python scripts/test_whitepaper_retrieval.py")
            print("   2. 启动面试，测试 AI 是否使用了白皮书知识")
            print("   3. 查看数据库中的职位:")
            print("      python scripts/list_positions.py")

        if stats["whitepapers_skipped"] > 0:
            print(
                f"\n⚠️  有 {stats['whitepapers_skipped']} 个白皮书文件不存在，请先创建这些文件"
            )

        if stats["whitepapers_failed"] > 0:
            print(
                f"\n⚠️  有 {stats['whitepapers_failed']} 个白皮书加载失败，请检查错误信息"
            )

    except Exception as e:
        print(f"\n❌ 错误: {str(e)}")
        import traceback

        traceback.print_exc()
        sys.exit(1)
    finally:
        # Always release the DB session — the original only closed it on
        # the success path, leaking the session on any error.
        if db is not None:
            db.close()


# Allow running as a standalone script: python scripts/load_job_data.py
if __name__ == "__main__":
    main()
