import json
import logging
import os
import shutil
import time
from dataclasses import dataclass
from pathlib import Path
from typing import Optional

import pandas as pd

# Try to import Docling; fall back to a "not available" flag if the import fails
try:
    from src.pdf_parsing import PDFParser
    DOCLING_AVAILABLE = True
except ImportError:
    PDFParser = None
    DOCLING_AVAILABLE = False

from src import pdf_mineru
from src.parsed_reports_merging import PageTextPreparation
from src.text_splitter import TextSplitter
from src.ingestion import VectorDBIngestor
from src.ingestion import BM25Ingestor
from src.questions_processing import QuestionsProcessor
from src.tables_serialization import TableSerializer


def here() -> Path:
    """Locate the project root: two directory levels above this module."""
    return Path(__file__).parents[1]


class PipelineConfig:
    """Resolve every filesystem path used by the pipeline from a root directory.

    NOTE: this class previously carried a ``@dataclass`` decorator while
    declaring no fields and defining ``__init__`` by hand; the generated
    ``__eq__`` therefore compared *all* instances as equal. The decorator
    has been removed — the class is a plain path container.
    """

    def __init__(
        self,
        root_path: Path,
        subset_name: str = "subset.csv",
        questions_file_name: str = "questions.json",
        pdf_reports_dir_name: str = "pdf_reports",
        serialized: bool = False,
        config_suffix: str = "",
    ):
        """Build all derived paths under *root_path*.

        Args:
            root_path: Base data directory.
            subset_name: CSV file with report subset metadata.
            questions_file_name: JSON file holding the questions to answer.
            pdf_reports_dir_name: Directory (under root) with the source PDFs.
            serialized: When True, database/debug directories get a
                ``_ser_tab`` suffix so serialized-table runs do not clash
                with plain runs.
            config_suffix: Extra suffix for the answers file name, used to
                keep answers from different run configs apart.
        """
        self.root_path = root_path
        # Separate storage tree for serialized-table variants.
        suffix = "_ser_tab" if serialized else ""

        self.subset_path = root_path / subset_name
        self.questions_file_path = root_path / questions_file_name
        self.pdf_reports_dir = root_path / pdf_reports_dir_name

        self.answers_file_path = root_path / f"answers{config_suffix}.json"
        self.debug_data_path = root_path / "debug_data"
        self.databases_path = root_path / f"databases{suffix}"

        self.vector_db_dir = self.databases_path / "vector_dbs"
        self.documents_dir = self.databases_path / "chunked_reports"
        self.bm25_db_path = self.databases_path / "bm25_dbs"

        # Intermediate parsing stages, all under debug_data; the numeric
        # prefixes (01/02/03) reflect processing order.
        self.parsed_reports_dirname = "01_parsed_reports"
        self.parsed_reports_debug_dirname = "01_parsed_reports_debug"
        self.merged_reports_dirname = f"02_merged_reports{suffix}"
        self.reports_markdown_dirname = f"03_reports_markdown{suffix}"

        self.parsed_reports_path = self.debug_data_path / self.parsed_reports_dirname
        self.parsed_reports_debug_path = self.debug_data_path / self.parsed_reports_debug_dirname
        self.merged_reports_path = self.debug_data_path / self.merged_reports_dirname
        self.reports_markdown_path = self.debug_data_path / self.reports_markdown_dirname


@dataclass
class RunConfig:
    """Run configuration — supports loading LLM parameters from a config profile.

    Pipeline-flow flags are plain dataclass fields. LLM-related parameters
    (provider, models, retrieval tuning) start as ``None`` and are populated
    in ``__post_init__`` from the profile named by ``config_profile``; if the
    config manager cannot be loaded, hard-coded defaults are used instead.
    """

    # Name of the configuration profile to load LLM parameters from.
    config_profile: str = "default"

    # Pipeline-flow flags.
    use_serialized_tables: bool = False
    parent_document_retrieval: bool = False
    use_vector_dbs: bool = True
    use_bm25_db: bool = False

    # Retrieval configuration.
    retrieval_method: str = "vector"  # "vector", "hybrid", "enhanced_hybrid"
    pipeline_details: str = ""
    submission_file: bool = True
    full_context: bool = False
    config_suffix: str = ""

    # PDF parser selection and Docling parallelism settings.
    pdf_parser: str = "mineru"
    docling_parallel: bool = True
    docling_chunk_size: int = 2
    docling_max_workers: int = 10

    # Parameters populated from the configuration profile (or defaults).
    # Annotated Optional because they are None until __post_init__ runs.
    api_provider: Optional[str] = None
    answering_model: Optional[str] = None
    embedding_provider: Optional[str] = None
    embedding_model: Optional[str] = None
    llm_reranking: Optional[bool] = None
    llm_reranking_sample_size: Optional[int] = None
    top_n_retrieval: Optional[int] = None
    parallel_requests: Optional[int] = None

    def __post_init__(self):
        """Load LLM parameters from the configured profile (noise-free).

        Any failure (missing config-manager module, unknown profile,
        malformed config) silently falls back to ``_set_default_values``.
        """
        try:
            from src.config_manager import get_config_manager
            config_manager = get_config_manager()
            profile_config = config_manager.get_profile_config(self.config_profile)

            self.api_provider = profile_config.api_provider
            self.answering_model = profile_config.answering_model
            self.embedding_provider = profile_config.embedding_provider
            self.embedding_model = profile_config.embedding_model
            self.parallel_requests = profile_config.parallel_requests

            self.llm_reranking = profile_config.retrieval.llm_reranking
            self.llm_reranking_sample_size = profile_config.retrieval.llm_reranking_sample_size
            self.top_n_retrieval = profile_config.retrieval.top_n_retrieval
        except Exception:
            # Deliberate best-effort: keep the pipeline usable without a
            # configuration file by falling back to built-in defaults.
            self._set_default_values()

    def _set_default_values(self):
        """Set built-in defaults (used when profile loading fails)."""
        self.api_provider = "dashscope"
        self.answering_model = "qwen3-30b-a3b"
        self.embedding_provider = "dashscope"
        self.embedding_model = "text-embedding-v1"
        self.llm_reranking = False
        self.llm_reranking_sample_size = 30
        self.top_n_retrieval = 10
        self.parallel_requests = 1

    def get_model_config(self):
        """Return the detailed config of the current answering model, or None on failure."""
        try:
            from src.config_manager import get_config_manager
            config_manager = get_config_manager()
            # Strip the "-a3b" suffix — presumably the config manager keys
            # models without it (TODO confirm). A redundant no-op
            # .replace("3-30b", "3-30b") was removed here.
            return config_manager.get_model_config(
                self.api_provider,
                self.answering_model.replace("-a3b", ""),
            )
        except Exception as e:
            print(f"获取模型配置失败: {e}")
            return None

    def get_embedding_config(self):
        """Return the detailed config of the current embedding model, or None on failure."""
        try:
            from src.config_manager import get_config_manager
            config_manager = get_config_manager()
            return config_manager.get_embedding_config(self.embedding_provider, self.embedding_model)
        except Exception as e:
            print(f"获取嵌入模型配置失败: {e}")
            return None

    @classmethod
    def from_profile(cls, profile_name: str, **kwargs):
        """Create a RunConfig from the named profile, forwarding extra fields."""
        return cls(config_profile=profile_name, **kwargs)

    def update_profile(self, profile_name: str):
        """Switch to another profile and reload the LLM parameters."""
        self.config_profile = profile_name
        self.__post_init__()


class Pipeline:
    def __init__(
        self,
        root_path: Path,
        subset_name: str = "subset.csv",
        questions_file_name: str = "questions.json",
        pdf_reports_dir_name: str = "pdf_reports",
        run_config: RunConfig = RunConfig(),
    ):
        self.run_config = run_config
        self.paths = self._initialize_paths(root_path, subset_name, questions_file_name, pdf_reports_dir_name)
        self._convert_json_to_csv_if_needed()
        self._print_config_info()

    def _print_config_info(self):
        """打印当前配置信息（仅保留环境变量配置展示）"""
        print(f"\n🚀 Pipeline 初始化完成")
        print(f"📁 数据路径: {self.paths.root_path}")
        print(f"⚙️ 配置方案: {self.run_config.config_profile}")
        print(f"🤖 API 提供商: {self.run_config.api_provider}")
        print(f"🧠 回答模型: {self.run_config.answering_model}")
        print(f"📄 PDF 解析器: {self.run_config.pdf_parser}")
        print(f"🔍 检索方法: {self.run_config.retrieval_method}")
        print(f"📊 检索数量: {self.run_config.top_n_retrieval}")
        print(f"🔄 并行请求: {self.run_config.parallel_requests}")
        if self.run_config.llm_reranking:
            print(f"🎯 LLM 重排序: 启用 (样本数: {self.run_config.llm_reranking_sample_size})")
        else:
            print(f"🎯 LLM 重排序: 禁用")

        # 基于环境变量的“当前生效配置”直观展示
        llm_provider = os.getenv("LLM_PROVIDER", "ollama")
        ollama_model = os.getenv("OLLAMA_MODEL", "qwen2.5:7b")
        embedding_provider = os.getenv("EMBEDDING_PROVIDER", self.run_config.embedding_provider or "dashscope")
        ollama_emb_model = os.getenv("OLLAMA_EMBEDDING_MODEL", "bge-m3")
        rerank_flag = os.getenv("RERANKING", "auto")
        print("🔧 当前环境变量配置：")
        print(f"   - LLM_PROVIDER={llm_provider}")
        if llm_provider == "ollama":
            print(f"   - OLLAMA_MODEL={ollama_model}")
            print(f"   - OLLAMA_EMBEDDING_MODEL={ollama_emb_model}")
        print(f"   - EMBEDDING_PROVIDER={embedding_provider}")
        print(f"   - RERANKING={rerank_flag} (false/0/no/off 将强制关闭重排)")
        print("-" * 50)

    @property
    def config_profile(self) -> str:
        return self.run_config.config_profile

    @property
    def model_info(self) -> dict:
        return {
            "api_provider": self.run_config.api_provider,
            "answering_model": self.run_config.answering_model,
            "embedding_provider": self.run_config.embedding_provider,
            "embedding_model": self.run_config.embedding_model,
        }

    def switch_config_profile(self, profile_name: str):
        print(f"🔄 切换配置方案: {self.run_config.config_profile} -> {profile_name}")
        self.run_config.update_profile(profile_name)
        self._print_config_info()

    def get_available_profiles(self) -> list:
        try:
            from src.config_manager import get_available_profiles
            return get_available_profiles()
        except Exception:
            return ["default", "fast", "high_performance", "openai_gpt4", "economy"]

    @classmethod
    def from_config_profile(cls, root_path: Path, profile_name: str, **kwargs):
        run_config = RunConfig.from_profile(profile_name, **kwargs)
        return cls(root_path, run_config=run_config)

    def _initialize_paths(self, root_path: Path, subset_name: str, questions_file_name: str, pdf_reports_dir_name: str) -> PipelineConfig:
        return PipelineConfig(
            root_path=root_path,
            subset_name=subset_name,
            questions_file_name=questions_file_name,
            pdf_reports_dir_name=pdf_reports_dir_name,
            serialized=self.run_config.use_serialized_tables,
            config_suffix=self.run_config.config_suffix,
        )

    def _convert_json_to_csv_if_needed(self):
        json_path = self.paths.root_path / "subset.json"
        csv_path = self.paths.root_path / "subset.csv"
        if json_path.exists() and not csv_path.exists():
            try:
                with open(json_path, "r", encoding="utf-8") as f:
                    data = json.load(f)
                df = pd.DataFrame(data)
                df.to_csv(csv_path, index=False)
            except Exception as e:
                print(f"Error converting JSON to CSV: {str(e)}")

    @staticmethod
    def download_docling_models():
        if not DOCLING_AVAILABLE:
            print("❌ Docling 不可用，无法下载模型")
            return
        logging.basicConfig(level=logging.DEBUG)
        parser = PDFParser(output_dir=here())
        parser.parse_and_export(input_doc_paths=[here() / "src/dummy_report.pdf"])

    def parse_pdf_reports_mineru(self, pdf_files: list = None):
        if pdf_files is None:
            pdf_files = [f.name for f in self.paths.pdf_reports_dir.glob("*.pdf")]
        print(f"开始使用 MinerU 解析 {len(pdf_files)} 个 PDF 文件...")
        success_count = 0
        for pdf_file in pdf_files:
            if self.export_reports_to_markdown_mineru(pdf_file):
                success_count += 1
        print(f"MinerU 解析完成！成功: {success_count}/{len(pdf_files)}")
        return success_count

    def export_reports_to_markdown_mineru(self, file_name):
        print(f"使用 MinerU 开始处理: {file_name}")
        pdf_path = self.paths.pdf_reports_dir / file_name
        if not pdf_path.exists():
            print(f"PDF 文件不存在: {pdf_path}")
            return False
        try:
            if pdf_mineru.config.mode == pdf_mineru.MinerUMode.LOCAL:
                print(f"使用本地 MinerU 处理: {file_name}")
                output_dir = pdf_mineru.process_pdf_local(str(pdf_path))
                output_path = Path(output_dir)
                md_files = list(output_path.glob("**/*.md"))
                if not md_files:
                    print(f"未找到生成的 markdown 文件在: {output_dir}")
                    return False
                md_path = md_files[0]
            else:
                print(f"使用云端 MinerU 处理: {file_name}")
                task_id = pdf_mineru.get_task_id(file_name)
                print(f"task_id: {task_id}")
                pdf_mineru.get_result(task_id)
                extract_dir = f"{task_id}"
                md_path = Path(extract_dir) / "full.md"
                if not md_path.exists():
                    print(f"未找到 markdown 文件: {md_path}")
                    return False

            os.makedirs(self.paths.reports_markdown_path, exist_ok=True)
            base_name = os.path.splitext(file_name)[0]
            target_path = Path(self.paths.reports_markdown_path) / f"{base_name}.md"
            shutil.copy2(str(md_path), str(target_path))
            print(f"已将 {md_path} 复制到 {target_path}")

            if pdf_mineru.config.mode == pdf_mineru.MinerUMode.CLOUD:
                extract_dir = md_path.parent
                if extract_dir.exists():
                    shutil.rmtree(str(extract_dir))
                    print(f"已清理临时目录: {extract_dir}")
            return True
        except Exception as e:
            print(f"MinerU 处理失败 {file_name}: {str(e)}")
            import traceback
            traceback.print_exc()
            return False

    def export_reports_to_markdown(self, file_name):
        return self.export_reports_to_markdown_mineru(file_name)

    def chunk_reports(self, include_serialized_tables: bool = False):
        text_splitter = TextSplitter()
        if self.run_config.pdf_parser.lower() == "mineru":
            print(f"开始分割 {self.paths.reports_markdown_path} 目录下的 markdown 文件...")
            text_splitter.split_markdown_reports(
                all_md_dir=self.paths.reports_markdown_path,
                output_dir=self.paths.documents_dir,
                subset_csv=self.paths.subset_path,
            )
        elif self.run_config.pdf_parser.lower() == "docling":
            if not DOCLING_AVAILABLE:
                print("❌ Docling 不可用，请安装 Docling 或使用 MinerU 解析器")
                return
            print(f"开始分割 {self.paths.parsed_reports_path} 目录下的解析报告...")
            text_splitter.split_parsed_reports(
                parsed_reports_dir=self.paths.parsed_reports_path,
                output_dir=self.paths.documents_dir,
                subset_csv=self.paths.subset_path,
                include_serialized_tables=include_serialized_tables,
            )
        else:
            raise ValueError(f"不支持的解析器类型: {self.run_config.pdf_parser}")
        print(f"分割完成，结果已保存到 {self.paths.documents_dir}")

    def create_vector_dbs(self):
        input_dir = self.paths.documents_dir
        output_dir = self.paths.vector_db_dir
        vdb_ingestor = VectorDBIngestor()
        # 调试：确认向量库构建阶段的嵌入提供方与模型
        try:
            provider = getattr(vdb_ingestor, "embedding_provider", None)
            model = getattr(vdb_ingestor, "ollama_emb_model", None)
            print(f"[调试] VectorDBIngestor.embedding_provider = {provider}, model = {model}")
        except Exception as _e:
            print(f"[调试] 无法读取 VectorDBIngestor 的嵌入配置: {_e}")
        vdb_ingestor.process_reports(input_dir, output_dir)
        print(f"Vector databases created in {output_dir}")

    def create_bm25_db(self):
        input_dir = self.paths.documents_dir
        output_file = self.paths.bm25_db_path
        bm25_ingestor = BM25Ingestor()
        bm25_ingestor.process_reports(input_dir, output_file)
        print(f"BM25 database created at {output_file}")

    def process_parsed_reports(self):
        print(f"开始处理报告流程（使用 {self.run_config.pdf_parser.upper()} 解析器）...")
        print("步骤1：报告分块...")
        self.chunk_reports()
        print("步骤2：创建向量数据库...")
        self.create_vector_dbs()
        if self.run_config.use_bm25_db or self.run_config.retrieval_method == "enhanced_hybrid":
            print("步骤3：创建BM25数据库...")
            self.create_bm25_db()
        print("报告处理流程已成功完成！")

    def process_all_pdfs(self, parser_type: str = None):
        if parser_type:
            original_parser = self.run_config.pdf_parser
            self.run_config.pdf_parser = parser_type
        try:
            print(f"开始完整的 PDF 处理流程（使用 {self.run_config.pdf_parser.upper()} 解析器）...")
            print("步骤1：PDF 解析...")
            if self.run_config.pdf_parser.lower() == "docling":
                if not DOCLING_AVAILABLE:
                    print("❌ Docling 不可用，请安装 Docling 或使用 MinerU 解析器")
                    return
                self.parse_pdf_reports_parallel(
                    chunk_size=self.run_config.docling_chunk_size,
                    max_workers=self.run_config.docling_max_workers,
                )
            else:
                self.parse_pdf_reports_mineru()
            print("步骤2：报告分块...")
            self.chunk_reports()
            print("步骤3：创建向量数据库...")
            self.create_vector_dbs()
            if self.run_config.use_bm25_db or self.run_config.retrieval_method == "enhanced_hybrid":
                print("步骤4：创建BM25数据库...")
                self.create_bm25_db()
            print("PDF 处理流程完成！")
        finally:
            if parser_type:
                self.run_config.pdf_parser = original_parser

    def parse_pdf_reports_parallel(self, chunk_size: int = 2, max_workers: int = 10):
        if not DOCLING_AVAILABLE:
            print("❌ Docling 不可用，无法使用并行解析功能")
            return
        logging.basicConfig(level=logging.DEBUG)
        pdf_parser = PDFParser(
            output_dir=self.paths.parsed_reports_path,
            csv_metadata_path=self.paths.subset_path,
        )
        pdf_parser.debug_data_path = self.paths.parsed_reports_debug_path
        input_doc_paths = []
        main_pdfs = list(self.paths.pdf_reports_dir.glob("*.pdf")) + list(self.paths.pdf_reports_dir.glob("*.PDF"))
        input_doc_paths.extend(main_pdfs)
        backup_dir = self.paths.pdf_reports_dir.parent / "pdf_reports - 副本"
        if backup_dir.exists():
            backup_pdfs = list(backup_dir.glob("*.pdf")) + list(backup_dir.glob("*.PDF"))
            input_doc_paths.extend(backup_pdfs)
            print(f"找到备份目录，添加 {len(backup_pdfs)} 个 PDF 文件")
        print(f"总共找到 {len(input_doc_paths)} 个 PDF 文件待处理")
        if not input_doc_paths:
            print("❌ 没有找到 PDF 文件")
            return
        pdf_parser.parse_and_export_parallel(
            input_doc_paths=input_doc_paths,
            optimal_workers=max_workers,
            chunk_size=chunk_size,
        )
        print(f"PDF reports parsed and saved to {self.paths.parsed_reports_path}")

    def process_questions(self):
        processor = QuestionsProcessor(
            vector_db_dir=self.paths.vector_db_dir,
            documents_dir=self.paths.documents_dir,
            questions_file_path=self.paths.questions_file_path,
            new_challenge_pipeline=True,
            subset_path=self.paths.subset_path,
            parent_document_retrieval=self.run_config.parent_document_retrieval,
            retrieval_method=self.run_config.retrieval_method,
            llm_reranking=self.run_config.llm_reranking,
            llm_reranking_sample_size=self.run_config.llm_reranking_sample_size,
            top_n_retrieval=self.run_config.top_n_retrieval,
            parallel_requests=self.run_config.parallel_requests,
            api_provider=self.run_config.api_provider,
            answering_model=self.run_config.answering_model,
            full_context=self.run_config.full_context,
        )
        output_path = self._get_next_available_filename(self.paths.answers_file_path)
        _ = processor.process_all_questions(
            output_path=output_path,
            submission_file=self.run_config.submission_file,
            pipeline_details=self.run_config.pipeline_details,
        )
        print(f"Answers saved to {output_path}")

    def answer_single_question(self, question: str, kind: str = "string"):
        t0 = time.time()
        print("[计时] 开始初始化 QuestionsProcessor ...")
        processor = QuestionsProcessor(
            vector_db_dir=self.paths.vector_db_dir,
            documents_dir=self.paths.documents_dir,
            questions_file_path=None,
            new_challenge_pipeline=True,
            subset_path=self.paths.subset_path,
            parent_document_retrieval=self.run_config.parent_document_retrieval,
            llm_reranking=self.run_config.llm_reranking,
            llm_reranking_sample_size=self.run_config.llm_reranking_sample_size,
            top_n_retrieval=self.run_config.top_n_retrieval,
            parallel_requests=1,
            api_provider=self.run_config.api_provider,
            answering_model=self.run_config.answering_model,
            full_context=self.run_config.full_context,
            retrieval_method=self.run_config.retrieval_method,
        )
        t1 = time.time()
        print(f"[计时] QuestionsProcessor 初始化耗时: {t1-t0:.2f} 秒")
        print("[计时] 开始调用 process_single_question ...")
        answer = processor.process_single_question(question, kind=kind)
        t2 = time.time()
        print(f"[计时] process_single_question 推理耗时: {t2-t1:.2f} 秒")
        print(f"[计时] answer_single_question 总耗时: {t2-t0:.2f} 秒")
        return answer

    def _get_next_available_filename(self, base_path: Path) -> Path:
        if not base_path.exists():
            return base_path
        stem = base_path.stem
        suffix = base_path.suffix
        parent = base_path.parent
        counter = 1
        while True:
            new_filename = f"{stem}_{counter:02d}{suffix}"
            new_path = parent / new_filename
            if not new_path.exists():
                return new_path
            counter += 1


# Preprocessing configurations (kept for backward compatibility).
preprocess_configs = {
    "ser_tab": RunConfig(use_serialized_tables=True, config_profile="default"),
    "no_ser_tab": RunConfig(use_serialized_tables=False, config_profile="default"),
}

# Base configuration — uses the "economy" profile.
base_config = RunConfig(
    config_profile="economy",
    submission_file=True,
    pipeline_details="Custom pdf parsing + vDB + Router + SO CoT; llm = GPT-4o-mini",
    config_suffix="_base",
)

# Parent-document-retrieval configuration — uses OpenAI GPT-4o.
parent_document_retrieval_config = RunConfig(
    config_profile="openai_gpt4",
    parent_document_retrieval=True,
    submission_file=True,
    pipeline_details="Custom pdf parsing + vDB + Router + Parent Document Retrieval + SO CoT; llm = GPT-4o",
    config_suffix="_pdr",
)

# MinerU default configuration.
max_config_mineru = RunConfig(
    config_profile="default",
    use_serialized_tables=False,
    parent_document_retrieval=True,
    retrieval_method="hybrid",
    submission_file=True,
    pipeline_details="MinerU pdf parsing + vDB + Router + Parent Document Retrieval + reranking + SO CoT; llm = Qwen3-30B",
    config_suffix="_mineru_default",
    pdf_parser="mineru",
)

# MinerU high-performance configuration.
max_config_mineru_enhanced = RunConfig(
    config_profile="high_performance",
    use_serialized_tables=False,
    parent_document_retrieval=True,
    retrieval_method="enhanced_hybrid",
    use_bm25_db=True,
    submission_file=True,
    pipeline_details="MinerU pdf parsing + vDB + BM25 + Router + Parent Document Retrieval + triple reranking + SO CoT; llm = Qwen-Max",
    config_suffix="_mineru_enhanced",
    pdf_parser="mineru",
)

# MinerU fast configuration.
max_config_mineru_fast = RunConfig(
    config_profile="fast",
    use_serialized_tables=False,
    parent_document_retrieval=True,
    retrieval_method="hybrid",
    submission_file=True,
    pipeline_details="MinerU pdf parsing + vDB + Router + Parent Document Retrieval + fast processing; llm = Qwen-Turbo",
    config_suffix="_mineru_fast",
    pdf_parser="mineru",
)

# Docling default configuration.
max_config_docling = RunConfig(
    config_profile="default",
    use_serialized_tables=False,
    parent_document_retrieval=True,
    retrieval_method="hybrid",
    submission_file=True,
    pipeline_details="Docling pdf parsing + vDB + Router + Parent Document Retrieval + reranking + SO CoT; llm = Qwen3-30B",
    config_suffix="_docling_default",
    pdf_parser="docling",
    docling_parallel=True,
    docling_chunk_size=2,
    docling_max_workers=10,
)

# Docling high-performance configuration.
max_config_docling_enhanced = RunConfig(
    config_profile="high_performance",
    use_serialized_tables=False,
    parent_document_retrieval=True,
    retrieval_method="enhanced_hybrid",
    use_bm25_db=True,
    submission_file=True,
    pipeline_details="Docling pdf parsing + vDB + BM25 + Router + Parent Document Retrieval + triple reranking + SO CoT; llm = Qwen-Max",
    config_suffix="_docling_enhanced",
    pdf_parser="docling",
    docling_parallel=True,
    docling_chunk_size=2,
    docling_max_workers=10,
)

# OpenAI configuration.
openai_config = RunConfig(
    config_profile="openai_gpt4",
    use_serialized_tables=False,
    parent_document_retrieval=True,
    retrieval_method="hybrid",
    submission_file=True,
    pipeline_details="PDF parsing + vDB + Router + Parent Document Retrieval + reranking + SO CoT; llm = GPT-4o",
    config_suffix="_openai_gpt4",
    pdf_parser="mineru",
)

# Default configuration (kept for backward compatibility).
max_config = max_config_mineru

# Configuration registry: maps short names (and profile-style aliases)
# to the RunConfig instances above. Several keys alias the same object.
configs = {
    "base": base_config,
    "pdr": parent_document_retrieval_config,
    "max": max_config,
    "max_mineru": max_config_mineru,
    "max_mineru_enhanced": max_config_mineru_enhanced,
    "max_mineru_fast": max_config_mineru_fast,
    "max_docling": max_config_docling,
    "max_docling_enhanced": max_config_docling_enhanced,
    "openai": openai_config,
    "fast": max_config_mineru_fast,
    "default": max_config_mineru,
    "high_performance": max_config_mineru_enhanced,
    "openai_gpt4": openai_config,
    "economy": base_config,
}


if __name__ == "__main__":
    # Demo entry point: run the MinerU-based pipeline end-to-end on the
    # bundled stock-data sample (parse one PDF -> chunk -> vectorize -> QA).
    root_path = here() / "data" / "stock_data"
    print("root_path:", root_path)

    pipeline = Pipeline(root_path, run_config=max_config)

    print("4. 将pdf转化为纯markdown文本")
    pipeline.export_reports_to_markdown("【财报】中芯国际：中芯国际2024年年度报告.pdf")

    print("5. 将规整后报告分块，便于后续向量化，输出到 databases/chunked_reports")
    pipeline.chunk_reports()

    print("6. 从分块报告创建向量数据库，输出到 databases/vector_dbs")
    pipeline.create_vector_dbs()

    print("7. 处理问题并生成答案，具体逻辑取决于 run_config")
    pipeline.process_questions()

    print("完成")