"""
RAGFlow触发文档解析
"""
import requests
import time
import json
import concurrent.futures
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
##批量触发ragflow文档解析
# === 配置区域 ===
RAGFLOW_BASE_URL = "http://129.....173.89:80/api/v1"
API_KEY = "ragflow-UxYmUyNmU...ExZjBiNWI0N2EzOW"
KNOWLEDGE_BASE_ID = "425db196ae6...bfc97a39dd136524"

# === 创建带重试机制的会话 ===
session = requests.Session()
retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
session.mount("http://", HTTPAdapter(max_retries=retries))
session.mount("https://", HTTPAdapter(max_retries=retries))

headers = {
    "Authorization": f"Bearer {API_KEY}",
    "Content-Type": "application/json"
}


def get_all_documents_complete(knowledge_base_id):
    """
    Fetch every document in the knowledge base by paging through the
    documents endpoint until an empty or short page is returned.

    Args:
        knowledge_base_id: RAGFlow dataset id to list.

    Returns:
        A list of dicts with keys: id, name, chunk_count, run, status.
    """
    collected = []
    page_no = 1
    per_page = 100
    list_url = f"{RAGFLOW_BASE_URL}/datasets/{knowledge_base_id}/documents"

    print("🔍 正在获取文档列表...")

    while True:
        try:
            resp = session.get(
                list_url,
                headers=headers,
                params={"page": page_no, "page_size": per_page},
                timeout=30,
            )

            if resp.status_code != 200:
                print(f"❌ 获取文档列表失败: {resp.status_code} - {resp.text}")
                break

            payload = resp.json()
            if payload.get('code') != 0 or 'data' not in payload:
                print(f"❌ API响应格式异常: {payload}")
                break

            page_data = payload['data']
            batch = page_data.get('docs', [])
            total_count = page_data.get('total', 'unknown')

            print(f"📄 第 {page_no} 页获取到 {len(batch)} 个文档，总计: {total_count}")

            if not batch:
                break

            # Keep only dict entries; normalize to the fields we need.
            collected.extend(
                {
                    'id': item.get('id', ''),
                    'name': item.get('name', item.get('location', 'Unknown')),
                    'chunk_count': item.get('chunk_count', 0),
                    'run': item.get('run', 'UNSTART'),
                    'status': item.get('status', '1'),
                }
                for item in batch
                if isinstance(item, dict)
            )

            # A short page means we've reached the end of the listing.
            if len(batch) < per_page:
                break

            page_no += 1
            time.sleep(0.5)  # be gentle with the API between pages

        except Exception as e:
            print(f"💥 获取文档列表时发生异常: {str(e)}")
            break

    print(f"🎯 总共获取到 {len(collected)} 个文档")
    return collected


def trigger_document_parsing_batch(knowledge_base_id, document_ids):
    """
    Ask RAGFlow to parse a batch of documents with a single POST.

    Args:
        knowledge_base_id: RAGFlow dataset id.
        document_ids: list of document ids to parse.

    Returns:
        True on success, the string "PARTIAL_RUNNING" when the API reports
        code 102 (some documents already parsing), False on any failure.
    """
    try:
        url = f"{RAGFLOW_BASE_URL}/datasets/{knowledge_base_id}/chunks"
        resp = session.post(
            url,
            headers=headers,
            json={"document_ids": document_ids},
            timeout=60,
        )

        if resp.status_code != 200:
            print(f"❌ 批量触发解析失败 [{resp.status_code}]: {resp.text}")
            return False

        result = resp.json()
        api_code = result.get('code')

        if api_code == 0:
            print(f"✅ 已批量触发 {len(document_ids)} 个文档解析")
            return True
        if api_code == 102:
            # Some of the documents are already being parsed.
            print(f"⏳ 部分文档正在解析中")
            return "PARTIAL_RUNNING"

        print(f"❌ 批量触发解析API响应异常: {result}")
        return False

    except Exception as e:
        print(f"💥 批量触发解析时发生异常: {str(e)}")
        return False

def get_document_status_safe(knowledge_base_id, document_id):
    """
    Safely fetch one document's parsing status via the document *list*
    endpoint (avoids the single-document detail endpoint, which has issues
    with PDF content — hence "safe").

    Bug fix: the original requested only a single page of 500 documents,
    so any document beyond the first 500 could never be found. We now page
    through the full listing until the document is found or the list ends.

    Args:
        knowledge_base_id: RAGFlow dataset id.
        document_id: id of the document to look up.

    Returns:
        A dict with keys chunk_count / run / status, or None when the
        document cannot be found or the request fails.
    """
    try:
        documents_url = f"{RAGFLOW_BASE_URL}/datasets/{knowledge_base_id}/documents"
        page = 1
        page_size = 500

        while True:
            params = {"page": page, "page_size": page_size}
            response = session.get(documents_url, headers=headers, params=params, timeout=30)

            if response.status_code != 200:
                return None

            response_data = response.json()
            if response_data.get('code') != 0 or 'data' not in response_data:
                return None

            docs_list = response_data['data'].get('docs', [])
            if not docs_list:
                break

            # Look for the target document on this page.
            for doc in docs_list:
                if doc.get('id') == document_id:
                    return {
                        'chunk_count': doc.get('chunk_count', 0),
                        'run': doc.get('run', 'UNSTART'),
                        'status': doc.get('status', '1')
                    }

            # A short page means the listing is exhausted.
            if len(docs_list) < page_size:
                break
            page += 1

        print(f"⚠️ 在文档列表中未找到文档: {document_id}")
        return None

    except Exception as e:
        print(f"💥 获取文档状态时发生异常: {str(e)}")
        return None


def wait_for_batch_completion(knowledge_base_id, documents, max_wait_time=600):
    """
    Poll until every document in *documents* is parsed or failed, or the
    timeout elapses.

    Bug fix: the original referenced `remaining_docs` after the while loop;
    if `max_wait_time <= 0` the loop body never runs and that line raised
    NameError. The timeout message also reported a stale count. Both are
    fixed by (re)computing the remaining set outside the loop.

    Args:
        knowledge_base_id: RAGFlow dataset id.
        documents: list of document dicts (must have 'id' and 'name').
        max_wait_time: overall timeout in seconds.

    Returns:
        True when all documents completed, False on timeout.
    """
    print(f"⏳ 等待 {len(documents)} 个文档解析完成...")

    start_time = time.time()
    completed_docs = set()
    failed_docs = set()
    # Initialized up front so the timeout message below is well-defined
    # even when max_wait_time <= 0.
    remaining_docs = list(documents)

    while time.time() - start_time < max_wait_time:
        remaining_docs = [doc for doc in documents
                          if doc['id'] not in completed_docs and doc['id'] not in failed_docs]

        if not remaining_docs:
            print("✅ 所有文档解析完成！")
            return True

        print(
            f"📊 检查进度: {len(completed_docs)}/{len(documents)} 完成, {len(failed_docs)} 失败, {len(remaining_docs)} 进行中")

        # Check at most 10 documents per cycle to limit API load.
        for doc in remaining_docs[:10]:
            status_info = get_document_status_safe(knowledge_base_id, doc['id'])
            if not status_info:
                continue

            chunk_count = status_info.get('chunk_count', 0)
            run_status = status_info.get('run', 'UNSTART')

            if run_status == 'DONE' and chunk_count > 0:
                print(f"✅ 文档完成: {doc['name']} ({chunk_count} chunks)")
                completed_docs.add(doc['id'])
            elif run_status == 'FAILED':
                print(f"❌ 文档失败: {doc['name']}")
                failed_docs.add(doc['id'])
            # RUNNING / PROCESSING / UNSTART and other states: keep waiting.

        elapsed = int(time.time() - start_time)
        print(f"🔄 已等待 {elapsed} 秒，{len(remaining_docs)} 个文档仍在处理中...")
        time.sleep(30)  # poll every 30 seconds

    # Recompute so the timeout message reflects the latest check cycle.
    remaining_docs = [doc for doc in documents
                      if doc['id'] not in completed_docs and doc['id'] not in failed_docs]
    print(f"⏰ 等待超时: {len(remaining_docs)} 个文档未完成")
    return False


def parse_document_status(doc_info):
    """
    Classify a document record into (status_text, category, needs_parsing).

    Args:
        doc_info: document dict with optional 'chunk_count' and 'run' keys,
            or a falsy value when the record is unavailable.

    Returns:
        A 3-tuple: human-readable status text, a category string
        (UNKNOWN / COMPLETED / RUNNING / FAILED / PENDING), and a bool
        that is True when the document should be (re-)triggered.
    """
    if not doc_info:
        return "unknown", "UNKNOWN", True

    chunks = doc_info.get('chunk_count', 0)
    run = doc_info.get('run', 'UNSTART')

    # Done only counts when chunks were actually produced.
    if run == 'DONE' and chunks > 0:
        return f"{run} ({chunks} chunks)", "COMPLETED", False

    detail = f"{run} (chunks: {chunks})"
    if run in ('RUNNING', 'PROCESSING'):
        return detail, "RUNNING", False
    if run == 'FAILED':
        return detail, "FAILED", True
    # UNSTART or any other state: still pending.
    return detail, "PENDING", True


def process_documents_with_concurrency(knowledge_base_id, wait_for_completion=False,
                                       batch_size=10, concurrency=5):
    """
    Fetch all documents, then trigger parsing for the unparsed ones in
    sequential batches of *batch_size*.

    Args:
        knowledge_base_id: RAGFlow dataset id to process.
        wait_for_completion: when True, poll each batch until it finishes
            before moving on; otherwise fire-and-forget with a short delay
            between batches.
        batch_size: number of documents sent per trigger request.
        concurrency: NOTE(review): accepted and echoed in the startup
            banner but never actually used below — batches are processed
            strictly sequentially. Confirm whether concurrent triggering
            was intended.
    """
    print("🔍 获取所有文档...")
    all_documents = get_all_documents_complete(knowledge_base_id)

    if not all_documents:
        print("❌ 没有获取到任何文档")
        return

    print("\n📊 分析文档状态...")
    pending_docs = []

    # Keep only documents that parse_document_status says still need parsing.
    for doc in all_documents:
        status, category, needs_parsing = parse_document_status(doc)

        if needs_parsing:
            pending_docs.append({
                'id': doc['id'],
                'name': doc['name'],
                'status': status
            })

    print(f"\n📋 文档统计:")
    print(f"  总共文档: {len(all_documents)}")
    print(f"  待处理文档: {len(pending_docs)}")

    if not pending_docs:
        print("🎉 所有文档都已解析完成！")
        return

    print(f"\n🚀 开始处理文档 (批次大小: {batch_size}, 并发数: {concurrency})...")

    # Ceiling division: number of batches needed to cover all pending docs.
    total_batches = (len(pending_docs) + batch_size - 1) // batch_size
    success_count = 0

    for batch_num in range(total_batches):
        start_idx = batch_num * batch_size
        end_idx = min(start_idx + batch_size, len(pending_docs))
        batch_docs = pending_docs[start_idx:end_idx]

        print(f"\n{'=' * 50}")
        print(f"📦 第 {batch_num + 1}/{total_batches} 批: {len(batch_docs)} 个文档")
        print(f"{'=' * 50}")

        # List the documents in this batch.
        for i, doc in enumerate(batch_docs, 1):
            print(f"  {i}. {doc['name']}")

        # Trigger parsing for the whole batch in one API call.
        document_ids = [doc['id'] for doc in batch_docs]
        result = trigger_document_parsing_batch(knowledge_base_id, document_ids)

        # "PARTIAL_RUNNING" (API code 102) is treated as success here.
        if result is True or result == "PARTIAL_RUNNING":
            success_count += len(batch_docs)

            if wait_for_completion:
                print(f"\n⏳ 等待本批次文档解析完成...")
                if wait_for_batch_completion(knowledge_base_id, batch_docs):
                    print(f"✅ 本批次所有文档解析完成")
                else:
                    print(f"⚠️ 本批次部分文档未完成")
        else:
            print(f"❌ 本批次触发失败")

        # Delay between batches (longer when we just waited for completion).
        if batch_num < total_batches - 1:
            delay = 10 if wait_for_completion else 5
            print(f"\n⏸️ 等待{delay}秒后处理下一批...")
            time.sleep(delay)

    print(f"\n🎯 处理完成:")
    print(f"  成功触发: {success_count}/{len(pending_docs)}")

    if not wait_for_completion:
        print(f"\n💡 提示: 您选择了快速模式，文档已在后台解析")
        print(f"  请稍后重新运行脚本检查解析状态")


def main():
    """
    Interactive entry point: ask the user for batch size, concurrency and
    execution mode, then process the configured knowledge base.

    Bug fix: the two custom-value prompts used bare ``except:`` clauses,
    which also swallow KeyboardInterrupt/SystemExit; they are narrowed to
    ``except ValueError:`` — the only exception ``int()`` raises on bad
    input.
    """
    print("=" * 60)
    print("RAGFlow 文档解析触发器 - 并发控制版")
    print("=" * 60)

    print("📊 系统信息:")
    print(f"  - 知识库ID: {KNOWLEDGE_BASE_ID}")
    print(f"  - 总文档数: 477个")

    print("\n请配置处理参数:")

    # Batch size selection
    print("\n📦 选择批次大小:")
    print("1. 小批次 (5个文档/批) - 最稳定")
    print("2. 中批次 (10个文档/批) - 推荐")
    print("3. 大批次 (20个文档/批) - 较快")
    print("4. 自定义批次大小")

    batch_choice = input("请选择 (默认: 2): ").strip() or "2"

    if batch_choice == "1":
        batch_size = 5
    elif batch_choice == "2":
        batch_size = 10
    elif batch_choice == "3":
        batch_size = 20
    elif batch_choice == "4":
        try:
            batch_size = int(input("请输入自定义批次大小 (1-50): ").strip())
            batch_size = max(1, min(50, batch_size))  # clamp to 1..50
        except ValueError:
            batch_size = 10
            print("⚠️ 输入无效，使用默认值10")
    else:
        batch_size = 10

    # Concurrency selection (passed through to the processor)
    print(f"\n⚡ 选择并发数 (同时处理的文档数量):")
    print("1. 低并发 (3个) - 最稳定")
    print("2. 中并发 (5个) - 推荐")
    print("3. 高并发 (10个) - 较快")
    print("4. 自定义并发数")

    concurrency_choice = input("请选择 (默认: 2): ").strip() or "2"

    if concurrency_choice == "1":
        concurrency = 3
    elif concurrency_choice == "2":
        concurrency = 5
    elif concurrency_choice == "3":
        concurrency = 10
    elif concurrency_choice == "4":
        try:
            concurrency = int(input("请输入自定义并发数 (1-20): ").strip())
            concurrency = max(1, min(20, concurrency))  # clamp to 1..20
        except ValueError:
            concurrency = 5
            print("⚠️ 输入无效，使用默认值5")
    else:
        concurrency = 5

    # Execution mode selection
    print("\n🎯 选择执行模式:")
    print("1. 快速模式 (触发后立即继续)")
    print("2. 等待模式 (等待每批文档解析完成)")

    wait_choice = input("请选择 (默认: 1): ").strip() or "1"
    wait_for_completion = (wait_choice == "2")

    print(f"\n📋 配置总结:")
    print(f"  - 批次大小: {batch_size} 个文档/批")
    print(f"  - 并发数: {concurrency} 个文档同时处理")
    print(f"  - 执行模式: {'等待完成' if wait_for_completion else '快速触发'}")

    confirm = input("\n确认开始处理? (Y/n): ").strip().lower()
    if confirm in ['', 'y', 'yes']:
        process_documents_with_concurrency(
            KNOWLEDGE_BASE_ID,
            wait_for_completion,
            batch_size,
            concurrency
        )
    else:
        print("操作已取消")


# Standard script entry guard.
if __name__ == "__main__":
    main()