

# filter_documents_by_state: selects documents by parsing state (the value of `progress`).
# Commands:
#   failed:  only process failed documents (progress = -1)
#   success: only process successfully parsed documents (progress = 1)
#   ready:   only process ready / unparsed documents (progress = 0)
#
# Usage examples:
#   python3 ragflow_parser.py failed            # reparse failed documents
#   python3 ragflow_parser.py success           # reparse successful documents
#   python3 ragflow_parser.py ready             # parse ready documents
#   python3 ragflow_parser.py parse --reparse   # parse all ready AND completed documents
#   python3 ragflow_parser.py stop              # stop documents currently being parsed


import requests
import sys
import time

# ========== Configuration ==========
ragflow_url = "http://18.0.44.51:8900/"  # RAGFlow server base URL
dataset_id = "e8c13a4e2a5b11f086640242ac1f0006"  # Knowledge-base (dataset) ID
auth_token = "Bearer ragflow-dmMjFiMjQwMmIxMzExZjBhNmNiMDI0Mm"  # API key, sent as-is in the Authorization header

# Common headers for every API request.
headers = {
    "Authorization": auth_token,
    "Content-Type": "application/json"
}

# Batch sizes per document speed class (ids submitted per request).
batch_size_map = {
    'fast': 20,  # fast documents (docx/txt/md/csv/xlsx)
    'slow': 5    # slow documents (pdf/pptx/xls)
}
# Delay between batches per speed class, in seconds.
delay_time_map = {
    'fast': 60,  # wait after each fast batch
    'slow': 300  # wait after each slow batch
}

max_retries = 3      # max attempts per HTTP request
retry_delay = 5      # seconds to wait between failed attempts

def get_all_documents():
    """Fetch every document record in the dataset, following pagination.

    Pages through the RAGFlow "list documents" endpoint (100 records per
    page) until an empty page is returned.

    :return: list of document dicts; on a request error, whatever was
             collected so far is returned (an error message is printed).
    """
    # Strip the trailing slash so the joined URL has no "//" in the path.
    url = f"{ragflow_url.rstrip('/')}/api/v1/datasets/{dataset_id}/documents"
    page = 1
    page_size = 100
    all_docs = []

    while True:
        try:
            # timeout guards against a dead connection hanging the script.
            response = requests.get(
                url,
                headers=headers,
                params={"page": page, "page_size": page_size},
                timeout=30,
            )
            response.raise_for_status()
        except requests.exceptions.RequestException as e:
            print(f"[错误] 获取文档列表时出错: {e}")
            break

        data = response.json()
        docs = data.get("data", {}).get("docs", [])

        # An empty page means pagination is exhausted.
        if not docs:
            break

        all_docs.extend(docs)
        page += 1

    return all_docs

def stop_parsing(document_list):
    """Cancel parsing for documents that are currently in progress.

    "In progress" means 0 < progress < 1. Batches of ids are sent to the
    DELETE .../chunks endpoint (RAGFlow's stop-parsing call), each batch
    retried up to ``max_retries`` times.

    :param document_list: document dicts with at least "id" and "progress"
    """
    in_progress_docs = [doc for doc in document_list if 0 < doc.get("progress", 0) < 1]

    if not in_progress_docs:
        print("[提示] 没有正在解析中的文档")
        return

    doc_ids = [doc["id"] for doc in in_progress_docs]
    print(f"[停止解析] 将停止 {len(doc_ids)} 个正在解析的文档")

    # The endpoint is the same for every batch; build it once (and avoid
    # the "//" produced by the trailing slash on ragflow_url).
    url = f"{ragflow_url.rstrip('/')}/api/v1/datasets/{dataset_id}/chunks"

    for i in range(0, len(doc_ids), batch_size_map['slow']):
        batch = doc_ids[i:i + batch_size_map['slow']]
        payload = {"document_ids": batch}
        retries = 0
        while retries < max_retries:
            try:
                # timeout so a hung connection cannot stall the loop forever
                response = requests.delete(url, json=payload, headers=headers, timeout=60)
                if response.status_code == 200 and response.json().get("code") == 0:
                    print(f"[停止解析] 已提交 {len(batch)} 个文档停止解析")
                    break
                else:
                    raise Exception(f"状态码: {response.status_code}, 响应: {response.text}")
            except Exception as e:
                retries += 1
                print(f"[停止解析] 请求失败({retries}/{max_retries}): {e}")
                if retries < max_retries:
                    time.sleep(retry_delay)
                else:
                    print(f"[停止解析] 达到最大重试次数，跳过该批文档")

def parse_documents(document_list):
    """Submit documents for parsing, batched by expected parse speed.

    Documents are split into a fast queue (docx/txt/md/csv/xlsx) and a slow
    queue (pdf/pptx/xls); each queue uses its own batch size and inter-batch
    delay from ``batch_size_map`` / ``delay_time_map``. Unknown extensions
    default to the fast queue. The fast queue is processed first.

    :param document_list: document dicts with at least "id" and "name"
    """
    fast_docs = []
    slow_docs = []

    # Classify by file extension (case-insensitive).
    for doc in document_list:
        name = doc.get("name", "")
        if name.lower().endswith(('.docx', '.txt', '.md', '.csv', '.xlsx')):
            fast_docs.append(doc["id"])
        elif name.lower().endswith(('.pdf', '.pptx', '.xls')):
            slow_docs.append(doc["id"])
        else:
            print(f"[未知类型] {name}，默认加入快速队列")
            fast_docs.append(doc["id"])

    print(f"[分类] 快速文档: {len(fast_docs)} 个 (docx/txt/md/csv/xlsx)")
    print(f"[分类] 慢速文档: {len(slow_docs)} 个 (pdf/pptx/xls)")

    # Same endpoint for every batch; build once (trailing-slash safe).
    url = f"{ragflow_url.rstrip('/')}/api/v1/datasets/{dataset_id}/chunks"

    def process_batch(doc_ids, batch_size, delay_seconds, label=""):
        """Submit doc_ids in batches with retries; return the count submitted."""
        total = len(doc_ids)
        processed = 0
        for i in range(0, total, batch_size):
            batch = doc_ids[i:i + batch_size]
            payload = {"document_ids": batch}
            retries = 0
            while retries < max_retries:
                try:
                    # timeout prevents a dead connection from stalling the run
                    response = requests.post(url, json=payload, headers=headers, timeout=60)
                    if response.status_code == 200 and response.json().get("code") == 0:
                        print(f"[{label}] 已提交 {len(batch)} 个文档进行解析")
                        processed += len(batch)
                        print(f"[进度] 已处理 {processed}/{total} 文档")
                        # Throttle between batches — but don't waste up to
                        # delay_seconds sleeping after the final batch.
                        if i + batch_size < total:
                            time.sleep(delay_seconds)
                        break
                    else:
                        raise Exception(f"状态码: {response.status_code}, 响应: {response.text}")
                except Exception as e:
                    retries += 1
                    print(f"[{label}] 请求失败({retries}/{max_retries}): {e}")
                    if retries < max_retries:
                        time.sleep(retry_delay)
                    else:
                        print(f"[{label}] 达到最大重试次数，跳过该批文档")
        return processed

    if fast_docs:
        print("[开始] 处理快速文档...")
        process_batch(fast_docs, batch_size=batch_size_map['fast'], delay_seconds=delay_time_map['fast'], label="快速文档")

    if slow_docs:
        print("[开始] 处理慢速文档...")
        process_batch(slow_docs, batch_size=batch_size_map['slow'], delay_seconds=delay_time_map['slow'], label="慢速文档")

    print("[完成] 所有文档已提交解析")

def filter_documents_by_state(document_list, state):
    """Return the ids of documents whose parse progress equals *state*.

    :param document_list: document dicts with "id" and optionally "progress"
    :param state: target progress value (0 = ready, 1 = done, -1 = failed)
    :return: list of matching document ids, in input order

    NOTE: a document with no "progress" key defaults to -1, i.e. it is
    treated as failed.
    """
    return [doc["id"] for doc in document_list if doc.get("progress", -1) == state]

def main():
    """CLI entry point: dispatch on the first positional argument.

    Commands:
        stop    -- cancel documents currently being parsed
        parse   -- parse ready documents; --reparse also includes completed ones
        failed / success / ready -- (re)parse documents in that state
    """
    args = sys.argv[1:]

    if not args or args[0] not in ["stop", "parse", "failed", "success", "ready"]:
        print("Usage: python3 ragflow_parser.py [stop|parse|failed|success|ready] [--reparse]")
        sys.exit(1)

    command = args[0]
    reparse_flag = "--reparse" in args

    print("[加载中] 正在从服务器获取文档列表...")
    all_docs = get_all_documents()

    if command == "stop":
        stop_parsing(all_docs)
        return

    # Per-command: filter banner, progress states to select, empty-result message.
    if command == "parse":
        filter_msg = f"[筛选] 正在筛选可解析的文档...（{'包含' if reparse_flag else '不包含'} 已完成文档）"
        states = [0, 1] if reparse_flag else [0]
        empty_msg = "[提示] 没有可重新解析的文档"
    elif command == "failed":
        filter_msg = "[筛选] 正在筛选失败的文档..."
        states = [-1]
        empty_msg = "[提示] 没有失败的文档"
    elif command == "success":
        filter_msg = "[筛选] 正在筛选成功的文档..."
        states = [1]
        empty_msg = "[提示] 没有成功的文档"
    else:  # ready
        filter_msg = "[筛选] 正在筛选就绪的文档..."
        states = [0]
        empty_msg = "[提示] 没有就绪的文档"

    print(filter_msg)
    selected_ids = []
    for state in states:
        selected_ids.extend(filter_documents_by_state(all_docs, state))

    if not selected_ids:
        print(empty_msg)
    else:
        # Use a set: the original `id in list` membership test made the
        # selection O(n^2) over the document count.
        id_set = set(selected_ids)
        parse_documents([doc for doc in all_docs if doc["id"] in id_set])

if __name__ == "__main__":
    main()
