#!/usr/bin/env python3
# multi_extraction.py
# usage:
# python ./src/multi_extraction.py --threads_num 4 

import os
import sys
import json
import time
import argparse
import threading
from concurrent.futures import ThreadPoolExecutor
from typing import List, Dict

# Add the project root directory to sys.path so local packages resolve.
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, project_root)

import pandas as pd
from config import project_config
from query.get_sql_answer import get_sql_answer

# process_sql_answer is assumed to live in another module; a placeholder is defined here otherwise (example).
def process_sql_answer(raw_sql: str) -> str:
    """Normalize a raw SQL answer.

    Returns the answer with surrounding whitespace removed, or an empty
    string when no answer was produced (``None`` input).
    """
    return "" if raw_sql is None else raw_sql.strip()

# Global lock serializing the write+flush pair in worker_thread across threads.
file_write_lock = threading.Lock()


def count_lines(filepath: str) -> int:
    """Return the number of lines in a text (JSONL) file."""
    total = 0
    with open(filepath, 'r', encoding='utf-8') as fh:
        for _ in fh:
            total += 1
    return total


def worker_thread(thread_id: int, lines: List[Dict], output_path: str, llm: str):
    """Process the assigned question records, appending each result to
    *output_path* as a JSON line immediately after it is produced.

    Args:
        thread_id: 1-based identifier, used only in log messages.
        lines: question records, each expected to carry 'id' and 'question' keys.
        output_path: per-thread JSONL output file (opened in append mode; main()
            truncates it before the pool starts).
        llm: model level forwarded to get_sql_answer ('low'/'normal'/'high').
    """
    try:
        with open(output_path, 'a', encoding='utf-8') as f:
            for item in lines:
                # .get so a record without an 'id' still yields a result row
                # instead of killing the thread (key access used to sit outside
                # the per-item try and aborted all remaining work on KeyError).
                question_id = item.get('id')
                try:
                    question_text = item['question']
                    raw_sql = get_sql_answer(question_text, llm)
                    if raw_sql is None:
                        # No answer produced: best-effort skip, nothing written
                        # (matches original behavior).
                        continue
                    processed_sql = process_sql_answer(raw_sql)
                    result = {"id": question_id, "sql": processed_sql}
                except Exception as e:
                    print(f"线程 {thread_id} 处理问题 ID={question_id} 时出错: {e}", file=sys.stderr)
                    result = {"id": question_id, "sql": ""}

                json_line = json.dumps(result, ensure_ascii=False) + '\n'

                # Serialize write+flush; threads write distinct files today, but
                # the lock keeps this safe if an output path is ever shared.
                with file_write_lock:
                    f.write(json_line)
                    f.flush()  # make each result durable immediately

            # All of this thread's records are done.
            print(f"线程 {thread_id} 完成，结果已实时保存至 {output_path}")

    except Exception as e:
        print(f"线程 {thread_id} 打开或写入文件 {output_path} 时出错: {e}", file=sys.stderr)


def main():
    """CLI entry point.

    Splits the questions JSONL across --threads_num worker threads; each
    thread writes its results in real time to its own ``partN_<name>`` file,
    so no two threads ever share an output file.
    """
    parser = argparse.ArgumentParser(description="多线程提取 SQL 查询结果（实时写入）")
    parser.add_argument('--threads_num', type=int, default=1, help='并发线程数量 (默认: 1)')
    parser.add_argument('--llm', type=str, default="low", choices=['low', 'normal', 'high'], help='指定大模型级别：low, normal, 或 high。')
    args = parser.parse_args()

    questions_path = project_config.questions_path
    data_answer_path = project_config.data_answer_path

    if not os.path.exists(questions_path):
        print(f"错误：问题文件不存在: {questions_path}", file=sys.stderr)
        sys.exit(1)

    total_lines = count_lines(questions_path)
    n_threads = args.threads_num
    llm = args.llm

    # More threads than questions leaves some threads idle; treat it as a
    # configuration error and abort.
    if n_threads > total_lines:
        print(f"警告：线程数 ({n_threads}) 超过问题总数 ({total_lines})，可能导致资源浪费。", file=sys.stderr)
        print("建议设置线程数不超过问题总数。", file=sys.stderr)
        sys.exit(1)

    # Load every question record up front.
    data = pd.read_json(questions_path, lines=True, encoding='utf-8')
    questions = data.to_dict('records')  # List[dict]

    # Even split; the first `remainder` threads take one extra item each.
    chunk_size = len(questions) // n_threads
    remainder = len(questions) % n_threads

    output_dir = os.path.dirname(data_answer_path)  # e.g. ".../result"
    if output_dir:
        # exist_ok=True makes a separate os.path.exists() pre-check redundant.
        os.makedirs(output_dir, exist_ok=True)
    output_filename = os.path.basename(data_answer_path)  # e.g. "results.jsonl"

    # Compute each thread's output path once, then truncate (or create) it so
    # results from a previous run can't be appended to.
    part_files = [
        os.path.join(output_dir, f"part{i + 1}_{output_filename}")
        for i in range(n_threads)
    ]
    for part_file in part_files:
        with open(part_file, 'w', encoding='utf-8'):
            pass  # create/empty the file

    with ThreadPoolExecutor(max_workers=n_threads) as executor:
        futures = []
        for i in range(n_threads):
            # Stagger thread start-up by i seconds — presumably to avoid
            # bursting the LLM backend; TODO confirm this is still needed.
            time.sleep(i)
            start_idx = i * chunk_size + min(i, remainder)
            end_idx = start_idx + chunk_size + (1 if i < remainder else 0)
            chunk = questions[start_idx:end_idx]
            futures.append(
                executor.submit(worker_thread, i + 1, chunk, part_files[i], llm)
            )

        # Block until done and re-raise any exception from a worker.
        for future in futures:
            future.result()

    print(f"✅ 所有 {n_threads} 个线程已完成。")

# Run the CLI only when executed as a script (not on import).
if __name__ == '__main__':
    main()