# build_training_dataset.py
# python ./src/tools/build_training_dataset.py 

import glob
import json
import os
import random
from typing import Dict, List

def load_tables_from_json(directory: str) -> List[Dict[str, str]]:
    """
    Scan every .json file under *directory* and extract the "name" and
    "table" fields.

    Each file may contain either a single JSON object or a list of objects;
    entries missing either field (or that are not objects) are skipped.

    Args:
        directory: Directory containing table-metadata .json files.

    Returns:
        A list of {"name": ..., "table": ...} dicts, one per valid entry.
    """
    tables: List[Dict[str, str]] = []
    for path in glob.glob(os.path.join(directory, "*.json")):
        try:
            with open(path, 'r', encoding='utf-8') as f:
                data = json.load(f)
        except (OSError, json.JSONDecodeError) as e:
            # Skip unreadable or malformed files but keep processing the rest.
            print(f"Error reading {path}: {e}")
            continue
        # Normalize: treat a single object as a one-element list so both
        # file shapes share the extraction code below.
        items = data if isinstance(data, list) else [data]
        for item in items:
            if not isinstance(item, dict):
                continue  # guard: a list may contain non-object entries
            name = item.get("name")
            table = item.get("table")
            if name and table:
                tables.append({"name": name, "table": table})
    return tables


def load_samples(jsonl_path: str) -> List[Dict[str, str]]:
    """
    Load every record from a JSON-Lines file.

    Blank lines are ignored.  A malformed line is reported and skipped
    instead of aborting the whole file (the previous version lost every
    line after the first parse error).

    Args:
        jsonl_path: Path to the samples .jsonl file.

    Returns:
        A list of parsed sample dicts (empty if the file is unreadable).
    """
    samples: List[Dict[str, str]] = []
    try:
        with open(jsonl_path, 'r', encoding='utf-8') as f:
            for lineno, line in enumerate(f, start=1):
                line = line.strip()
                if not line:
                    continue
                try:
                    samples.append(json.loads(line))
                except json.JSONDecodeError as e:
                    # Keep going: one corrupt line must not discard the rest.
                    print(f"Error parsing {jsonl_path}:{lineno}: {e}")
    except OSError as e:
        print(f"Error reading {jsonl_path}: {e}")
    return samples


def generate_training_dataset(
    tables: List[Dict[str, str]],
    samples: List[Dict[str, str]],
    output_file: str,
    num_negative_per_sample: int = 5
):
    """
    Build a sentence-pair training dataset in JSON-Lines format.

    For every sample:
      - positive pairs (label=1.0): question vs. each table whose physical
        name appears as a substring of the sample's SQL;
      - negative pairs (label=0.0): question vs. up to
        ``num_negative_per_sample`` randomly sampled non-matching tables.
        Pass a value <= 0 to emit every non-matching table.

    Args:
        tables: Table metadata, [{"name": ..., "table": ...}, ...].
        samples: SQL samples, [{"question": ..., "sql": ...}, ...].
        output_file: Destination .jsonl path (overwritten).
        num_negative_per_sample: Cap on negatives per sample; <=0 means "all".
    """
    # Map physical table name -> "display_name table_name", the string
    # written as sentence2 for both positive and negative pairs.
    table_dict = {t['table']: f"{t['name']} {t['table']}" for t in tables}
    all_table_strs = set(table_dict.values())

    with open(output_file, 'w', encoding='utf-8') as out_f:
        for sample in samples:
            question = sample.get("question", "").strip()
            sql = sample.get("sql", "").lower()  # lowercase for case-insensitive match

            if not question or not sql:
                continue

            # NOTE(review): plain substring match — a table named "order"
            # also matches "orders"; confirm table names are unambiguous.
            matched = {
                table_str
                for table_name, table_str in table_dict.items()
                if table_name.lower() in sql
            }

            # Positive pairs, one per matched table.
            for mt in matched:
                record = {"sentence1": question, "sentence2": mt, "label": 1.0}
                out_f.write(json.dumps(record, ensure_ascii=False) + "\n")

            # Negative pairs drawn from the non-matching tables.
            negatives = list(all_table_strs - matched)
            if not negatives:
                continue
            # Honor the cap: the previous version silently ignored this
            # parameter and always wrote every negative candidate.
            if 0 < num_negative_per_sample < len(negatives):
                negatives = random.sample(negatives, num_negative_per_sample)
            for neg in negatives:
                record = {"sentence1": question, "sentence2": neg, "label": 0.0}
                out_f.write(json.dumps(record, ensure_ascii=False) + "\n")

    print(f"✅ Training dataset saved to {output_file}")


def main():
    """Script entry point: wire the three pipeline stages together."""
    # --- configuration -------------------------------------------------
    table_dir = "sql_output"
    samples_path = "data/samples.jsonl"
    dataset_path = "data/training_dataset.jsonl"
    negatives_per_sample = 19  # negative pairs generated per sample

    print("🔍 Loading table metadata...")
    table_meta = load_tables_from_json(table_dir)
    print(f"Loaded {len(table_meta)} tables.")

    print("🔍 Loading SQL samples...")
    sql_samples = load_samples(samples_path)
    print(f"Loaded {len(sql_samples)} samples.")

    print("⚙️ Generating training dataset...")
    generate_training_dataset(
        table_meta,
        sql_samples,
        dataset_path,
        num_negative_per_sample=negatives_per_sample,
    )


# Run the pipeline only when executed as a script, so the module can be
# imported (e.g. for testing) without side effects.
if __name__ == "__main__":
    main()

# To improve the embedding model's performance on semantic-to-data-source
# matching, the following data-construction principles are recommended:
# Principle                Explanation
# ✅ Conciseness           sentence2 should be concise, highlighting key entities (table/field names)
# ✅ De-proceduralize      Avoid process wording such as "extract", "compute", "filter by condition"
# ✅ Standardized naming   Use a consistent table-name format, e.g. order_full_total_day or 工单业务全景表
# ✅ Semantic alignment    Optionally add synonyms/aliases, e.g. "超期" -> "逾期", "占比" -> "比例"
# ❌ Avoid verbose text    Do not phrase sentence2 as a SQL or ETL task description
# Conclusion: concise entity-style descriptions recall markedly better than operation-style ones.