import json
import os
from typing import List, Dict


def find_train_files(root_dir: str) -> List[str]:
    """Recursively collect every train.json file under a directory tree.

    Args:
        root_dir: Path of the directory to search from.

    Returns:
        Paths of all train.json files found (one per containing directory).
    """
    return [
        os.path.join(dirpath, "train.json")
        for dirpath, _, filenames in os.walk(root_dir)
        if "train.json" in filenames
    ]


def load_and_merge_data(file_paths: List[str]) -> List[Dict]:
    """Load several train.json files and merge their records into one list.

    Files whose top-level JSON value is not a list are skipped with a notice;
    files that cannot be read or parsed are reported and skipped, so the merge
    is best-effort and never raises for a single bad file.

    Args:
        file_paths: Paths of the JSON files to load.

    Returns:
        One list containing the records of every successfully loaded file,
        in input order.
    """
    merged_data = []
    for path in file_paths:
        try:
            with open(path, "r", encoding="utf-8") as f:
                data = json.load(f)
        # Narrow catch: only I/O and JSON-parse failures are expected here.
        # The original bare `except Exception` would also hide programming
        # errors (NameError, KeyError, ...), which should surface instead.
        except (OSError, json.JSONDecodeError) as e:
            print(f"加载 {path} 失败：{str(e)}")
            continue
        if isinstance(data, list):
            merged_data.extend(data)
            print(f"成功加载 {path}，合并 {len(data)} 条数据")
        else:
            print(f"跳过 {path}：文件内容不是列表格式")
    return merged_data


def convert_to_llamafactory_format(original_data: List[Dict]) -> List[Dict]:
    """Convert raw poem-annotation records to the LLaMA-Factory schema.

    Args:
        original_data: Records carrying title/content/keywords/trans/emotion
            keys.

    Returns:
        Records shaped as instruction/input/output dicts, as expected by
        LLaMA-Factory training data.
    """

    def _convert(record: Dict) -> Dict:
        # The prompt embeds the poem title; the poem body itself becomes
        # the "input" field, and the annotations form a structured "output".
        return {
            "instruction": f"请解析这个古文《{record['title']}》诗句并生成结构化数据",
            "input": record["content"],
            "output": {
                "keywords": record["keywords"],
                "translation": record["trans"],
                "emotion_analysis": record["emotion"],
            },
        }

    return [_convert(record) for record in original_data]
def main():
    """Find, merge and convert all train.json files, then save the result."""
    # Input/output locations (example paths; adjust as needed).
    input_dir = "./train-data"
    output_dir = "./converted_data"

    # Make sure the destination directory exists.
    os.makedirs(output_dir, exist_ok=True)

    # 1. Locate every train.json under the input tree.
    train_files = find_train_files(input_dir)
    print(f"共找到 {len(train_files)} 个数据文件")

    # 2. Merge their records into a single list.
    original_data = load_and_merge_data(train_files)
    print(f"总计加载 {len(original_data)} 条原始数据")

    # 3. Reshape into the LLaMA-Factory schema.
    converted_data = convert_to_llamafactory_format(original_data)

    # 4. Persist the converted dataset as pretty-printed UTF-8 JSON.
    output_path = os.path.join(output_dir, "llama_train_data_2.json")
    with open(output_path, "w", encoding="utf-8") as f:
        json.dump(converted_data, f, ensure_ascii=False, indent=2)

    print(f"转换完成！结果已保存至 {output_path}")


if __name__ == "__main__":
    main()
