import json
import os

from dotenv import load_dotenv
import yaml
from dis_qa.test_llm import test_llm
from dis_qa.distilabel_qa import StartSynthesis
from dis_qa.config import set_distilabel_config, DistilabelConf


# The local Ollama service breaks when requests go through the proxy,
# so exempt local/LAN addresses from proxying.
os.environ["NO_PROXY"] = "127.0.0.1,localhost,192.168.3.80"

# Load environment variables from a .env file (e.g. WORKDIR) into os.environ.
load_dotenv()

def loadConf():
    """Load ``config.yml`` and apply its distilabel settings globally.

    Reads ``config.yml`` from the current working directory (i.e. the
    directory the script was launched from — this runs before any chdir),
    then installs the ``DistilabelConf`` section as the global distilabel
    configuration.

    Raises:
        FileNotFoundError: if ``config.yml`` is not present in the CWD.
        KeyError: if the file lacks a top-level ``DistilabelConf`` mapping.
        yaml.YAMLError: if the file is not valid YAML.
    """
    with open('config.yml', 'r', encoding="utf-8") as f:
        yaml_conf = yaml.safe_load(f)

    set_distilabel_config(config=DistilabelConf(**yaml_conf["DistilabelConf"]))



if __name__ == "__main__":
    # Load configuration first: loadConf() reads config.yml relative to the
    # launch directory, before we switch to the working directory below.
    loadConf()

    # Switch to the working directory; defaults to the current directory
    # when the WORKDIR environment variable is unset.
    work_dir = os.getenv('WORKDIR', '.')
    os.chdir(work_dir)

    # Kick off the QA-pair synthesis pipeline over the given document folder
    # (path is resolved relative to work_dir after the chdir above).
    StartSynthesis("w1_1/auto/")