import json
import os
from pathlib import Path
from distilabel.pipeline import Pipeline
from distilabel.steps import LoadDataFromDicts
from distilabel.steps.tasks import TextGeneration
from distilabel.models.llms import OllamaLLM
from typing import List

from packages.util.file import load_file_paths
from packages.classification.qa_inserter import StartClassifyPair

from dis_qa.config import get_distilabel_config
from dis_qa.document_chunk import chunk_document
from dis_qa.model import *


def next(filepath, filename):
    # Hand the generated goldens file off to the classification stage.
    # NOTE(review): this name shadows the builtin `next`; kept as-is because
    # it is called by StartSynthesis below — consider renaming project-wide.
    StartClassifyPair(filepath, filename)


def run_pipeline(chunks: List[str], ollama_host="", model_name="gpt-oss:120b"):
    """Build and run a distilabel pipeline that turns document chunks into QA pairs.

    Each chunk becomes one "instruction" row; the LLM is asked (via the
    template below) to produce question-answer pairs about that chunk.

    Args:
        chunks: Document text fragments to generate QA pairs for.
        ollama_host: Base URL of the Ollama server.
        model_name: Ollama model identifier used for generation.

    Returns:
        The Distiset produced by the pipeline run, containing the
        structured generation results.
    """
    rows = [{"instruction": c} for c in chunks]

    with Pipeline(name="PDF_QA_Generator") as pipeline:
        loader = LoadDataFromDicts(
            name="load_chunks",
            data=rows,
        )

        # Structured output ("schema" + "format": "json") so the generation
        # comes back as directly parseable JSON.
        llm = OllamaLLM(
            model=model_name,
            host=ollama_host,
            structured_output={
                "schema": QADoc,
                "format": "json"
            },
        )

        generator = TextGeneration(
            name="generate_qa",
            system_prompt=get_system_prompt(),
            template="Document:\n\n{{ instruction  }}\n\nNow generate question-answer pairs about the document.",
            llm=llm,
            num_generations=1,
            input_batch_size=4,
        )

        loader >> generator

    # The distiset holds the generated structured data.
    return pipeline.run(
        parameters={},
        use_cache=False,
    )


def prepare():
    """Ensure the distilabel output directory exists (parents included)."""
    Path("output", get_distilabel_config().OUTPUT).mkdir(parents=True, exist_ok=True)


def StartSynthesis(filepath, extension='.txt', sourceTag=''):
    """Generate QA "goldens" for every document found under *filepath*.

    For each document: chunk it, run the distilabel pipeline per chunk,
    parse the generated JSON QA pairs, and write the rows to
    ``output/<OUTPUT>/goldens_<filename>.json`` (one file per document).

    Args:
        filepath: Directory of source documents. If the path contains an
            ``auto`` segment it is scanned directly; otherwise each
            subdirectory's ``auto/`` folder is scanned.
        extension: Extension of the document files to load.
        sourceTag: Fallback "parent" tag used when no ``auto`` segment is
            present in *filepath*.
    """
    prepare()

    config = get_distilabel_config()
    output_distilabel = f"output/{config.OUTPUT}"

    grand_path, file_paths = _collect_source_files(filepath, extension, sourceTag)
    print(f"@@@ StartSynthesis files {file_paths}")

    for doc_path in file_paths:
        filename = os.path.splitext(os.path.basename(doc_path))[0]
        print(f"@@@ start deepeval.systhesizer file {filename}")

        chunks = chunk_document(doc_path)
        # NOTE(review): only the first chunk is processed — presumably a
        # debugging limit; confirm before removing this slice.
        chunks = chunks[:1]

        result = []
        for chunk_idx, chunk in enumerate(chunks):
            try:
                # Run the pipeline on THIS chunk only. The previous code
                # passed the whole chunk list on every iteration, re-running
                # the pipeline over all chunks once per chunk while only
                # ever reading the first result row.
                distiset = run_pipeline(
                    chunks=[chunk],
                    ollama_host=config.OLLAMA_HOST,
                    model_name=config.MODEL_NAME,
                )
                result.extend(
                    _extract_qa_rows(distiset, chunk_idx, doc_path, grand_path)
                )
            except Exception as e:
                # Best-effort: one failing chunk must not abort the document.
                print(f"[ERROR] Unexpected error in chunk {chunk_idx}: {e}")

        # Per-document output file: using the document name prevents every
        # document from overwriting a single shared goldens file.
        output_path = os.path.join(output_distilabel, f"goldens_{filename}.json")
        with open(output_path, "w", encoding="utf-8") as f:
            json.dump(result, f, ensure_ascii=False, indent=2)

        print(
            f"@@@ Goldens generated and saved to {output_path} goldens: {len(result)}")

        # NOTE(review): mode == 1 returns after the FIRST document, skipping
        # any remaining files — confirm this early exit is intentional.
        if config.mode == 1:
            return
        next(output_path, filename)


def _collect_source_files(filepath, extension, sourceTag):
    """Resolve the document paths to process and their "parent" tag.

    Returns:
        (grand_path, file_paths): the parent tag and the list of file paths.
    """
    ospath = Path(filepath)
    grand_path = sourceTag
    file_paths: List[str] = []
    if "auto" in ospath.parts:
        # Path already points inside an "auto" folder; tag with the
        # grandparent directory name.
        grand_path = ospath.parent.parent.name
        file_paths = load_file_paths(filepath, extension)
    else:
        # Scan each subdirectory's auto/ folder, skipping macOS metadata.
        for sub in os.listdir(filepath):
            if sub == '.DS_Store':
                continue
            file_paths += load_file_paths(f"{filepath}/{sub}/auto/", extension)
    return grand_path, file_paths


def _extract_qa_rows(distiset, chunk_idx, doc_path, grand_path):
    """Parse one pipeline run into golden rows; return [] on any bad data.

    Args:
        distiset: Pipeline output; expects ``distiset["default"]["train"][0]``
            to carry "instruction" and "generation" fields.
        chunk_idx: Index of the chunk (used for row ids and log messages).
        doc_path: Source document path recorded on each row.
        grand_path: Parent tag recorded on each row.
    """
    if "default" not in distiset or "train" not in distiset["default"]:
        print(f"[WARN] No train dataset for chunk {chunk_idx}")
        return []
    if len(distiset["default"]["train"]) == 0:
        print(f"[WARN] Empty train dataset for chunk {chunk_idx}")
        return []

    train = distiset["default"]["train"][0]
    context_str = train.get("instruction", "")
    generation = train.get("generation", "")
    if not generation:
        print(f"[WARN] Empty generation for chunk {chunk_idx}")
        return []

    # The generation is expected to be a JSON list of {"question", "answer"}.
    try:
        qa_pairs = json.loads(generation)
    except json.JSONDecodeError as e:
        print(f"[ERROR] JSON decode failed for chunk {chunk_idx}: {e}")
        return []
    if not isinstance(qa_pairs, list):
        print(f"[WARN] Generation is not a list for chunk {chunk_idx}")
        return []

    rows = []
    for j, q in enumerate(qa_pairs):
        question = q.get("question")
        answer = q.get("answer")
        if not question or not answer:
            print(f"[WARN] Skipping malformed QA pair in chunk {chunk_idx}: {q}")
            continue
        rows.append({
            "id": f"{chunk_idx}_{j}",
            "input": question,
            "expected_output": answer,
            "actual_output": None,
            "retrieval_context": None,
            "n_chunks_per_context": 1,
            "source_file": doc_path,
            "parent": grand_path,
            "context_length": len(context_str),
            "context_str": context_str,
        })
    return rows