import json
import os
import re

from tqdm import tqdm
from sentence_transformers import SentenceTransformer
import numpy as np

from core.LLM import LLM
import core.prompt_extend as prompt_extend
from data.config import (
    BASIC_GENERATED_TESTS_DIR,
    EXTEND_GENERATED_TESTS_DIR,
    PREPROCESSED_DATA_DIR,
)


def load_rag_base(rag_case_path):
    """Read the RAG case base (a JSON mapping) from *rag_case_path*."""
    with open(rag_case_path, encoding="utf-8") as handle:
        return json.load(handle)


def load_similarity_dict(similarity_dict_path):
    """Load the precomputed requirement-similarity matrix from a JSON file."""
    with open(similarity_dict_path, encoding="utf-8") as handle:
        return json.load(handle)


def load_pin_mappings(pin_mapping_path):
    """Load the requirement-to-pin/signal mapping tables from a JSON file."""
    with open(pin_mapping_path, encoding="utf-8") as handle:
        return json.load(handle)


def load_generation_results(generation_results_path):
    """Load a JSONL file of generated tests, grouped by ``case_id``.

    Each non-blank line must be a JSON object containing at least the keys
    case_id, case_name, subsystem, function_module, preconditions,
    operate_step, expected_results, test_environment and test_level.
    Missing keys raise KeyError (fail fast on malformed records).

    Args:
        generation_results_path: path to the JSONL results file.

    Returns:
        dict mapping case_id -> list of test dicts; the source key
        "case_name" is renamed to "test_name" in each entry.
    """
    final_res_dict = {}
    with open(generation_results_path, "r", encoding="utf-8") as f:
        for line in f:
            if not line.strip():
                continue  # skip blank lines between records
            record = json.loads(line)
            # setdefault replaces the manual "if key not in dict" dance.
            final_res_dict.setdefault(record["case_id"], []).append(
                {
                    "test_name": record["case_name"],
                    "subsystem": record["subsystem"],
                    "function_module": record["function_module"],
                    "preconditions": record["preconditions"],
                    "operate_step": record["operate_step"],
                    "expected_results": record["expected_results"],
                    "test_environment": record["test_environment"],
                    "test_level": record["test_level"],
                }
            )
    return final_res_dict


def find_most_similar_from_dict(requirement_key, similarity_dict, top_k=1):
    """Return the ``top_k`` (key, score) pairs most similar to *requirement_key*.

    Self-matches are excluded; results are sorted by score, highest first.

    Raises:
        ValueError: if *requirement_key* has no entry in *similarity_dict*.
    """
    try:
        candidates = similarity_dict[requirement_key]
    except KeyError:
        raise ValueError(
            f"Requirement '{requirement_key}' not found in similarity_dict"
        )

    ranked = sorted(
        ((key, score) for key, score in candidates.items() if key != requirement_key),
        key=lambda pair: pair[1],
        reverse=True,
    )
    return ranked[:top_k]


def find_most_similar_test(target_embedding, case_id, content_dicts):
    """Find the stored test whose embedding is most cosine-similar to the target.

    Args:
        target_embedding: 1-D embedding vector of the query test.
        case_id: requirement id to exclude — entries whose "req_id" equals
            this value are skipped so a test never matches its own requirement.
        content_dicts: mapping key -> {"req_id": ..., "embedding": [...], ...}.

    Returns:
        (best_key, best_score): key of the closest entry and its cosine
        similarity, or (None, -1) when every entry is excluded or the
        mapping is empty.
    """
    target_vec = np.asarray(target_embedding)
    # Hoist the loop-invariant query-vector norm out of the loop.
    target_norm = np.linalg.norm(target_vec)

    best_score = -1
    best_content = None

    for key, item in content_dicts.items():
        if case_id == item["req_id"]:
            continue
        emb = np.array(item["embedding"])
        # Cosine similarity between the query and this stored embedding.
        sim = np.dot(target_vec, emb) / (target_norm * np.linalg.norm(emb))

        if sim > best_score:
            best_score = sim
            best_content = key
    return best_content, best_score


def check_presence(target_embedding, compare_embeddings, similarity_score=0.995):
    """Return True if *target_embedding* near-duplicates any stored embedding.

    Args:
        target_embedding: 1-D embedding vector of the candidate test.
        compare_embeddings: iterable of previously seen embedding vectors.
        similarity_score: cosine-similarity threshold above which two
            embeddings count as duplicates (previously hard-coded; the
            default preserves the original 0.995 behavior).

    Returns:
        True when any cosine similarity exceeds the threshold, else False
        (including for an empty *compare_embeddings*).
    """
    target_vec = np.asarray(target_embedding)
    # Hoist the loop-invariant target norm out of the loop.
    target_norm = np.linalg.norm(target_vec)
    for emb in compare_embeddings:
        sim = np.dot(target_vec, emb) / (target_norm * np.linalg.norm(emb))
        if sim > similarity_score:
            return True
    return False


def fix_multiline_json_string(s: str) -> str:
    """Repair JSON whose string values contain literal newlines.

    Replaces real line breaks inside double-quoted string values with the
    escape sequence ``\\n`` (also collapsing surrounding whitespace) so that
    ``json.loads`` can parse LLM output that pretty-printed values across
    lines. Text outside string values is left untouched.
    """

    # Replacement callback for every matched double-quoted string value.
    def replace_match(match):
        # match.group(0) is the whole quoted string, e.g. "xxx\n   yyy"
        content = match.group(1)  # content without the surrounding quotes
        # Turn a newline (plus adjacent whitespace) into a literal \n escape.
        content = re.sub(r"\s*\n\s*", r"\\n", content)
        return '"' + content + '"'

    # Match double-quoted strings, honoring backslash escapes, across lines
    # (non-greedy, DOTALL so '.' also matches newlines inside values).
    fixed = re.sub(r'"((?:[^"\\]|\\.)*?)"', replace_match, s, flags=re.DOTALL)
    return fixed


def extract_all_json_from_output(output: str):
    """Extract and parse every ```json ... ``` fenced block in *output*.

    Args:
        output: raw text produced by the LLM.

    Returns:
        List of successfully parsed JSON objects (dicts or lists). Fenced
        blocks that are empty or fail to parse are silently skipped.
    """
    # All ```json ... ``` fences, non-greedy, spanning lines.
    fenced_blocks = re.findall(r"```json\s*(.*?)\s*```", output, re.DOTALL)

    parsed = []
    for block in fenced_blocks:
        # Normalize literal newlines inside string values before parsing.
        candidate = fix_multiline_json_string(block.strip())
        if not candidate:
            continue
        try:
            parsed.append(json.loads(candidate))
        except json.JSONDecodeError:
            # Invalid JSON blocks are deliberately dropped without a warning.
            pass
    return parsed


def strip_none_from_json(
    json_str: str, as_string: bool = True, ensure_ascii: bool = False
):
    """Remove every key whose value is None (JSON null) or "" from JSON data.

    Args:
        json_str: a JSON string, or an already-parsed dict/list.
        as_string: if True, return the cleaned data as a JSON string;
            otherwise return the cleaned Python object (dict / list).
        ensure_ascii: forwarded to ``json.dumps`` when serializing.

    Returns:
        The cleaned JSON string or Python object, per *as_string*.

    Raises:
        TypeError: if *json_str* is not a str, dict, or list.
    """
    if isinstance(json_str, str):
        data = json.loads(json_str)
    elif isinstance(json_str, (dict, list)):
        data = json_str
    else:
        # BUG FIX: an unsupported type previously fell through both branches
        # and raised a confusing NameError on `data`; fail fast instead.
        raise TypeError(
            f"json_str must be str, dict, or list, got {type(json_str).__name__}"
        )
    cleaned = _remove_none_values(data)
    return json.dumps(cleaned, ensure_ascii=ensure_ascii) if as_string else cleaned


def _remove_none_values(obj):
    if isinstance(obj, dict):
        new = {}
        for k, v in obj.items():
            if v is None:
                continue
            elif v == "":
                continue
            new[k] = _remove_none_values(v)
        return new
    if isinstance(obj, list):
        return [_remove_none_values(i) for i in obj]
    return obj


def construct_few_shot_example_prompt(few_shot_case_id, extracted_cases, rag_base):
    """Build a (user prompt, assistant reply) few-shot pair for one case.

    Looks up the case whose "case_id" equals *few_shot_case_id*, splits its
    RAG tests into smoke ("冒烟") and extend levels, and renders the
    prompt/response templates from ``prompt_extend``. Returns None when no
    case matches.
    """
    target = next(
        (c for c in extracted_cases if c["case_id"] == few_shot_case_id), None
    )
    if target is None:
        return None

    requirement_text = target["original_text"]
    rag_tests = rag_base[requirement_text]["Tests"]

    smoke_tests = [t for t in rag_tests if t.get("test_level", "") == "冒烟"]
    extend_tests = [t for t in rag_tests if t.get("test_level", "") != "冒烟"]

    few_shot_prompt = prompt_extend.user_prompt.format(
        example_name=target["case_name"],
        example_content=requirement_text,
        existing_basic_test=smoke_tests,
    )
    few_shot_output = prompt_extend.example_response.format(
        example_test_cases=json.dumps(extend_tests, ensure_ascii=False, indent=2)
    )
    return few_shot_prompt, few_shot_output


def main(model, basic_generation_iden, extend_iden):
    """Generate "extend"-level test cases for each requirement case.

    Pipeline: load preprocessed cases, RAG bases, pin/signal mappings and
    previously generated smoke ("冒烟") tests; for each case, use the most
    similar stored test as a few-shot example, ask the LLM (up to two
    attempts) for extended tests, drop near-duplicate outputs via embedding
    cosine similarity, and write all accepted tests to a JSONL result file.

    Args:
        model: model identifier handed to the LLM wrapper.
        basic_generation_iden: filename (inside BASIC_GENERATED_TESTS_DIR)
            holding the smoke-test generation results.
        extend_iden: output filename (inside EXTEND_GENERATED_TESTS_DIR).
    """
    expr_name = extend_iden

    basic_generated_test_dir = BASIC_GENERATED_TESTS_DIR
    generated_test_dir = EXTEND_GENERATED_TESTS_DIR
    preprocessed_data_dir = PREPROCESSED_DATA_DIR
    os.makedirs(generated_test_dir, exist_ok=True)

    result_file = os.path.join(generated_test_dir, expr_name)

    all_results = []
    # Load all cases
    with open(
        os.path.join(preprocessed_data_dir, "all_cases.jsonl"), "r", encoding="utf-8"
    ) as f:
        extracted_cases = [json.loads(line) for line in f if line.strip()]

    print(f"Total cases: {len(extracted_cases)}")

    rag_base = load_rag_base(os.path.join(preprocessed_data_dir, "cases4rag.json"))
    similarity_dict = load_similarity_dict(
        os.path.join(preprocessed_data_dir, "similarity_dict_qwen3_0.6b.json")
    )
    pin_mappings = load_pin_mappings(
        os.path.join(preprocessed_data_dir, "pin_mappings.json")
    )
    # Drop null/empty entries before embedding the mapping tables in prompts.
    pin_mappings = strip_none_from_json(pin_mappings, False)

    basic_generated_tests = load_generation_results(
        os.path.join(basic_generated_test_dir, basic_generation_iden)
    )
    test_rag_base = load_rag_base(
        os.path.join(preprocessed_data_dir, "case_dict_qwen3_0.6b.json")
    )
    embedding_model = SentenceTransformer("Qwen/Qwen3-Embedding-0.6B")

    # Render the pin / signal-variable mapping tables as fenced JSON blocks
    # for the system prompt.
    pin_list = pin_mappings.get("需求与引脚对照表", [])
    var_list = pin_mappings.get("需求与信号变量对照表", [])
    pin_json = json.dumps(pin_list, ensure_ascii=False, indent=2)
    var_json = json.dumps(var_list, ensure_ascii=False, indent=2)
    pin_block = f"```json\n{pin_json}\n```"
    var_block = f"```json\n{var_json}\n```"

    all_results = []
    for idx, case_data in enumerate(
        tqdm(extracted_cases, desc="Processing cases", unit="case")
    ):
        system_promt = prompt_extend.system_prompt.format(
            pin_mappings=pin_block,
            variable_mappings=var_block,
        )
        case_id = case_data.get("case_id", "")

        # NOTE(review): hard-coded case-id allowlist limits the run to two
        # cases — looks like leftover debug scoping; confirm before real runs.
        if case_id not in ["G393_BDC-2568", "G393_BDC-2582"]:
            continue

        case_content = case_data.get("original_text", "")
        # Smoke-level tests previously generated for this case.
        generated_basic_test = [
            i
            for i in basic_generated_tests.get(case_id, [])
            if i.get("test_level", "") == "冒烟"
        ]

        try:
            current_tests = rag_base[case_data.get("original_text", "")].get(
                "Tests", []
            )
            current_basic_tests = [
                i for i in current_tests if i.get("test_level", "") == "冒烟"
            ]
            current_extend_tests = [
                i for i in current_tests if i.get("test_level", "") != "冒烟"
            ]
        # NOTE(review): bare except (presumably guarding a KeyError on the
        # rag_base lookup) swallows every error — narrow it if possible.
        except:
            print(f"No current tests found for case {case_data.get('case_id', '')}.")
            continue

        if not current_extend_tests:
            print(f'No extend tests for case {case_data.get("case_id", "")}, skipping.')
            continue

        # Embeddings of every smoke test plus each accepted extend test,
        # used below to reject near-duplicate generations.
        all_test_content_embeddings = []

        for single_generated_basic_test in generated_basic_test:
            # Encode each smoke test so generated extend tests that merely
            # restate a smoke test can be filtered out later.
            test_content = json.dumps(
                {
                    "preconditions": single_generated_basic_test.get(
                        "preconditions", ""
                    ),
                    "operate_step": single_generated_basic_test.get("operate_step", ""),
                    "expected_results": single_generated_basic_test.get(
                        "expected_results", ""
                    ),
                },
                ensure_ascii=False,
                indent=2,
            )
            all_test_content_embeddings.append(embedding_model.encode(test_content))

        for single_generated_basic_test in tqdm(generated_basic_test):
            # Retrieve the stored test most similar to this smoke test to use
            # as the few-shot example.
            case_content = json.dumps(
                single_generated_basic_test, ensure_ascii=False, indent=2
            )
            target_embedding = embedding_model.encode(case_content)

            similar_basic_test = find_most_similar_test(
                target_embedding, case_id, test_rag_base
            )[0]
            similar_case_id = test_rag_base[similar_basic_test]["req_id"]
            similar_case_name = [
                i for i in extracted_cases if i.get("case_id", "") == similar_case_id
            ][0]["case_name"]

            req_case_req_content = test_rag_base[similar_basic_test]["requirement"]
            similar_extend_tests = (
                test_rag_base[similar_basic_test]["extend_tests"]
                if similar_basic_test
                else []
            )

            if similar_extend_tests:
                similar_basic_test_str = json.dumps(
                    similar_basic_test, ensure_ascii=False, indent=2
                )

                example_prompt = prompt_extend.user_prompt.format(
                    example_name=similar_case_name,
                    example_content=req_case_req_content,
                    existing_basic_test=similar_basic_test_str,
                )

                example_output = prompt_extend.example_response.format(
                    example_test_cases=json.dumps(
                        similar_extend_tests, ensure_ascii=False, indent=2
                    )
                )
            else:
                # Fall back to a fixed few-shot example when the retrieved
                # test has no extend-level cases.
                print("No similar case found, using few-shot example.")
                example_prompt, example_output = construct_few_shot_example_prompt(
                    "G393_BDC-16395", extracted_cases, rag_base
                )

            existing_basic_test = json.dumps(
                generated_basic_test, ensure_ascii=False, indent=2
            )

            user_prompt = prompt_extend.user_prompt.format(
                case_name=case_data.get("case_name", ""),
                original_text=case_data.get("original_text", ""),
                existing_basic_test=existing_basic_test,
            )
            # Attempt generation twice: the second pass feeds back the tests
            # accepted so far and asks for more.
            # NOTE(review): useful_extend_tests is only assigned after a
            # successful first attempt; if attempt 0 produced no valid JSON,
            # attempt 1 raises NameError on useful_extend_tests below.
            for i in range(2):
                allm = LLM(model)
                try:
                    if i == 0:
                        response = allm.get_response(
                            messages=[
                                {"role": "system", "content": system_promt},
                                {"role": "user", "content": example_prompt},
                                {"role": "assistant", "content": example_output},
                                {"role": "user", "content": user_prompt},
                            ]
                        )
                    else:
                        user_twice_promt = prompt_extend.user_twice_prompt.format(
                            case_name=case_data.get("case_name", ""),
                            original_text=case_data.get("original_text", ""),
                            existing_basic_test=existing_basic_test,
                            existing_extended_test=json.dumps(
                                useful_extend_tests, ensure_ascii=False, indent=2
                            ),
                        )
                        response = allm.get_response(
                            messages=[
                                {"role": "system", "content": system_promt},
                                {"role": "user", "content": example_prompt},
                                {"role": "assistant", "content": example_output},
                                {"role": "user", "content": user_twice_promt},
                            ]
                        )
                    model_outputs = extract_all_json_from_output(response)
                except json.JSONDecodeError:
                    model_outputs = None
                print(model_outputs)

                if isinstance(model_outputs, list) and model_outputs:
                    # Keep only the first fenced JSON block (a list of tests).
                    model_outputs = model_outputs[0]
                else:
                    print(
                        f"No valid JSON output for case {case_data.get('case_id', '')}, skipping."
                    )
                    continue

                useful_extend_tests = []
                for result_item in model_outputs:
                    # Map the response to the actual fields in GeneratedTest model
                    subsystem = result_item.get("subsystem", "")
                    function_module = result_item.get("function_module", "")
                    test_name = result_item.get("test_name", "")
                    preconditions = result_item.get("preconditions", "")
                    operate_step = result_item.get(
                        "operate_step", result_item.get("test_content", f"")
                    )
                    expected_results = result_item.get("expected_results", "")
                    test_level = result_item.get("test_level", "")

                    # Reject the generated extend test if it near-duplicates
                    # (cosine similarity > 0.995) any embedding seen so far.
                    extend_test_content = json.dumps(
                        {
                            "preconditions": preconditions,
                            "operate_step": operate_step,
                            "expected_results": expected_results,
                        },
                        ensure_ascii=False,
                        indent=2,
                    )
                    if check_presence(
                        embedding_model.encode(extend_test_content),
                        all_test_content_embeddings,
                    ):
                        print("Duplicate test case detected, skipping.")
                        continue
                    all_test_content_embeddings.append(
                        embedding_model.encode(extend_test_content)
                    )
                    useful_extend_tests.append(result_item)

                    # Re-tag "基础" (basic) labels as "扩展" (extend) since
                    # this stage is meant to produce extend-level tests only.
                    if test_level == "基础":
                        test_level = "扩展"
                    single_res = {
                        "case_id": case_data.get("case_id", ""),
                        "case_name": case_data.get("case_name", ""),
                        "preconditions": preconditions,
                        "operate_step": operate_step,
                        "expected_results": expected_results,
                        "prompt": user_prompt,
                        "response": response,
                        "subsystem": subsystem,
                        "function_module": function_module,
                        "test_name": test_name,
                        "test_environment": "车辆测试环境",
                        "test_level": test_level,
                    }
                    all_results.append(single_res)

            # Rewrite the full result file after each smoke test so partial
            # progress is persisted incrementally.
            with open(result_file, "w", encoding="utf-8") as f:
                for result in all_results:
                    f.write(json.dumps(result, ensure_ascii=False) + "\n")


def entry_extend(model, basic_generation_iden):
    """Run extend-test generation with a timestamped output filename.

    Args:
        model: model identifier handed to main().
        basic_generation_iden: filename of the smoke-test generation results.

    Returns:
        The output filename (extend_iden) used for this run.
    """
    import time

    date = time.strftime("%m%d_%H%M", time.localtime())
    extend_iden = f"results_{date}_{model}.jsonl"
    # BUG FIX: main() takes (model, basic_generation_iden, extend_iden); the
    # previous call passed an extra `date` positional argument, which raised
    # TypeError on every invocation.
    main(model, basic_generation_iden, extend_iden)
    return extend_iden


if __name__ == "__main__":
    # Ad-hoc entry point: run extend-test generation for one fixed model
    # against a previously produced smoke-test results file.
    model = "deepseek/deepseek-r1-distill-llama-70b"
    model_name = "deepseek-r1-distill-llama-70b"
    import time

    # Timestamp used to build a unique output filename for this run.
    date = time.strftime("%m%d_%H%M", time.localtime())
    basic_generation_iden = "results_1102_0125_qwen3-32b.jsonl"
    main(model, basic_generation_iden, extend_iden=f"results_{date}_{model_name}.jsonl")

# Other models previously tried:
# qwen3-next-80b-a3b-instruct
# qwen2.5-72b-instruct
# qwen3-32b
# deepseek-r1-distill-qwen-32b
