import json
import os

from tqdm import tqdm
from sentence_transformers import SentenceTransformer

from core import prompt_results
from core.LLM import LLM
import core.prompt_extend as prompt_extend
from core.prompt_system import system_prompt
from data.config import (
    BASIC_GENERATED_TESTS_DIR,
    EXTEND_GENERATED_TESTS_DIR,
    PREPROCESSED_DATA_DIR,
)
from utils.static_extend import power_station_change, traverse_signal_values
from utils.test_utils import construct_extend_rag_example, construct_result_rag_example, extract_test_cases_from_output
from utils.parse_json import extract_all_json_from_output, load_json_file, load_jsonl_file
from utils.rag_utils import find_most_similar_test


def load_generation_results(generation_results_path):
    """Load a basic-generation JSONL results file and group the records by case_id.

    Args:
        generation_results_path: path to a JSONL file where each line is one
            generated test case carrying at least the fields listed below.

    Returns:
        dict mapping case_id -> list of test-case dicts restricted to the
        known generation fields.

    Raises:
        KeyError: if a record is missing any of the expected fields.
    """
    # Only these fields are carried forward; anything else in a record is dropped.
    fields = (
        "case_id",
        "subsystem",
        "function_module",
        "test_name",
        "preconditions",
        "operate_step",
        "expected_results",
        "test_environment",
        "test_level",
    )

    grouped = {}
    for record in load_jsonl_file(generation_results_path):
        grouped.setdefault(record["case_id"], []).append(
            {key: record[key] for key in fields}
        )
    return grouped


def _run_llm_round(llm, few_shot_prompt, few_shot_output, user_prompt, case_id):
    """Run one few-shot LLM generation round and parse all JSON from the reply.

    Sends a 4-message conversation (system, few-shot user, few-shot assistant,
    real user prompt). On any LLM or parsing failure the error is logged and an
    empty list is returned, so the surrounding pipeline continues best-effort.
    """
    try:
        response = llm.get_response(
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": few_shot_prompt},
                {"role": "assistant", "content": few_shot_output},
                {"role": "user", "content": user_prompt},
            ]
        )
        return extract_all_json_from_output(response)
    except Exception as e:
        print(f"Error generating results for case {case_id}: {e}")
        return []


def main(model, basic_generation_iden, extend_generation_iden):
    """Generate extended (non-smoke) test cases for every requirement.

    For each requirement that has ground-truth extended tests, each generated
    smoke ("冒烟") test is expanded in three rounds: a static/rule-based round
    whose expected results are filled in by the LLM, then two LLM few-shot
    rounds (the second one is shown the first round's output). All generated
    tests are written as JSONL under EXTEND_GENERATED_TESTS_DIR.

    Args:
        model: model identifier passed to the LLM wrapper.
        basic_generation_iden: filename of the basic-generation JSONL under
            BASIC_GENERATED_TESTS_DIR.
        extend_generation_iden: output filename under EXTEND_GENERATED_TESTS_DIR.
    """
    # Output path for the extended-generation results (JSONL, one test per line).
    os.makedirs(EXTEND_GENERATED_TESTS_DIR, exist_ok=True)
    result_file = os.path.join(EXTEND_GENERATED_TESTS_DIR, extend_generation_iden)
    all_results = []

    # Requirements, ground-truth test cases, and the previously generated basic tests.
    all_requirements = load_json_file(os.path.join(PREPROCESSED_DATA_DIR, "all_requirements.json"))
    all_test_cases = load_jsonl_file(os.path.join(PREPROCESSED_DATA_DIR, "all_test_cases.jsonl"))
    basic_generated_tests = load_generation_results(
        os.path.join(BASIC_GENERATED_TESTS_DIR, basic_generation_iden)
    )

    # NOTE(review): the previous hard-coded per-user cache_folder
    # ("/Users/wangziqi/...") made the script non-portable; rely on the
    # default Hugging Face cache location instead.
    embedding_model = SentenceTransformer("Qwen/Qwen3-Embedding-0.6B")

    # One LLM client suffices for the whole run (it was previously re-created
    # inside the inner loop for every basic test).
    allm = LLM(model)

    for case_id, case_data in tqdm(all_requirements.items(), desc="Processing cases", unit="case"):
        # Only smoke ("冒烟") tests from the basic round seed the extension.
        generated_basic_test = [
            i for i in basic_generated_tests.get(case_id, [])
            if i.get("test_level", "") == "冒烟"
        ]

        # Skip requirements that have no ground-truth non-smoke tests to extend.
        current_extend_tests = [
            i for i in case_data.get("tests", []) if i.get("test_level", "") != "冒烟"
        ]
        if not current_extend_tests:
            print(f'No extend tests for case {case_id}, skipping.')
            continue

        # Embeddings of all generated smoke tests: passed to
        # extract_test_cases_from_output so newly generated extended tests that
        # duplicate an existing one can be filtered out.
        all_test_content_embeddings = []
        for single_generated_basic_test in generated_basic_test:
            test_content = json.dumps(
                {
                    "preconditions": single_generated_basic_test.get("preconditions", ""),
                    "operate_step": single_generated_basic_test.get("operate_step", ""),
                    "expected_results": single_generated_basic_test.get("expected_results", ""),
                },
                ensure_ascii=False,
                indent=2,
            )
            all_test_content_embeddings.append(embedding_model.encode(test_content))

        for single_generated_basic_test in tqdm(generated_basic_test):
            # RAG: retrieve the most similar historical case (excluding this
            # requirement itself) to build the few-shot example.
            basic_test_content = json.dumps(
                single_generated_basic_test, ensure_ascii=False, indent=2
            )
            most_similar_basic_test = find_most_similar_test(basic_test_content, case_id, all_test_cases)
            most_similar_case_id = most_similar_basic_test.get("case_id", "")
            most_similar_extend_tests = [
                i for i in most_similar_basic_test["tests"]
                if i.get("test_level", "") != "冒烟"
            ]

            if most_similar_extend_tests:
                similar_basic_test_str = json.dumps(
                    most_similar_basic_test['basic_test'], ensure_ascii=False, indent=2
                )
                example_prompt = prompt_extend.user_prompt.format(
                    case_name=most_similar_basic_test["case_name"],
                    original_text=most_similar_basic_test["requirement"],
                    existing_basic_test=similar_basic_test_str,
                )
                example_output = prompt_extend.llm_response.format(
                    example_test_cases=json.dumps(
                        most_similar_extend_tests, ensure_ascii=False, indent=2
                    )
                )
            else:
                # Fall back to a fixed few-shot example when retrieval finds
                # no usable extended tests.
                print("No similar case found, using few-shot example.")
                example_prompt, example_output = construct_extend_rag_example("G393_BDC-16395", all_requirements)

            existing_basic_test = json.dumps(
                generated_basic_test, ensure_ascii=False, indent=2
            )
            user_prompt = prompt_extend.user_prompt.format(
                case_name=case_data.get("case_name", ""),
                original_text=case_data.get("requirement", ""),
                existing_basic_test=existing_basic_test,
            )

            # Round 0: rule-based (static) extensions, whose expected results
            # are then filled in by the LLM.
            static_extend_tests = list(power_station_change(single_generated_basic_test))
            static_extend_tests.extend(traverse_signal_values(single_generated_basic_test))

            result_user_prompt = prompt_results.user_prompt.format(
                initial_test_case=json.dumps(
                    static_extend_tests, ensure_ascii=False, indent=2
                )
            )
            result_few_shot_prompt, result_few_shot_output = construct_result_rag_example(
                most_similar_case_id, all_requirements
            )
            model_outputs = _run_llm_round(
                allm, result_few_shot_prompt, result_few_shot_output, result_user_prompt, case_id
            )
            llm_generated_test, all_test_content_embeddings = extract_test_cases_from_output(
                model_outputs, case_data, all_test_content_embeddings, generation_type='extend'
            )
            all_results.extend(llm_generated_test)

            # Round 1: first LLM pass at generating extended test cases.
            useful_extend_tests = []
            model_outputs = _run_llm_round(allm, example_prompt, example_output, user_prompt, case_id)
            llm_generated_test, all_test_content_embeddings = extract_test_cases_from_output(
                model_outputs, case_data, all_test_content_embeddings, generation_type='extend'
            )
            all_results.extend(llm_generated_test)
            useful_extend_tests.extend(llm_generated_test)

            # Round 2: second LLM pass, shown the round-1 output so it can add
            # extensions that were missed.
            model_outputs = []
            try:
                user_twice_prompt = prompt_extend.user_twice_prompt.format(
                    case_name=case_data.get("case_name", ""),
                    # BUGFIX: previously read case_data.get("original_text", ""),
                    # a key the requirements dict does not have (round 1 uses
                    # "requirement"), so round 2 always saw an empty requirement.
                    original_text=case_data.get("requirement", ""),
                    existing_basic_test=existing_basic_test,
                    existing_extended_test=json.dumps(
                        useful_extend_tests, ensure_ascii=False, indent=2
                    ),
                )
            except Exception as e:
                print(f"Error generating results for case {case_id}: {e}")
            else:
                model_outputs = _run_llm_round(
                    allm, example_prompt, example_output, user_twice_prompt, case_id
                )

            llm_generated_test, all_test_content_embeddings = extract_test_cases_from_output(
                model_outputs, case_data, all_test_content_embeddings, generation_type='extend'
            )
            all_results.extend(llm_generated_test)

    # Persist as JSONL: one generated test case per line.
    with open(result_file, "w", encoding="utf-8") as f:
        for result in all_results:
            f.write(json.dumps(result, ensure_ascii=False) + "\n")


def entry_extend(model, basic_generation_iden):
    """Run extended-test generation for *model* and return the output filename.

    BUGFIX: this previously called ``main(model, date, basic_generation_iden,
    extend_iden)`` — four positional arguments to a three-parameter function —
    which raised TypeError on every invocation.

    Args:
        model: model identifier (may contain a provider prefix, e.g.
            "deepseek/deepseek-r1-distill-llama-70b").
        basic_generation_iden: filename of the basic-generation JSONL results.

    Returns:
        The filename (not full path) of the extended-generation JSONL output.
    """
    import time

    date = time.strftime("%m%d_%H%M", time.localtime())
    # Use only the part after the provider prefix in the filename, mirroring
    # the __main__ block's convention — a "/" in the model id would otherwise
    # end up in the path and point into a nonexistent directory.
    model_name = model.split("/")[-1]
    extend_iden = f"results_{date}_{model_name}.jsonl"
    main(model, basic_generation_iden, extend_iden)
    return extend_iden


if __name__ == "__main__":
    import time

    # Full model identifier handed to the LLM client, and the short name used
    # only when composing the output filename.
    model = "deepseek/deepseek-r1-distill-llama-70b"
    model_name = "deepseek-r1-distill-llama-70b"

    # Timestamp suffix keeps successive runs from overwriting each other.
    date = time.strftime("%m%d_%H%M", time.localtime())
    basic_generation_iden = "results_1111_1342_deepseek-r1-distill-llama-70b.jsonl"

    main(
        model,
        basic_generation_iden,
        extend_generation_iden=f"results_{date}_{model_name}.jsonl",
    )

# qwen3-next-80b-a3b-instruct
# qwen2.5-72b-instruct
# qwen3-32b
# deepseek-r1-distill-qwen-32b
