import json
import os
import time

from tqdm import tqdm
from sentence_transformers import SentenceTransformer
from loguru import logger

from core import prompt_basic, prompt_results
from core.LLM import LLM
import core.prompt_extend as prompt_extend
from core.prompt_system import system_prompt
from data.config import (
    EXTEND_GENERATED_TESTS_DIR,
    PREPROCESSED_DATA_DIR,
)
from utils.static_extend import power_station_change, switch_state_negated, traverse_signal_values
from utils.test_utils import construct_basic_rag_example, construct_extend_rag_example, construct_result_rag_example, extract_test_cases_from_output
from utils.parse_json import extract_all_json_from_output, load_json_file, load_jsonl_file
from utils.rag_utils import cal_content_embedding, find_most_similar_requirement, find_most_similar_test, get_tests_embeddings, pass_test_case


# Shared sentence-embedding model used for RAG retrieval of similar cases/tests.
# NOTE(review): the cache folder was previously hard-coded to a developer's
# personal absolute path (/Users/wangziqi/...), breaking the script on any
# other machine. Passing None lets sentence-transformers use its standard
# cache location, overridable via the SENTENCE_TRANSFORMERS_HOME env var.
embedding_model = SentenceTransformer(
    "Qwen/Qwen3-Embedding-0.6B",
    cache_folder=os.environ.get("SENTENCE_TRANSFORMERS_HOME"),
)


def main(model, expr_iden):
    """Run the two-stage LLM test-case generation pipeline.

    For every preprocessed test case: (1) generate smoke ("冒烟") test
    cases via the LLM with a RAG few-shot example; (2) for each generated
    smoke test, derive extended test cases — first by static expansion
    (LLM fills in expected results only), then via two LLM rounds. All
    generated cases are appended to a JSONL result file.

    Args:
        model: Model identifier string handed to the ``LLM`` wrapper.
        expr_iden: Output file name (experiment identifier) created under
            ``EXTEND_GENERATED_TESTS_DIR``.
    """
    # Output path for all generated test cases.
    result_file = os.path.join(EXTEND_GENERATED_TESTS_DIR, expr_iden)
    all_results = []

    all_test_cases = load_jsonl_file(os.path.join(PREPROCESSED_DATA_DIR, "all_test_cases_detailed.jsonl"))

    num = 0  # count of cases actually processed (i.e. not skipped)
    for idx, case_data in tqdm(enumerate(all_test_cases)):
        case_id = case_data.get("case_id", "")
        case_name = case_data.get("case_name", "")
        requirement = case_data.get("requirement", "")

        # Skip cases whose signals need no processing.
        if pass_test_case(case_data):
            logger.info(f'用例ID为{case_id}包含无需处理的信号，跳过该用例')
            continue
        num += 1
        # NOTE(review): a leftover debug `continue` used to sit here, making
        # the whole generation pipeline below unreachable (the script wrote
        # an empty result file). It has been removed.
        logger.info(f'开始处理第{idx+1}个用例，ID为{case_id}；\n需求为：{requirement}')

        # Fresh LLM wrapper per case.
        allm = LLM(model)

        # 1. Generate smoke test cases.
        logger.info('1. 开始生成冒烟测试用例')

        # Retrieve the stored case whose requirement is most similar to the
        # current one, to serve as the few-shot RAG example.
        most_similar_requirement_case = find_most_similar_requirement(requirement, all_test_cases)
        if most_similar_requirement_case:
            basic_example_prompt, basic_example_response = construct_basic_rag_example(most_similar_requirement_case)
        else:
            logger.info('未找到相似用例，使用默认冒烟测试用例示例')
            basic_example_prompt = prompt_basic.example_prompt
            basic_example_response = prompt_basic.example_response

        basic_user_prompt = prompt_basic.user_prompt.format(
            case_name=case_name,
            requirement=requirement,
        )
        try:
            logger.info('调用大模型生成冒烟测试用例')
            basic_llm_response = allm.get_response(
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": basic_example_prompt},
                    {"role": "assistant", "content": basic_example_response},
                    {"role": "user", "content": basic_user_prompt},
                ],
            )
            model_outputs = extract_all_json_from_output(basic_llm_response)
        except Exception as e:
            logger.error(f'生成冒烟测试用例时出错：{e}')
            model_outputs = []

        if not model_outputs:
            logger.warning(f'用例ID为{case_id}未生成任何冒烟测试用例，跳过该用例的扩展测试用例生成')
            continue

        llm_generated_test, _ = extract_test_cases_from_output(model_outputs, case_data, generation_type='basic')
        all_results.extend(llm_generated_test)

        # 2. Generate extended test cases.
        logger.info('2. 开始生成扩展测试用例')

        generated_basic_tests = [i for i in llm_generated_test if i.get("test_level", "") == "冒烟"]
        if not generated_basic_tests:
            logger.warning(f'用例ID为{case_id}未生成任何冒烟测试用例，跳过该用例的扩展测试用例生成')
            continue

        # Embeddings of every basic/extended test generated so far — fed back
        # into extract_test_cases_from_output for de-duplication.
        all_test_content_embeddings = get_tests_embeddings(generated_basic_tests)

        logger.info(f'共有{len(generated_basic_tests)}条冒烟测试用例，开始生成扩展测试用例')
        for basic_idx, single_generated_basic_test in enumerate(generated_basic_tests):
            logger.info(f'开始处理第{basic_idx+1}条冒烟测试用例，名称为：{single_generated_basic_test.get("test_name", "")}')
            # Serialize the smoke test once; reused for retrieval and prompts.
            basic_test_content = json.dumps(
                single_generated_basic_test, ensure_ascii=False, indent=2
            )

            # Find the most similar stored smoke test (excluding this case)
            # to use as the extension few-shot example.
            most_similar_basic_test = find_most_similar_test(basic_test_content, case_id, all_test_cases)
            most_similar_extend_tests = most_similar_basic_test.get("extend_tests", [])

            if most_similar_extend_tests:
                extend_example_prompt, extend_example_response = construct_extend_rag_example(most_similar_basic_test)
            else:
                logger.info('未找到相似用例，使用默认扩展测试用例示例')
                extend_example_prompt, extend_example_response = prompt_extend.example_prompt, prompt_extend.example_response

            extend_user_prompt = prompt_extend.user_prompt.format(
                case_name=case_name,
                original_text=requirement,
                existing_basic_test=basic_test_content,
            )

            # 2.1 Statically derive extended tests, then ask the LLM only for
            # the expected results of those tests.
            logger.info('2.1. 使用静态方法生成扩展测试用例')
            static_extend_tests = list(power_station_change(single_generated_basic_test))
            static_extend_tests.extend(traverse_signal_values(single_generated_basic_test))
            static_extend_tests.extend(switch_state_negated(single_generated_basic_test))

            result_user_prompt = prompt_results.user_prompt.format(
                initial_test_case=json.dumps(
                    static_extend_tests, ensure_ascii=False, indent=2
                )
            )
            result_example_prompt, result_example_response = construct_result_rag_example(most_similar_basic_test)

            try:
                logger.info('调用大模型生成扩展测试用例的预期结果')
                result_response = allm.get_response(
                    messages=[
                        {"role": "system", "content": system_prompt},
                        {"role": "user", "content": result_example_prompt},
                        {"role": "assistant", "content": result_example_response},
                        {"role": "user", "content": result_user_prompt},
                    ]
                )
                model_outputs = extract_all_json_from_output(result_response)
            except Exception as e:
                logger.error(f'生成扩展测试用例时出错：{e}')
                model_outputs = []

            llm_generated_test, _ = extract_test_cases_from_output(model_outputs, case_data, [], generation_type='extend')
            all_test_content_embeddings.extend(get_tests_embeddings(llm_generated_test))
            all_results.extend(llm_generated_test)

            # 2.2 First LLM round of extended test generation.
            logger.info('2.2. 第一轮大模型生成的扩展测试用例')

            useful_extend_tests = []
            try:
                logger.info('调用大模型生成扩展测试用例')
                response = allm.get_response(
                    messages=[
                        {"role": "system", "content": system_prompt},
                        {"role": "user", "content": extend_example_prompt},
                        {"role": "assistant", "content": extend_example_response},
                        {"role": "user", "content": extend_user_prompt},
                    ]
                )
                model_outputs = extract_all_json_from_output(response)
            except Exception as e:
                logger.error(f'生成扩展测试用例时出错：{e}')
                model_outputs = []

            llm_generated_test, all_test_content_embeddings = extract_test_cases_from_output(model_outputs, case_data, all_test_content_embeddings, generation_type='extend')
            all_results.extend(llm_generated_test)
            useful_extend_tests.extend(llm_generated_test)

            # 2.3 Second LLM round, conditioned on what round one produced.
            logger.info('2.3. 第二轮大模型生成的扩展测试用例')

            try:
                logger.info('调用大模型生成扩展测试用例')
                user_twice_prompt = prompt_extend.user_twice_prompt.format(
                    case_name=case_data.get("case_name", ""),
                    original_text=case_data.get("original_text", ""),
                    existing_basic_test=basic_test_content,
                    existing_extended_test=json.dumps(
                        useful_extend_tests, ensure_ascii=False, indent=2
                    ),
                )
                response = allm.get_response(
                    messages=[
                        {"role": "system", "content": system_prompt},
                        {"role": "user", "content": extend_example_prompt},
                        {"role": "assistant", "content": extend_example_response},
                        {"role": "user", "content": user_twice_prompt},
                    ]
                )
                model_outputs = extract_all_json_from_output(response)
            except Exception as e:
                logger.error(f'生成扩展测试用例时出错：{e}')
                model_outputs = []

            llm_generated_test, all_test_content_embeddings = extract_test_cases_from_output(model_outputs, case_data, all_test_content_embeddings, generation_type='extend')
            all_results.extend(llm_generated_test)
        logger.info(f'完成用例ID为{case_id}的扩展测试用例生成')

    print(f'本次共处理用例{num}条')
    # Ensure the output directory exists before writing.
    os.makedirs(EXTEND_GENERATED_TESTS_DIR, exist_ok=True)
    with open(result_file, "w", encoding="utf-8") as f:
        for result in all_results:
            f.write(json.dumps(result, ensure_ascii=False) + "\n")

if __name__ == "__main__":
    model = "deepseek/deepseek-r1-distill-llama-70b:free"
    model_name = "deepseek-70b"
    date = time.strftime("%m%d_%H%M", time.localtime())
    
    main(model, f'{date}_{model_name}.jsonl')

