import json
import os
import time

os.environ["TOKENIZERS_PARALLELISM"] = "false"

from tqdm import tqdm
from loguru import logger

from core.LLM import LLM
from core.prompt_system import system_prompt
import core.prompt_basic as prompt_basic
from data.config import BASIC_GENERATED_TESTS_DIR, PREPROCESSED_DATA_DIR
from utils.parse_json import extract_all_json_from_output, load_json_file
from utils.rag_utils import find_most_similar_requirement
from utils.test_utils import extract_test_cases_from_output


def _build_example_messages(requirement, all_requirements):
    """Build the few-shot example (user prompt, assistant reply) for one case.

    Tries to retrieve the most similar requirement from *all_requirements* and
    uses its smoke-level ('冒烟') tests as the example output; falls back to
    the static example shipped with the prompt module when retrieval fails or
    finds nothing.
    """
    try:
        most_similar = find_most_similar_requirement(requirement, all_requirements)
    except Exception as exc:
        # Retrieval is best-effort, but a bare `except:` would also swallow
        # KeyboardInterrupt/SystemExit and hide real bugs — log and fall back.
        logger.warning(f"Similarity search failed, falling back to static example: {exc}")
        most_similar = None

    if not most_similar:
        logger.info("No similar case found, using second example.")
        return prompt_basic.example_prompt, prompt_basic.example_response

    # Only smoke-level ('冒烟') tests are used as the few-shot demonstration.
    similar_basic_tests = [
        t for t in most_similar['tests'] if t.get('test_level', '') == '冒烟'
    ]
    example_prompt = prompt_basic.user_prompt.format(
        case_name=most_similar['case_name'],
        original_text=most_similar['requirement'],
    )
    example_output = prompt_basic.llm_response.format(
        test_cases=json.dumps(similar_basic_tests, ensure_ascii=False, indent=2)
    )
    return example_prompt, example_output


def main(model, basic_generation_iden):
    """Generate basic ('冒烟') test cases for every preprocessed requirement.

    For each requirement a one-shot example is built from the most similar
    requirement (RAG), the LLM is queried with a 4-message conversation, and
    the extracted test cases are appended to a JSON-lines file named
    *basic_generation_iden* inside BASIC_GENERATED_TESTS_DIR.
    """
    # Ensure the output directory exists before the (long) generation run
    # so we do not fail at the very end when writing results.
    os.makedirs(BASIC_GENERATED_TESTS_DIR, exist_ok=True)
    result_file = os.path.join(BASIC_GENERATED_TESTS_DIR, basic_generation_iden)
    all_results = []

    # Load all preprocessed requirements.
    all_requirements = load_json_file(os.path.join(PREPROCESSED_DATA_DIR, 'all_requirements.json'))

    # NOTE(review): client is constructed once and reused — every call passes
    # the full message list, so no per-case state is assumed; confirm the LLM
    # wrapper is stateless across get_response calls.
    allm = LLM(model)

    for case_data in tqdm(all_requirements.values(), desc="Processing cases", unit="case"):
        case_id = case_data.get('case_id', '')
        logger.info(f"Processing case {case_id}")

        example_prompt, example_output = _build_example_messages(
            case_data.get('requirement', ''), all_requirements
        )

        user_prompt = prompt_basic.user_prompt.format(
            case_name=case_data.get('case_name', ''),
            original_text=case_data.get('requirement', '')
        )

        response = allm.get_response(messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": example_prompt},
            {"role": "assistant", "content": example_output},
            {"role": "user", "content": user_prompt}
        ])

        try:
            model_outputs = extract_all_json_from_output(response)
        except json.JSONDecodeError:
            logger.error(f"JSON decode error for case {case_id}")
            logger.error(f"Response: {response}")
            continue

        if not model_outputs:
            logger.warning(f"No valid JSON output for case {case_id}")
            logger.error(f"Response: {response}")
            continue

        llm_generated_test, _ = extract_test_cases_from_output(model_outputs, case_data)
        all_results.extend(llm_generated_test)

    with open(result_file, 'w', encoding='utf-8') as f:
        for result in all_results:
            f.write(json.dumps(result, ensure_ascii=False) + '\n')


def entry_basic(model, model_name):
    """Run basic test generation and return the produced JSONL file name.

    The file name embeds a `MMDD_HHMM` timestamp plus *model_name* so
    successive runs never overwrite each other.
    """
    timestamp = time.strftime("%m%d_%H%M", time.localtime())
    output_name = f'results_{timestamp}_{model_name}.jsonl'
    main(model, output_name)
    return output_name


if __name__ == "__main__":
    # Default model configuration for standalone runs of this script.
    default_model = "deepseek/deepseek-r1-distill-llama-70b"
    default_model_name = 'deepseek-r1-distill-llama-70b'
    entry_basic(default_model, default_model_name)