import json
import os
import re

from core.LLM import LLM
from tqdm import tqdm
import core.prompt_basic as prompt_basic
from data.config import BASIC_GENERATED_TESTS_DIR, PREPROCESSED_DATA_DIR

def load_rag_base(rag_case_path):
    """Load the RAG case base from a JSON file and return it as a dict."""
    with open(rag_case_path, encoding='utf-8') as handle:
        return json.load(handle)

def load_similarity_dict(similarity_dict_path):
    """Load the precomputed pairwise-similarity mapping from a JSON file."""
    with open(similarity_dict_path, encoding='utf-8') as handle:
        return json.load(handle)

def find_most_similar_from_dict(requirement_key, similarity_dict, top_k=1):
    """Return the ``top_k`` (key, similarity) pairs most similar to ``requirement_key``.

    The key's similarity to itself is excluded. Pairs are ordered by
    descending similarity score.

    Raises:
        ValueError: if ``requirement_key`` is not present in ``similarity_dict``.
    """
    try:
        neighbours = similarity_dict[requirement_key]
    except KeyError:
        raise ValueError(f"Requirement '{requirement_key}' not found in similarity_dict")

    # Drop the self-match, then rank the rest by score (highest first).
    candidates = [(key, score) for key, score in neighbours.items() if key != requirement_key]
    candidates.sort(key=lambda pair: pair[1], reverse=True)
    return candidates[:top_k]


def fix_multiline_json_string(s: str) -> str:
    """Repair JSON text whose string values contain literal newlines.

    Inside every double-quoted string value, each real newline (together
    with surrounding whitespace) is collapsed into the two-character escape
    sequence ``\\n`` so the result can be parsed by ``json.loads``.
    """
    # Replacement callback applied to every quoted string found below.
    def replace_match(match):
        # match.group(0) is the whole quoted string, e.g. "xxx\n   yyy"
        content = match.group(1)  # the content without the surrounding quotes
        # Collapse a literal newline plus adjacent whitespace into one escaped \n
        content = re.sub(r'\s*\n\s*', r'\\n', content)
        return '"' + content + '"'
    
    # Match double-quoted strings while honouring backslash escapes; the
    # non-greedy body plus DOTALL lets a single string span multiple lines.
    fixed = re.sub(r'"((?:[^"\\]|\\.)*?)"', replace_match, s, flags=re.DOTALL)
    return fixed


def extract_all_json_from_output(output: str):
    """Extract every ```json ... ``` fenced block from LLM output and parse it.

    Args:
        output: raw text produced by the model.

    Returns:
        List[Union[dict, list]]: all blocks that parsed successfully;
        blocks that remain invalid JSON after repair are skipped silently.
    """
    # Non-greedy, multi-line match of every ```json ... ``` fence.
    fenced_blocks = re.findall(r"```json\s*(.*?)\s*```", output, re.DOTALL)

    parsed = []
    for raw_block in fenced_blocks:
        candidate = raw_block.strip()
        candidate = fix_multiline_json_string(candidate)
        if not candidate:
            continue
        try:
            parsed.append(json.loads(candidate))
        except json.JSONDecodeError:
            # Unrepairable block: skip rather than abort the whole extraction.
            continue
    return parsed


def strip_none_from_json(json_str: str, as_string: bool = True, ensure_ascii: bool = False):
    """Remove every key whose value is ``None`` (JSON ``null``) or ``""``.

    Args:
        json_str: a JSON-formatted string, or an already-parsed dict/list.
        as_string: if True, return the cleaned data re-serialized as a JSON
            string; otherwise return the cleaned Python object (dict/list).
        ensure_ascii: forwarded to ``json.dumps`` when serializing.

    Returns:
        The cleaned JSON string or Python object.

    Raises:
        TypeError: if ``json_str`` is neither a str nor a dict/list.
        json.JSONDecodeError: if ``json_str`` is a string but not valid JSON.
    """
    if isinstance(json_str, str):
        data = json.loads(json_str)
    elif isinstance(json_str, (dict, list)):
        data = json_str
    else:
        # Previously this fell through and crashed later with an unbound
        # `data` (NameError); fail fast with an explicit type error instead.
        raise TypeError(
            f"Expected JSON string, dict or list, got {type(json_str).__name__}"
        )
    cleaned = _remove_none_values(data)
    return json.dumps(cleaned, ensure_ascii=ensure_ascii) if as_string else cleaned


def _remove_none_values(obj):
    if isinstance(obj, dict):
        new = {}
        for k, v in obj.items():
            if v is None:
                continue
            elif v == "":
                continue
            new[k] = _remove_none_values(v)
        return new
    if isinstance(obj, list):
        return [_remove_none_values(i) for i in obj]
    return obj


def _normalize_test_level(test_level):
    # Collapse free-form level labels from the model into the three
    # canonical buckets; anything unrecognised defaults to '扩展'.
    if '冒烟' in test_level:
        return '冒烟'
    if '基础' in test_level:
        return '基础'
    return '扩展'


def main(model, basic_generation_iden):
    """Generate basic test cases for every preprocessed requirement case.

    For each case: build a system prompt from its pin/variable mapping
    tables, retrieve the most similar historical case as a one-shot example,
    query the LLM, parse the fenced-JSON test cases from its response, and
    append normalized results to a JSONL file (rewritten after every case
    as a checkpoint).

    Args:
        model: model identifier passed to the ``LLM`` wrapper.
        basic_generation_iden: output file name (``.jsonl``) created under
            ``BASIC_GENERATED_TESTS_DIR``.
    """
    expr_name = basic_generation_iden

    generated_test_dir = BASIC_GENERATED_TESTS_DIR
    preprocessed_data_dir = PREPROCESSED_DATA_DIR
    os.makedirs(generated_test_dir, exist_ok=True)

    result_file = os.path.join(generated_test_dir, expr_name)

    # Load all cases (one JSON object per non-blank line).
    with open(os.path.join(preprocessed_data_dir, 'all_cases.jsonl'), 'r', encoding='utf-8') as f:
        extracted_cases = [json.loads(line) for line in f if line.strip()]

    print(f"Total cases: {len(extracted_cases)}")

    rag_base = load_rag_base(os.path.join(preprocessed_data_dir, 'cases4rag.json'))
    similarity_dict = load_similarity_dict(os.path.join(preprocessed_data_dir, 'similarity_dict_qwen3_0.6b.json'))

    all_results = []
    for case_data in tqdm(extracted_cases, desc="Processing cases", unit="case"):
        print(f"Processing case {case_data.get('case_id', '')}")

        # Embed the pin/variable mapping tables into the system prompt as
        # fenced JSON blocks (null/empty values stripped first).
        pin_mappings = strip_none_from_json(case_data.get('pin_mappings', {}), False)
        pin_list = pin_mappings.get('需求与引脚对照表', [])
        var_list = pin_mappings.get('需求与信号变量对照表', [])
        pin_json = json.dumps(pin_list, ensure_ascii=False, indent=2)
        var_json = json.dumps(var_list, ensure_ascii=False, indent=2)
        pin_block = f"```json\n{pin_json}\n```"
        var_block = f"```json\n{var_json}\n```"

        system_prompt = prompt_basic.system_prompt.format(
            pin_mappings=pin_block,
            variable_mappings=var_block,
        )

        case_content = case_data.get('original_text', '')
        # find_most_similar_from_dict returns e.g. [('Explain gravity', 0.32)]
        try:
            most_similar_case = find_most_similar_from_dict(case_content, similarity_dict, top_k=1)[0][0]
        except (ValueError, IndexError):
            # ValueError: case text missing from the similarity index;
            # IndexError: no neighbours at all. Fall back to the static example.
            most_similar_case = None

        if most_similar_case:
            # One-shot example: the most similar historical case with its
            # smoke-level ('冒烟') tests as the expected answer.
            rag_tests = rag_base[most_similar_case]['Tests']
            basic_tests = [t for t in rag_tests if t.get('test_level', '') == '冒烟']

            example_prompt = prompt_basic.example_prompt.format(
                example_name=rag_base[most_similar_case]['case_name'],
                example_content=most_similar_case,
            )

            example_output = prompt_basic.example_response.format(
                example_test_cases=json.dumps(basic_tests, ensure_ascii=False, indent=2)
            )
        else:
            print("No similar case found, using second example.")
            example_prompt = prompt_basic.user_promt_1
            example_output = prompt_basic.answer_1

        user_prompt = prompt_basic.user_prompt.format(
            case_name=case_data.get('case_name', ''),
            original_text=case_data.get('original_text', '')
        )

        allm = LLM(model)
        response = allm.get_response(messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": example_prompt},
            {"role": "assistant", "content": example_output},
            {"role": "user", "content": user_prompt}
        ])

        # extract_all_json_from_output never raises: it returns a (possibly
        # empty) list of parsed fenced-JSON blocks.
        model_outputs = extract_all_json_from_output(response)
        print(model_outputs)

        if model_outputs:
            model_outputs = model_outputs[0]
        else:
            # Nothing parseable came back: dump the raw response for debugging.
            print('=' * 20)
            print(response)
            print('=' * 20)

        # The model may emit a single test object instead of a list; wrap it
        # so the loop below iterates dicts, not the dict's string keys.
        if isinstance(model_outputs, dict):
            model_outputs = [model_outputs]

        for result_item in model_outputs:
            if not isinstance(result_item, dict):
                # Skip malformed entries rather than crashing on .get().
                continue
            # Map the response to the actual fields in GeneratedTest model
            subsystem = result_item.get("subsystem", "")
            function_module = result_item.get("function_module", "")
            test_name = result_item.get("test_name", "")
            preconditions = result_item.get("preconditions", "")
            # Some responses put the steps under "test_content" instead.
            operate_step = result_item.get("operate_step", result_item.get("test_content", ""))
            expected_results = result_item.get("expected_results", "")
            test_level = _normalize_test_level(result_item.get("test_level", ""))

            try:
                # These fields may arrive as lists of strings; flatten to
                # newline-joined text.
                if isinstance(operate_step, list):
                    operate_step = "\n".join(operate_step)
                if isinstance(expected_results, list):
                    expected_results = "\n".join(expected_results)
                if isinstance(preconditions, list):
                    preconditions = "\n".join(preconditions)
            except TypeError:
                # Non-string list items: drop this test case, keep the run going.
                continue

            single_res = {
                'case_id': case_data.get('case_id', ''),
                "case_name": case_data.get('case_name', ''),
                "preconditions": preconditions,
                "operate_step": operate_step,
                "expected_results": expected_results,
                "prompt": user_prompt,
                "response": response,
                "subsystem": subsystem,
                "function_module": function_module,
                "test_name": test_name,
                "test_environment": "车辆测试环境",
                "test_level": test_level,
            }
            all_results.append(single_res)

        # Checkpoint after every case so a crash doesn't lose earlier results.
        with open(result_file, 'w', encoding='utf-8') as f:
            for result in all_results:
                f.write(json.dumps(result, ensure_ascii=False) + '\n')

def entry_basic(model):
    """Run basic generation for *model* and return the result-file name.

    The file name embeds a MMDD_HHMM timestamp and the model id with '/'
    replaced by '-'.
    """
    import time

    stamp = time.strftime("%m%d_%H%M", time.localtime())
    safe_model = model.replace('/', '-')
    identifier = f'results_{stamp}_{safe_model}.jsonl'
    main(model, identifier)
    return identifier


if __name__ == "__main__":
    model = "deepseek/deepseek-r1-distill-llama-70b"
    model_name = 'deepseek-r1-distill-llama-70b'
    import time
    date = time.strftime("%m%d_%H%M", time.localtime())
    basic_generation_iden = f'results_{date}_{model_name}.jsonl'
    main(model, basic_generation_iden)