import json
import os
import re

from tqdm import tqdm
from sentence_transformers import SentenceTransformer
import numpy as np

import core.prompt_judge as prompt_judge
from core.LLM import LLM
from data.config import BASIC_GENERATED_TESTS_DIR, EXTEND_GENERATED_TESTS_DIR, EXTEND_JUDGE_RESULTS_DIR, PREPROCESSED_DATA_DIR

def load_json_file(json_path):
    """Read a JSON document from *json_path* (UTF-8) and return the parsed object."""
    with open(json_path, encoding='utf-8') as fh:
        return json.load(fh)

def load_jsonl_file(json_path):
    """Read a JSON-Lines file (UTF-8) and return a list of parsed records.

    Blank lines are skipped.
    """
    with open(json_path, encoding='utf-8') as fh:
        return [json.loads(raw) for raw in fh if raw.strip()]


def find_most_similar_from_dict(requirement_key, similarity_dict, top_k=1):
    """Return the top_k (key, similarity) pairs most similar to *requirement_key*.

    The key itself is excluded from the candidates. Raises ValueError when the
    key is missing from *similarity_dict*.
    """
    if requirement_key not in similarity_dict:
        raise ValueError(f"Requirement '{requirement_key}' not found in similarity_dict")

    # Collect every (candidate, score) pair except the self-match.
    candidates = [
        (other, score)
        for other, score in similarity_dict[requirement_key].items()
        if other != requirement_key
    ]

    # Highest similarity first; keep only the top_k entries.
    candidates.sort(key=lambda pair: pair[1], reverse=True)
    return candidates[:top_k]

def load_generation_results(generation_results_path):
    """Load a JSONL file of generated test cases and group them by case_id.

    Each non-blank line must be a JSON object carrying at least the keys
    read below (case_id, case_name, subsystem, ...). Missing keys raise
    KeyError, surfacing malformed records instead of hiding them.

    Returns:
        dict mapping case_id -> list of normalized test-case dicts.
    """
    final_res_dict = {}
    with open(generation_results_path, 'r', encoding='utf-8') as f:
        for line in f:
            if not line.strip():
                continue
            current_res = json.loads(line)
            # setdefault replaces the check-then-insert dance with one lookup.
            final_res_dict.setdefault(current_res['case_id'], []).append({
                "test_name": current_res['case_name'],
                "subsystem": current_res['subsystem'],
                "function_module": current_res['function_module'],
                "preconditions": current_res['preconditions'],
                "operate_step": current_res['operate_step'],
                "expected_results": current_res['expected_results'],
                "test_environment": current_res['test_environment'],
                "test_level": current_res['test_level']
            })
    return final_res_dict


def strip_none_from_json(json_str, as_string: bool = True, ensure_ascii: bool = False):
    """Remove every key whose value is None (JSON null) from a JSON document.

    Accepts either a JSON-formatted string or an already-parsed dict/list.
    NOTE: the helper also drops keys whose value is "" (empty string).

    Returns:
        - the cleaned JSON string when as_string is True;
        - otherwise the cleaned Python object (dict / list).

    Raises:
        TypeError: when json_str is not a str, dict, or list.
    """
    if isinstance(json_str, str):
        data = json.loads(json_str)
    elif isinstance(json_str, (dict, list)):
        data = json_str
    else:
        # Previously any other type fell through and crashed with
        # UnboundLocalError on `data`; fail with an explicit message instead.
        raise TypeError(f"Expected str, dict, or list, got {type(json_str).__name__}")
    cleaned = _remove_none_values(data)
    return json.dumps(cleaned, ensure_ascii=ensure_ascii) if as_string else cleaned

def _remove_none_values(obj):
    if isinstance(obj, dict):
        new = {}
        for k, v in obj.items():
            if v is None:
                continue
            elif v == "":
                continue
            new[k] = _remove_none_values(v)
        return new
    if isinstance(obj, list):
        return [_remove_none_values(i) for i in obj]
    return obj


def parse_judge_response(response_text):
    """Extract and parse the last valid ```json ...``` fenced block in an LLM response.

    Returns:
        The parsed object from the last fenced block that is valid JSON,
        or None when no fenced block exists or none of them parse.
    """
    matches = re.findall(r"```json\s*(.*?)\s*```", response_text, re.DOTALL)

    # Initialize up front: previously `obj` was unbound (NameError) when the
    # response contained no fenced ```json``` block at all.
    parsed = None
    for json_str in matches:
        json_str = json_str.strip()
        if not json_str:
            continue
        try:
            # Last valid block wins; a later malformed block no longer
            # clobbers an earlier successful parse with None.
            parsed = json.loads(json_str)
        except json.JSONDecodeError:
            print('Loading response error!')
    return parsed

def find_most_similar_test(target_embedding, case_id, content_dicts):
    """Find the entry in content_dicts most cosine-similar to target_embedding.

    Entries whose 'req_id' equals *case_id* are skipped, so a case never
    matches itself.

    Args:
        target_embedding: the query embedding vector (array-like).
        case_id: the requirement id to exclude from candidates.
        content_dicts: mapping of key -> {'req_id': ..., 'embedding': [...], ...}.

    Returns:
        (best_key, best_score): key of the most similar entry and its cosine
        similarity; (None, -1) when there are no eligible candidates.
    """
    target_vec = np.asarray(target_embedding)
    # Loop-invariant: the target's norm does not change per candidate.
    target_norm = np.linalg.norm(target_vec)

    best_score = -1
    best_content = None

    for key, item in content_dicts.items():
        if case_id == item['req_id']:
            continue
        emb = np.array(item['embedding'])
        # Cosine similarity; NOTE(review): a zero-norm vector yields NaN here,
        # matching the original behavior.
        sim = np.dot(target_vec, emb) / (target_norm * np.linalg.norm(emb))

        if sim > best_score:
            best_score = sim
            best_content = key
    return best_content, best_score
   
def construct_few_shot_example_prompt(few_shot_case_id, extracted_cases, rag_base):
    """Return the RAG 'Tests' entry for the case whose case_id matches.

    The lookup goes case_id -> original_text -> rag_base entry. Returns None
    when no case in *extracted_cases* carries the requested id.
    """
    for candidate in extracted_cases:
        if candidate['case_id'] == few_shot_case_id:
            return rag_base[candidate['original_text']]['Tests']
    return None
    
if __name__ == "__main__":
    # Models and experiment identifiers for this judging run.
    model = 'qwen3-32b'
    judge_model = 'deepseek-v3'
    date = '1102_2128'
    expr_name = f'results_{date}_{model}.jsonl'

    basic_generated_test_dir = BASIC_GENERATED_TESTS_DIR
    basic_generation_iden = 'all_results_1102_0125_qwen3-32b.jsonl'

    extend_generated_test_dir = EXTEND_GENERATED_TESTS_DIR
    judge_res_dir = EXTEND_JUDGE_RESULTS_DIR
    preprocessed_data_dir = PREPROCESSED_DATA_DIR
    os.makedirs(judge_res_dir, exist_ok=True)

    # Extended tests to be judged, and the basic (smoke) tests used as anchors.
    generated_tests = load_generation_results(os.path.join(extend_generated_test_dir, expr_name))
    result_file = os.path.join(judge_res_dir, f'judge_{expr_name}')

    basic_generated_tests = load_generation_results(os.path.join(basic_generated_test_dir, basic_generation_iden))

    all_results = []

    # Load all cases
    extracted_cases = load_jsonl_file(os.path.join(preprocessed_data_dir, 'all_cases.jsonl'))

    print(f"Total cases: {len(extracted_cases)}")

    rag_base = load_json_file(os.path.join(preprocessed_data_dir, 'cases4rag.json'))
    similarity_dict = load_json_file(os.path.join(preprocessed_data_dir, 'similarity_dict_qwen3_0.6b.json'))

    pin_mappings = load_json_file(os.path.join(preprocessed_data_dir, 'pin_mappings.json'))
    pin_mappings = strip_none_from_json(pin_mappings, False)

    test_rag_base = load_json_file(os.path.join(preprocessed_data_dir, 'case_dict_qwen3_0.6b.json'))
    embedding_model = SentenceTransformer("Qwen/Qwen3-Embedding-0.6B")

    pin_list = pin_mappings.get('需求与引脚对照表', [])
    var_list = pin_mappings.get('需求与信号变量对照表', [])
    pin_json = json.dumps(pin_list, ensure_ascii=False, indent=2)
    var_json = json.dumps(var_list, ensure_ascii=False, indent=2)
    pin_block = f"```json\n{pin_json}\n```"
    var_block = f"```json\n{var_json}\n```"

    # The system prompt depends only on the pin/variable mappings, so build it
    # once instead of re-formatting it on every loop iteration. Bug fix: the
    # formatted prompt was previously built but never sent — the raw,
    # unformatted prompt_judge.system_prompt template went to the LLM instead.
    system_prompt = prompt_judge.system_prompt.format(
        pin_mappings=pin_block,
        variable_mappings=var_block,
    )

    # Reuse one LLM client; each request passes the complete message list, so
    # no per-request client state is needed.
    allm = LLM(model)

    for case_data in tqdm(extracted_cases, desc="Processing cases", unit="case"):
        case_id = case_data.get('case_id', '')
        case_content = case_data.get('original_text', '')
        print(f"Processing case {case_id}")

        # Split generated tests: smoke-level basics anchor the similarity
        # search; the non-smoke extended tests are what gets judged.
        generated_basic_test = [i for i in basic_generated_tests.get(case_id, []) if i.get('test_level', '') == '冒烟']
        generated_extended_test = [i for i in generated_tests.get(case_id, []) if i.get('test_level', '') != '冒烟']

        if not generated_extended_test:
            print(f"No generated extended tests for case {case_id}, skipping.")
            continue
        if not generated_basic_test:
            # Previously this crashed with IndexError at generated_basic_test[0].
            print(f"No generated basic tests for case {case_id}, skipping.")
            continue

        try:
            current_tests = rag_base[case_content].get('Tests', [])
        except KeyError:
            # Narrowed from a bare except: only a missing RAG entry should skip.
            print(f"No current tests found for case {case_id}.")
            continue
        current_extend_tests = [i for i in current_tests if i.get('test_level', '') != '冒烟']

        # Embed the first generated smoke test and retrieve the most similar
        # existing test from the RAG base.
        example_basic_test = json.dumps(generated_basic_test[0], ensure_ascii=False, indent=2)
        target_embedding = embedding_model.encode(example_basic_test)

        similar_basic_test = find_most_similar_test(target_embedding, case_id, test_rag_base)[0]

        similar_case_id = test_rag_base[similar_basic_test]['req_id']
        similar_case_name = [i for i in extracted_cases if i.get('case_id', '') == similar_case_id][0]['case_name']

        req_case_req_content = test_rag_base[similar_basic_test]['requirement']
        similar_extend_tests = test_rag_base[similar_basic_test]['extend_tests'] if similar_basic_test else []

        # Entries in test_rag_base are stored as JSON strings; decode them.
        similar_basic_test = json.loads(similar_basic_test)
        similar_extend_tests = json.loads(similar_extend_tests)

        similar_tests = []
        if similar_extend_tests:
            similar_tests.append(similar_basic_test)
            similar_tests.extend(similar_extend_tests)
        else:
            # Fall back to a fixed few-shot example when the retrieved case
            # has no extended tests.
            similar_tests = construct_few_shot_example_prompt('G393_BDC-16395', extracted_cases, rag_base)

        for single_generated_extended_test in generated_extended_test:
            user_prompt = prompt_judge.judge_tuozhan_prompt.format(
                example_name = similar_case_name,
                example_content = req_case_req_content,
                example_test_cases = json.dumps(similar_tests, ensure_ascii=False, indent=4),
                test_name = case_data.get('case_name', ''),
                test_content = case_content,
                current_test_cases = json.dumps(current_extend_tests, ensure_ascii=False, indent=4),
                generated_test_cases = json.dumps([single_generated_extended_test], ensure_ascii=False, indent=4)
            )

            response = allm.get_response(messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt}
            ])

            judge_res = parse_judge_response(response)
            if judge_res is None:
                # parse_judge_response returns None on unparseable responses;
                # previously this crashed with a TypeError on subscript.
                print(f"Unparseable judge response for case {case_id}, skipping this test.")
                continue
            final_res = {
                "judge_result": judge_res['judgement'],
                "case_id": case_id,
                "judge_reason": judge_res['reason'],
                "judge_suggestion": judge_res['suggestion'],
                "case_name": case_data.get('case_name', ''),
                "original_text": case_content,
                "generated_test_cases": single_generated_extended_test,
            }

            all_results.append(final_res)

            # Checkpoint after every judgement: rewrite the full JSONL so a
            # crash mid-run loses at most the in-flight result.
            with open(result_file, 'w', encoding='utf-8') as rf:
                for single_res in all_results:
                    rf.write(json.dumps(single_res, ensure_ascii=False) + '\n')