# Requires transformers>=4.51.0
# Requires sentence-transformers>=2.7.0
import json
from sentence_transformers import SentenceTransformer, util
from tqdm import tqdm
import torch

def load_requirement_json(json_path):
    """Read a UTF-8 encoded JSON file and return the parsed object.

    Args:
        json_path: path to the JSON file on disk.

    Returns:
        The deserialized JSON content (a dict for this project's data files).
    """
    with open(json_path, encoding='utf-8') as fp:
        return json.load(fp)


def find_most_similar_from_dict(requirement_key, similarity_dict, top_k=1):
    """Return the top_k (key, score) pairs most similar to requirement_key.

    Self-similarity (the key compared with itself) is excluded.

    Args:
        requirement_key: key to look up in similarity_dict.
        similarity_dict: nested dict {key_i: {key_j: score}}.
        top_k: number of best matches to return.

    Returns:
        List of (other_key, score) tuples sorted by score, descending.

    Raises:
        ValueError: if requirement_key is not present in similarity_dict.
    """
    if requirement_key not in similarity_dict:
        raise ValueError(f"Requirement '{requirement_key}' not found in similarity_dict")

    # Collect every candidate except the key itself.
    candidates = [
        (other, score)
        for other, score in similarity_dict[requirement_key].items()
        if other != requirement_key
    ]
    # Highest similarity first, then truncate.
    candidates.sort(key=lambda pair: pair[1], reverse=True)
    return candidates[:top_k]


def get_requirements_similarity_dict(model, json_path):
    """Embed every requirement key from the JSON file and return pairwise cosine similarities.

    Args:
        model: SentenceTransformer-like object exposing .encode().
        json_path: path to the requirements JSON ({requirement_content: ...}).

    Returns:
        Nested dict {req_i: {req_j: similarity}} including the diagonal
        (self-similarity, always ~1.0).
    """
    # Fix: the original assigned a hard-coded placeholder list here that was
    # immediately overwritten by the JSON load — dead code removed.
    all_requirements = load_requirement_json(json_path)
    all_requirement_contents = list(all_requirements.keys())

    embeddings = model.encode(all_requirement_contents)
    similarity_matrix = util.cos_sim(embeddings, embeddings).cpu().numpy()

    # Build dict: {req_i: {req_j: sim_score}}
    similarity_dict = {
        all_requirement_contents[i]: {
            all_requirement_contents[j]: float(similarity_matrix[i][j])
            for j in range(len(all_requirement_contents))
        }
        for i in range(len(all_requirement_contents))
    }
    return similarity_dict
    
def join_test_content(test_data):
    """Serialize a test-case dict to a pretty-printed JSON string.

    ensure_ascii=False keeps CJK characters readable instead of \\u-escaping them.
    """
    return json.dumps(test_data, ensure_ascii=False, indent=2)

def construct_case_req_dict(json_path, encoder=None):
    """Build {smoke-test-content: metadata} with embeddings and nearest-neighbour tests.

    For each requirement, its '冒烟' (smoke-level) tests become dict keys; the
    remaining tests are serialized into the entry's 'extend_tests' field. Each
    entry then gets the embedding of its content and the content of the most
    similar *other* smoke test.

    Args:
        json_path: path to the requirements JSON
            ({req_content: {'Tests': [...], 'case_id': ..., 'case_name': ...}}).
        encoder: object with .encode() (e.g. a SentenceTransformer). Defaults to
            the module-level `model` for backward compatibility with the original
            implicit-global behavior.

    Returns:
        dict keyed by serialized smoke-test content.
    """
    import numpy as np  # hoisted from mid-function; kept local to match file style

    # Fix: the original silently depended on a global `model` that only exists
    # when the script's __main__ block has run.
    enc = encoder if encoder is not None else model

    data = load_requirement_json(json_path)
    case_req_dict = {}
    for req_content, req_data in data.items():
        tests = req_data['Tests']
        basic_tests = [t for t in tests if t.get('test_level', '') == '冒烟']
        extend_tests = [t for t in tests if t.get('test_level', '') != '冒烟']
        for single_test in basic_tests:
            test_content = join_test_content(single_test)
            # First occurrence wins: a duplicate smoke test keeps its earlier requirement.
            if test_content not in case_req_dict:
                case_req_dict[test_content] = {
                    # NOTE(review): stores case_id under the 'req_id' key — confirm intended.
                    'req_id': req_data.get('case_id', ''),
                    'case_name': req_data.get('case_name', ''),
                    'requirement': req_content,
                    'extend_tests': json.dumps(extend_tests, ensure_ascii=False, indent=2),
                }

    all_basic_tests = list(case_req_dict.keys())

    embeddings = []
    for single_basic_test in tqdm(all_basic_tests):
        embedding = enc.encode(single_basic_test).tolist()
        case_req_dict[single_basic_test]['embedding'] = embedding
        embeddings.append(embedding)
    embeddings = np.array(embeddings)

    # Fix: with 0 or 1 smoke tests the original crashed (IndexError on [0][0]
    # because the self-match is excluded). Fall back to an empty neighbour.
    if len(all_basic_tests) < 2:
        for single_basic_test in all_basic_tests:
            case_req_dict[single_basic_test]['similar_tests'] = ''
        return case_req_dict

    similarity_matrix = util.cos_sim(embeddings, embeddings).cpu().numpy()
    # Build dict: {test_i: {test_j: sim_score}}
    similarity_dict = {
        all_basic_tests[i]: {
            all_basic_tests[j]: float(similarity_matrix[i][j])
            for j in range(len(all_basic_tests))
        }
        for i in range(len(all_basic_tests))
    }

    for single_basic_test in tqdm(all_basic_tests):
        best_match = find_most_similar_from_dict(single_basic_test, similarity_dict, top_k=1)[0][0]
        case_req_dict[single_basic_test]['similar_tests'] = best_match
    return case_req_dict

def construct_req_dict(json_path, encoder=None):
    """Attach an embedding and the most similar other requirement to each entry.

    Args:
        json_path: path to the requirements JSON ({req_content: {...}}).
        encoder: object with .encode() (e.g. a SentenceTransformer). Defaults to
            the module-level `model` for backward compatibility with the original
            implicit-global behavior.

    Returns:
        The loaded dict, with 'embedding' and 'similar_requirements' added
        to every entry.
    """
    import numpy as np  # hoisted from mid-function; kept local to match file style

    # Fix: the original silently depended on a global `model` that only exists
    # when the script's __main__ block has run.
    enc = encoder if encoder is not None else model

    data = load_requirement_json(json_path)
    all_requirement_contents = list(data.keys())

    embeddings = []
    for single_req in all_requirement_contents:
        embedding = enc.encode(single_req).tolist()
        data[single_req]['embedding'] = embedding
        embeddings.append(embedding)
    embeddings = np.array(embeddings)

    # Fix: with 0 or 1 requirements the original crashed (IndexError on [0][0]
    # because the self-match is excluded). Fall back to an empty neighbour.
    if len(all_requirement_contents) < 2:
        for single_req in all_requirement_contents:
            data[single_req]['similar_requirements'] = ''
        return data

    similarity_matrix = util.cos_sim(embeddings, embeddings).cpu().numpy()

    # Build dict: {req_i: {req_j: sim_score}}
    similarity_dict = {
        all_requirement_contents[i]: {
            all_requirement_contents[j]: float(similarity_matrix[i][j])
            for j in range(len(all_requirement_contents))
        }
        for i in range(len(all_requirement_contents))
    }

    for single_req in all_requirement_contents:
        best_match = find_most_similar_from_dict(single_req, similarity_dict, top_k=1)[0][0]
        data[single_req]['similar_requirements'] = best_match
    return data

if __name__ == "__main__":
    # model = SentenceTransformer("/Users/yangchen/Desktop/qwen3-embedding-0.6b")
    model = SentenceTransformer("Qwen/Qwen3-Embedding-0.6B")
    json_path = '/Users/yangchen/Desktop/hil/preprocessed_data/cases4rag.json'
    
    case_dict = construct_case_req_dict(json_path)
    with open('/Users/yangchen/Desktop/hil/preprocessed_data/case_dict_qwen3_0.6b.json', 'w', encoding='utf-8') as f:
        json.dump(case_dict, f, ensure_ascii=False, indent=4)
        
    req_dict = construct_req_dict(json_path)
    with open('/Users/yangchen/Desktop/hil/preprocessed_data/req_dict_qwen3_0.6b.json', 'w', encoding='utf-8') as f:
        json.dump(req_dict, f, ensure_ascii=False, indent=4)
        
    # requirements_similarity_dict = get_requirements_similarity_dict(model, json_path)
    # with open('/Users/yangchen/Desktop/hil/preprocessed_data/similarity_dict_qwen3_0.6b.json', 'w', encoding='utf-8') as f:
    #     json.dump(requirements_similarity_dict, f, ensure_ascii=False, indent=4)
    
    # req_a, req_b = all_requirements[0], all_requirements[0]
    # print(f"Similarity({req_a}, {req_b}) = {similarity_dict[req_a][req_b]:.4f}")

    # print(find_most_similar_from_dict(req_a, similarity_dict, top_k=1))
