import json
import csv
from data.config import PREPROCESSED_DATA_DIR
from collections import defaultdict

# Load every preprocessed case once so merge() can look up the original
# text by case_id and reproduce the source ordering of cases.
content_dict = {}
case_ids = []
with open(f'{PREPROCESSED_DATA_DIR}/all_cases.jsonl', 'r', encoding='utf-8') as f:
    for line in f:
        # Parse each line exactly once (the original parsed twice per line).
        record = json.loads(line)
        case_id = record['case_id']
        case_ids.append(case_id)
        content_dict[case_id] = record['original_text']
        
# Column order of the merged CSV; mirrors the keys of the row dicts built
# in _append_records, so an empty merge still produces a valid header.
_FIELDNAMES = [
    'case_id', 'content', 'case_name', 'preconditions', 'operate_step',
    'expected_results', 'test_name', 'subsystem', 'function_module',
    'test_level',
]

def _append_records(jsonl_file, res_dict):
    """Read one generated-tests JSONL file and group its rows by case_id.

    Each line must be a JSON object carrying the fields listed in
    _FIELDNAMES (except 'content', which is looked up from the module-level
    content_dict by case_id). Rows are appended to res_dict[case_id].
    """
    with open(jsonl_file, 'r', encoding='utf-8') as f:
        for line in f:
            data = json.loads(line)
            case_id = data['case_id']
            res_dict[case_id].append({
                'case_id': case_id,
                'content': content_dict[case_id],
                'case_name': data['case_name'],
                'preconditions': data['preconditions'],
                'operate_step': data['operate_step'],
                'expected_results': data['expected_results'],
                'test_name': data['test_name'],
                'subsystem': data['subsystem'],
                'function_module': data['function_module'],
                'test_level': data['test_level'],
            })

def merge(basic_file, extend_file, output_file):
    """Merge basic and extended generated test cases into one CSV.

    Rows from both JSONL inputs are grouped by case_id and written to
    output_file in the order the cases appear in all_cases.jsonl
    (module-level case_ids), basic rows before extend rows per case.

    Args:
        basic_file: path to the basic-tests JSONL file.
        extend_file: path to the extended-tests JSONL file.
        output_file: path of the CSV file to write.
    """
    res_dict = defaultdict(list)
    # The two inputs share an identical schema, so one loader handles both.
    _append_records(basic_file, res_dict)
    _append_records(extend_file, res_dict)

    # Order rows by the original case order from all_cases.jsonl.
    res = []
    for cid in case_ids:
        res.extend(res_dict[cid])

    # Save res to CSV, one field per column. Using the fixed _FIELDNAMES
    # list (rather than res[0].keys()) keeps this safe when res is empty.
    with open(output_file, 'w', encoding='utf-8', newline='') as out_f:
        writer = csv.DictWriter(out_f, fieldnames=_FIELDNAMES)
        writer.writeheader()
        writer.writerows(res)

if __name__ == '__main__':
    # Merge the basic and extended qwen3-32b generation runs into one CSV.
    basic_path = '/Users/yangchen/Desktop/hil/data/generated_tests/basic/results_1105_0036_qwen3-32b.jsonl'
    extend_path = '/Users/yangchen/Desktop/hil/data/generated_tests/extend/results_1105_0048_qwen3-32b.jsonl'
    merged_path = '/Users/yangchen/Desktop/hil/data/generated_tests/merged/merged_1105_0314_qwen3-32b.csv'
    merge(basic_path, extend_path, merged_path)