import json
import os
from CKGRetriever import CKGRetriever
from EnvironmentService import EnvironmentService
from LoggerManager import LoggerManager
from Evaluator import EvaluatorState, evaluator_graph
from Config import *
from Utils import add_record, add_test_case

# LangGraph execution config: cap recursion so a runaway evaluator loop fails fast.
config = {"recursion_limit": RECURSION_LIMIT}

# Code-knowledge-graph retriever backed by the local Neo4j instance.
# NOTE(review): credentials are hard-coded; consider moving them to env vars/config.
graph_retriever = CKGRetriever("bolt://localhost:7687", "neo4j", "123456")

# Environment service pointed at the project under test.
envServer = EnvironmentService()
envServer.set_base_path(r"E:\Project\Agent\TestAgent\TestAgent\TestAgent\CKGConstruction\test3")

methods_list = graph_retriever.load_methods()
methods_list = methods_list[:2]  # just for demo

generate_log_dir = "result/log"
log_dir = generate_log_dir
logger_manager = LoggerManager()
logger = logger_manager.logger

# exist_ok=True avoids the check-then-create race of os.path.exists + os.makedirs.
os.makedirs(log_dir, exist_ok=True)
generate_record_path = os.path.join(generate_log_dir, "record.jsonl")


def read_jsonl(file_path):
    """Read a JSON Lines file and return its records as a list of parsed objects.

    Opens with an explicit UTF-8 encoding (the platform default may differ) and
    skips blank lines, so a file with a trailing newline or empty separator
    lines no longer raises ``json.JSONDecodeError``.
    """
    with open(file_path, "r", encoding="utf-8") as file:
        return [json.loads(line) for line in file if line.strip()]


def _aggregate_results(test_cases, prior_record):
    """Merge coverage/mutation data from fresh test cases into any prior record.

    ``prior_record`` is a dict (possibly empty) with optional keys
    ``covered_lines``, ``total_lines`` and ``mutation_info``.

    Returns a tuple ``(results, find_bugs, coverage, mutation_score,
    total_lines, covered_lines, mutation_info)``.
    """
    covered_lines = set(prior_record.get("covered_lines", []))
    total_lines = set(prior_record.get("total_lines", []))
    mutation_info = dict(prior_record.get("mutation_info", {}))
    results, find_bugs = [], []

    for test_case in test_cases:
        results.append(test_case["test_result"])
        find_bugs.append(test_case["find_bug"])

        if test_case["coverage_report"]["result"] == "Success":
            output = test_case["coverage_report"]["output"]
            # total = covered + missed; only hit lines go into covered_lines
            total_lines.update(output["covered_lines"])
            total_lines.update(output["missed_lines"])
            covered_lines.update(output["covered_lines"])

        if test_case["mutation_report"]["result"] == "Success":
            for mutation in test_case["mutation_report"]["output"]["filtered_mutations"]:
                key = f"{mutation['Line']}_{mutation['Mutator']}"
                # a KILLED verdict is final; never downgrade it
                if mutation_info.get(key) != "KILLED":
                    mutation_info[key] = mutation["Status"]

    coverage = len(covered_lines) / len(total_lines) if total_lines else 0
    mutation_score = (
        sum(1 for status in mutation_info.values() if status == "KILLED") / len(mutation_info)
        if mutation_info else 0
    )
    return results, find_bugs, coverage, mutation_score, total_lines, covered_lines, mutation_info


def run_evaluation():
    """Re-run the evaluator over ``methods_list`` and append updated records.

    Methods whose prior record already shows full coverage and mutation score
    are skipped; search/evaluation failures are counted and reported at the end.
    """
    generate_record = read_jsonl(generate_record_path)
    error_count = 0
    skip_count = 0

    for index, method in enumerate(methods_list, start=1):
        record_key = f"{index}_{method.name}"
        record = next((item for item in generate_record if item["name"] == record_key), None)

        # Skip if already full coverage & mutation score
        if record and record.get("coverage") == 1 and record.get("mutation_score") == 1:
            print(f"[SKIP] {method.name}: full coverage & mutation score")
            skip_count += 1
            continue

        try:
            test_clazz_list = graph_retriever.search_test_class_by_signature(
                method.signature, method.full_qualified_name
            )[:5]

            # BUG FIX: the original dereferenced record["ret"][0] even when no
            # prior record existed (record is None), raising TypeError for any
            # new method and mis-reporting it as a search failure.
            prior_failed = record is not None and record.get("ret", [None])[0] == "FAIL"
            if isinstance(test_clazz_list, str) or prior_failed:
                print(f"[ERROR] {method.name}: search test class failed")
                error_count += 1
                continue

            envServer.set_inject_dir(
                method.absolute_path.replace("src/main", "src/test").rsplit("/", 1)[0]
            )
            graph_retriever.change_focal_method_id(int(method.id))

            state = EvaluatorState(
                envServer=envServer,
                package_name=method.package_name,
                method_id=int(method.id),
                method_code=method.content,
                method_signature=method.signature,
                start_line=method.start_line,
                end_line=method.end_line,
                class_name=method.class_name,
                full_method_name=method.full_qualified_name,
                method_summary=method.summarization if method.summarization else "",
                old_test_cases=test_clazz_list,
            )

            # Only the final result matters; intermediate evaluator_graph
            # events are ignored until testCaseAcceptor reports.
            for event in evaluator_graph.stream(state, config, subgraphs=True):
                event = event[1]
                if "testCaseAcceptor" not in event:
                    continue

                (result, find_bugs, final_coverage, final_mutation_score,
                 total_lines, covered_lines, mutation_info) = _aggregate_results(
                    event["testCaseAcceptor"]["test_cases"], record or {}
                )

                print(f"[RESULT] {method.name}: "
                      f"Coverage={final_coverage:.2f}, Mutation={final_mutation_score:.2f}, "
                      f"Bugs={len([b for b in find_bugs if b])}")

                add_record(
                    log_dir, f"{index}_{method.name}", result, find_bugs,
                    final_coverage, final_mutation_score, [], [], [],
                    total_lines, covered_lines, mutation_info, True
                )

        except Exception as e:
            print(f"[ERROR] {method.name}: exception {e}")
            error_count += 1

    print("=== Evaluation Done ===")
    print(f"Total: {len(methods_list)}, Skip: {skip_count}, Error: {error_count}")


if __name__ == "__main__":
    # Script entry point: evaluate the (demo-sliced) method list end to end.
    run_evaluation()