from sklearn.metrics import accuracy_score
from os.path import join, abspath
import os
import json

# Directory that contains this script; all data paths are relative to it.
work_dir = abspath(join(__file__, '..'))

# Gold labels: numeric idiom id -> correct option index.
answer = {}

with open(join(work_dir, 'dev_answer.json'), 'r', encoding='utf8') as fp:
    for raw_key, label in json.load(fp).items():
        # Keys look like "idiom#123" — strip the prefix/marker to get the id.
        idiom_id = int(raw_key.replace('idiom', '').replace('#', ''))
        answer[idiom_id] = int(label)


def evaluate(scheme_path: str, result_file_name: str):
    """Score one JSON-lines result file against the gold `answer` dict.

    Each line of the result file is a JSON object with an 'id' and a
    predicted option under result['index'].  Prints the scheme, file name,
    record count, and accuracy.
    """
    predictions = {}
    result_path = join(work_dir, scheme_path, 'result', result_file_name)
    with open(result_path, encoding='utf8') as f:
        for line in f:
            record = json.loads(line)
            predictions[record['id']] = record['result']['index']

    # NOTE(review): a strict length check against `answer` was deliberately
    # disabled here — result files may cover only a subset of the gold ids.
    keys = list(predictions.keys())
    ground_truth = [answer[k] for k in keys]
    predict = [predictions[k] for k in keys]
    print(f'{scheme_path}, {result_file_name}, count: {len(predictions)}, '
          f' accuracy_score:'
          f' {accuracy_score(ground_truth, predict)}')


# Evaluate every result file produced by scheme3.
#
# Fixes over the previous os.walk version:
#   * os.walk descends into nested subdirectories of result/, but
#     evaluate() always reconstructs the path as <scheme>/result/<name>,
#     so any nested file would raise FileNotFoundError — os.listdir
#     restricts us to the top level, which is all evaluate() can handle.
#   * The scheme is passed as a relative name ('scheme3') rather than an
#     absolute path, so evaluate() no longer relies on os.path.join
#     silently discarding work_dir when given an absolute second argument.
scheme_name = 'scheme3'
result_dir = join(work_dir, scheme_name, 'result')
# sorted() makes the evaluation order deterministic across platforms.
for result_file in sorted(os.listdir(result_dir)):
    if os.path.isfile(join(result_dir, result_file)):
        evaluate(scheme_name, result_file)
