from sklearn.metrics import accuracy_score
from os.path import join, abspath
import os
import json

# Directory containing this script; data files are resolved relative to it.
work_dir = abspath(join(__file__, '..'))

# Load the gold labels from dev.json: one JSON record per line, each
# carrying an integer-convertible 'label' field.
with open(join(work_dir, 'dev.json'), 'r', encoding='utf8') as file:
    ground_truth = []
    for line in file:
        ground_truth.append(int(json.loads(line)['label']))


def evaluate(scheme_path: str, result_file_name: str) -> None:
    """Score one prediction file against the module-level ground truth.

    Reads ``<work_dir>/<scheme_path>/result/<result_file_name>`` (one JSON
    record per line, each with an integer ``id`` and a predicted
    ``result.index``), re-orders the predictions by ``id`` so they line up
    with ``ground_truth``, and prints the accuracy.

    Raises:
        ValueError: if the number of predictions does not match the
            number of ground-truth labels.
    """
    result_path = join(work_dir, scheme_path, 'result', result_file_name)
    with open(result_path, encoding='utf8') as f:
        # (id, predicted index) pairs; iterate the file lazily instead of
        # materializing every line with readlines().
        result_records = [
            (int(record['id']), int(record['result']['index']))
            for record in map(json.loads, f)
        ]
    # Validate explicitly: assert would be silently stripped under `python -O`.
    if len(ground_truth) != len(result_records):
        raise ValueError(
            f'{result_path}: expected {len(ground_truth)} records, '
            f'got {len(result_records)}'
        )
    # Align predictions with the ground truth by record id.
    result_records = [index for _, index in
                      sorted(result_records, key=lambda pair: pair[0])]
    print(f'{scheme_path}, {result_file_name}, count: {len(result_records)},'
          f' accuracy_score: {accuracy_score(ground_truth, result_records)}')


# Evaluate every prediction file directly under scheme6/result.
#
# Pass the scheme path *relative* to work_dir: evaluate() joins it onto
# work_dir itself, and the original absolute argument only worked because
# os.path.join discards earlier components when a later one is absolute.
scheme = 'scheme6'
result_dir = join(work_dir, scheme, 'result')
# os.walk recurses into subdirectories, but evaluate() can only open files
# in the top-level result/ directory — take just the root entry's files.
# Sort for a deterministic report order.
for filename in sorted(next(os.walk(result_dir))[2]):
    evaluate(scheme, filename)
