from sklearn.metrics import accuracy_score
from os.path import join, abspath
import os
import json
import numpy as np

# Directory containing this script; data and result paths are resolved
# relative to it so the script works regardless of the current working dir.
work_dir = abspath(join(__file__, '..'))

# Load and concatenate the dev-set records from both files.
# BUG FIX: the original called join() with a single argument (a no-op),
# so the files were looked up in the CWD instead of next to the script,
# defeating the purpose of work_dir above.
records = []
for data_file_name in ('d-dev.json', 'm-dev.json'):
    with open(join(work_dir, data_file_name), 'r', encoding='utf8') as file:
        records.extend(json.load(file))

# Build the flat ground-truth label list (the choice index of the correct
# answer for every question, in record order) plus the expected accuracy
# of uniform random guessing as a baseline.
ground_truth = []
random_choice_acc = []
for record in records:
    # record[0] is the passage text (not needed for scoring);
    # record[1] is the list of question dicts.
    for q in record[1]:
        choice = q['choice']
        # Validate BEFORE dividing: the original asserted after
        # `1 / len(choice)`, so an empty list raised ZeroDivisionError
        # first (and assert is stripped under `python -O`).
        if not choice:
            raise ValueError('question has an empty choice list')
        random_choice_acc.append(1 / len(choice))
        # index of the correct answer among the choices.  The original
        # silently skipped questions whose answer was absent from the
        # choices, desynchronizing ground_truth from the predictions;
        # list.index raises ValueError instead, failing loudly.
        ground_truth.append(choice.index(q['answer']))

random_choice_acc = np.mean(random_choice_acc)
print(f'random_choice_acc: {random_choice_acc}')


def evaluate(scheme_path: str, result_file_name: str) -> None:
    """Print the accuracy of one JSONL result file against `ground_truth`.

    Each line of the file is a JSON object with an integer 'id' (an index
    into the module-level `ground_truth` list) and a 'result' dict whose
    'index' field is the predicted choice index.

    BUG FIX: the original sorted the predictions by id but indexed
    `ground_truth` with the *unsorted* id list, so y_true and y_pred were
    misaligned whenever the file was not already ordered by id.  Ids and
    predictions are now kept paired through a single sort.
    """
    with open(join(work_dir, scheme_path, 'result', result_file_name),
              encoding='utf8') as f:
        pairs = [(rec['id'], int(rec['result']['index']))
                 for rec in map(json.loads, f)]
    pairs.sort(key=lambda p: p[0])
    indexes = [p[0] for p in pairs]
    predicts = [p[1] for p in pairs]
    # Fancy-index the ground truth with the (now sorted) ids so labels
    # line up element-wise with the predictions.
    y_true = np.array(ground_truth)[indexes]
    print(f'{scheme_path}, {result_file_name}, count: {len(predicts)},'
          f' accuracy_score: {accuracy_score(y_true, predicts)}')


# Evaluate every result file found under scheme1/result.
# (os.walk also descends into subdirectories, matching the original
# behaviour exactly.)
dir_path = join(work_dir, 'scheme1')
result_files = [
    name
    for _, _, names in os.walk(join(dir_path, 'result'))
    for name in names
]
for result_file in result_files:
    evaluate(dir_path, result_file)
