from sklearn.metrics import f1_score
from data_utils.basic_data import load_train_val_dataset
from config import conf
from os.path import join
import pandas as pd
import os

# Root directory holding inference outputs, read from the project config
# (section 'dir', key 'inference_result_dir') — presumably configured in
# config.py / an ini file; verify against the `conf` object.
INFERENCE_DIR = conf.get('dir', 'inference_result_dir')
# Prediction CSV to score. The file name encodes model, validation score
# and epoch, so this path is updated by hand for each evaluation run.
PREDICT_FILE = join(INFERENCE_DIR, 'evaluation', 'BertSentiEntityscore0.940363_epoch6.csv')


def evaluate(val_dataset, predict_dataset):
    """Score model predictions against the validation set.

    Both frames are aligned row-wise by sorting on ``id``, then two F1
    scores are combined: a row-level sentiment F1 on the ``negative``
    column, and an entity-level F1 over per-entity membership labels
    derived from the ``';'``-separated ``key_entity`` strings.

    Args:
        val_dataset: DataFrame with columns ``id``, ``entity``,
            ``key_entity`` and ``negative`` (ground truth).
        predict_dataset: DataFrame with columns ``id``, ``key_entity``
            and ``negative`` (model output), one row per validation row.

    Returns:
        Tuple ``(sentiment_f1, entity_f1, total_score)`` where
        ``total_score = 0.4 * sentiment_f1 + 0.6 * entity_f1``.

    Raises:
        ValueError: if the two frames do not have the same length.
    """
    # `assert` is stripped under `python -O`; validate explicitly instead.
    if len(predict_dataset) != len(val_dataset):
        raise ValueError('val/predict size mismatch: %d vs %d'
                         % (len(val_dataset), len(predict_dataset)))

    # Align rows by id so the i-th prediction matches the i-th truth row.
    val_dataset = val_dataset.sort_values(['id'])
    predict_dataset = predict_dataset.sort_values(['id'])

    # Per-entity binary labels: for each candidate listed in 'entity',
    # 1 iff it appears in the corresponding key_entity string.
    val_label, predict_label = [], []
    for entity, key_entity, predict_entity in zip(val_dataset['entity'].values,
                                                  val_dataset['key_entity'].values,
                                                  predict_dataset['key_entity'].values):
        # Sets give O(1) membership tests inside the inner loop.
        true_entities = set(str(key_entity).split(';'))
        pred_entities = set(str(predict_entity).split(';'))
        for e in str(entity).split(';'):
            val_label.append(int(e in true_entities))
            predict_label.append(int(e in pred_entities))

    sentiment_f1 = f1_score(val_dataset['negative'], predict_dataset['negative'])
    entity_f1 = f1_score(val_label, predict_label)
    total_score = 0.4 * sentiment_f1 + 0.6 * entity_f1
    return sentiment_f1, entity_f1, total_score


if __name__ == '__main__':
    # Log where we run from and which prediction file is being scored.
    print(os.getcwd())
    print(PREDICT_FILE)

    # Recreate the same 80/20 train/val split used at training time and
    # load the corresponding model predictions.
    _, validation_df = load_train_val_dataset(split_ratio=0.8)
    prediction_df = pd.read_csv(PREDICT_FILE)

    scores = evaluate(validation_df, prediction_df)
    for label, value in zip(('sentiment_f1', 'entity_f1', 'total_score'), scores):
        print(label, value)
