import argparse
import pathlib
import sys

from datasets import load_dataset
from sklearn.metrics import accuracy_score, f1_score, recall_score
from tqdm import tqdm

from src.processors import ChineseEmotionSmallClassifier, Emotion

# Command-line interface for the evaluation tool.
#
# exit_on_error=False makes parse_args() raise argparse.ArgumentError instead
# of exiting, so evaluate_main() can print help itself before bailing out.
parser = argparse.ArgumentParser(
    description="PND Analyzer Evaluation Tool",
    exit_on_error=False,
)

# (flags, default, help) specs for every string-valued option we accept.
_ARG_SPECS = (
    (
        ("-o", "--output-dir"),
        "./output",
        "The directory to output the results to. Defaults to './output'.",
    ),
    (
        ("--cache-dir",),
        "./cache",
        "The directory to cache the models and datasets to. Defaults to './cache'.",
    ),
)
for _flags, _default, _help in _ARG_SPECS:
    parser.add_argument(*_flags, type=str, default=_default, help=_help)


def _coarse_emotion(label: str) -> int:
    """Collapse a fine-grained dataset label into a coarse Emotion value.

    Neutral/questioning tones map to NEUTRAL, happy/surprised tones to
    POSITIVE, and every other label to NEGATIVE.
    """
    if label in ("平淡語氣", "疑問語調"):
        return Emotion.NEUTRAL.value
    if label in ("開心語調", "驚訝語調"):
        return Emotion.POSITIVE.value
    return Emotion.NEGATIVE.value


def evaluate_main() -> None:
    """Evaluate ChineseEmotionSmallClassifier on its own training dataset.

    Downloads the dataset, remaps its fine-grained emotion labels onto the
    coarse NEUTRAL/POSITIVE/NEGATIVE classes, classifies every text, then
    writes accuracy, macro recall and macro F1 both to
    ``<output-dir>/model_evaluation.txt`` and to stdout.

    Exits with status 1 (after printing usage help) on invalid CLI arguments.
    """
    try:
        args = parser.parse_args()
    except argparse.ArgumentError as e:
        print(e.message)
        parser.print_help()
        sys.exit(1)

    output_dir = pathlib.Path(args.output_dir)

    # The dataset which the ChineseEmotionSmallClassifier is trained on.
    #
    # We use the same dataset to evaluate the model for accuracy, recall and macro F1.
    dataset = load_dataset(
        "Johnson8187/Chinese_Multi-Emotion_Dialogue_Dataset",
        cache_dir=args.cache_dir,
    )
    dataset = dataset["train"]  # this dataset only provides the train split
    texts = dataset["text"]
    # Collapse the dataset's fine-grained labels into our three coarse classes.
    emotions = [
        _coarse_emotion(label)
        for label in tqdm(dataset["emotion"], desc="Preprocessing")
    ]

    classifier = ChineseEmotionSmallClassifier(args.cache_dir)
    predictions = [
        classifier.classify(text).value
        for text in tqdm(texts, desc="Classification")
    ]

    # exist_ok avoids the check-then-create race of exists() + mkdir().
    output_dir.mkdir(parents=True, exist_ok=True)
    accuracy = accuracy_score(emotions, predictions)
    recall = recall_score(emotions, predictions, average="macro")
    f1 = f1_score(emotions, predictions, average="macro")
    # Build the report once so the file and stdout can never drift apart.
    report = f"Accuracy: {accuracy}\nRecall: {recall}\nF1: {f1}\n"
    (output_dir / "model_evaluation.txt").write_text(report, encoding="utf-8")
    print(report, end="")


# Entry point when the module is executed directly as a script.
if __name__ == "__main__":
    evaluate_main()
