import argparse
import os

import scipy
import scipy.sparse

import basic_trainer
import datasethandler
import evaluations
from NeuroMax.NeuroMax import NeuroMax
from evaluations.topic_coherence import compute_topic_coherence
from utils import config, miscellaneous, seed

RESULT_DIR = "results"
DATA_DIR = "datasets"

if __name__ == "__main__":
    # Run configuration. Edit the dict below (or swap in real argparse CLI
    # parsing); argparse.Namespace gives the same attribute-style access
    # either way.
    default_config = {
        "dataset": "NYT",
        "num_topics": 50,
        "num_groups": 20,
        "dropout": 0.2,
        "use_pretrainWE": False,
        "weight_ECR": 40.0,
        "alpha_ECR": 20.0,
        "weight_InfoNCE": 50.0,
        "beta_temp": 0.2,
        "epochs": 5,
        "batch_size": 200,
        "lr": 0.002,
        "device": "cuda",
        "seed": 0,
        "lr_scheduler": "StepLR",
        "lr_step_size": 125,
        "weight_loss_topic_cohesion": 1.0,
        "top_n_words": 10,
        "num_hard_negatives": 5,
        "use_topic_cohesion_loss": True,
        "use_infonce_loss": True,
    }
    args = argparse.Namespace(**default_config)

    # Each run writes its artifacts (config, beta, theta, top words) into a
    # timestamped sub-directory of RESULT_DIR.
    current_time = miscellaneous.get_current_datetime()
    current_run_dir = os.path.join(RESULT_DIR, current_time)
    miscellaneous.create_folder_if_not_exist(current_run_dir)

    config.save_config(args, os.path.join(current_run_dir, "config.txt"))
    seed.seedEverything(args.seed)
    print(args)

    # Only datasets that ship ground-truth labels can be evaluated with
    # clustering metrics at the end of the run.
    read_labels = args.dataset in ("YahooAnswers",)

    # Load a preprocessed dataset.
    dataset = datasethandler.BasicDatasetHandler(
        os.path.join(DATA_DIR, args.dataset),
        device=args.device,
        read_labels=read_labels,
        as_tensor=True,
        contextual_embed=True,
    )

    # Load pretrained word embeddings only when they are actually used:
    # avoids needless disk I/O and a spurious FileNotFoundError when the
    # embeddings file is absent and use_pretrainWE is False.
    pretrainWE = None
    if args.use_pretrainWE:
        pretrainWE = scipy.sparse.load_npz(
            os.path.join(DATA_DIR, args.dataset, "word_embeddings.npz")
        ).toarray()

    # Create the model.
    model = NeuroMax(
        vocab_size=dataset.vocab_size,
        num_topics=args.num_topics,
        num_groups=args.num_groups,
        dropout=args.dropout,
        pretrained_WE=pretrainWE,
        weight_loss_ECR=args.weight_ECR,
        alpha_ECR=args.alpha_ECR,
        weight_loss_InfoNCE=args.weight_InfoNCE,
        beta_temp=args.beta_temp,
        weight_loss_topic_cohesion=args.weight_loss_topic_cohesion,
        top_n_words=args.top_n_words,
        num_hard_negatives=args.num_hard_negatives,
        use_infonce_loss=args.use_infonce_loss,
        use_topic_cohesion_loss=args.use_topic_cohesion_loss,
    )
    # NOTE(review): duplicates the weight_loss_ECR constructor argument;
    # kept in case NeuroMax.__init__ stores it under another name — confirm
    # against NeuroMax and drop if redundant.
    model.weight_loss_ECR = args.weight_ECR
    model = model.to(args.device)

    # Create the trainer.
    trainer = basic_trainer.BasicTrainer(
        model,
        epochs=args.epochs,
        learning_rate=args.lr,
        batch_size=args.batch_size,
        lr_scheduler=args.lr_scheduler,
        lr_step_size=args.lr_step_size,
        log_interval=10,
    )

    # Train the model.
    trainer.train(dataset, verbose=True)

    # Persist beta, theta, and top-words files of several sizes. Only the
    # files on disk are consumed downstream (TC evaluation reads
    # top_words_10.txt), so the return values are not kept.
    beta = trainer.save_beta(current_run_dir)
    train_theta, test_theta = trainer.save_theta(dataset, current_run_dir)
    for top_n in (10, 15, 20, 25):
        trainer.save_top_words(dataset.vocab, top_n, current_run_dir)

    # Topic coherence (TC) computed on the saved 10-word topic file.
    TC_10_list, Out_TC = evaluations.topic_coherence.TC_on_wikipedia(
        os.path.join(current_run_dir, "top_words_10.txt")
    )
    print(f"TC_10: {Out_TC:.5f}")

    # Clustering evaluation requires ground-truth labels.
    if read_labels:
        clustering_results = evaluations.evaluate_clustering(
            test_theta, dataset.test_labels
        )
        print("NMI: ", clustering_results["NMI"])
        print("Purity: ", clustering_results["Purity"])
