import argparse
import os

import numpy as np
import pandas as pd
import scipy
import scipy.sparse

import basic_trainer
import datasethandler
import evaluations
from NeuroMax.NeuroMax import NeuroMax
from utils import config, miscellaneous, seed

RESULT_DIR = "results"
DATA_DIR = "datasets"

if __name__ == "__main__":
    # Run the full train/evaluate pipeline repeatedly, appending one row of
    # metrics per run to a shared CSV so run-to-run variance can be studied.
    for run_idx in range(1, 100):
        print(f"================= Running {run_idx} times ==================")
        default_config = {
            "dataset": "20NG",
            "num_topics": 50,
            "num_groups": 20,
            "dropout": 0.2,
            "use_pretrainWE": False,
            "weight_ECR": 40.0,
            "alpha_ECR": 20.0,
            "weight_InfoNCE": 50.0,
            "beta_temp": 0.2,
            "epochs": 500,
            "batch_size": 200,
            "lr": 0.002,
            "device": "cuda",
            # FIX: the original pinned the seed to 0, so all 99 iterations were
            # seeded identically (seedEverything(0) each pass) and the "Seed"
            # column of the aggregate CSV never varied. Seed each run with its
            # index so the repeated runs are actually independent trials.
            "seed": run_idx,
            "lr_scheduler": "StepLR",
            "lr_step_size": 125,
            "weight_loss_topic_cohesion": 1.0,
            "top_n_words": 10,
            "num_hard_negatives": 5,
            "use_topic_cohesion_loss": True,
            "use_infonce_loss": True,
        }
        args = argparse.Namespace(**default_config)

        # Each run gets its own timestamped directory under results/<dataset>/.
        current_time = miscellaneous.get_current_datetime()
        current_run_dir = os.path.join(RESULT_DIR, args.dataset, current_time)
        miscellaneous.create_folder_if_not_exist(current_run_dir)

        config.save_config(args, os.path.join(current_run_dir, "config.txt"))
        seed.seedEverything(args.seed)
        print(args)
        read_labels = True

        # Load a preprocessed dataset (tensors on the target device, with
        # contextual embeddings and gold labels for clustering evaluation).
        dataset = datasethandler.BasicDatasetHandler(
            os.path.join(DATA_DIR, args.dataset),
            device=args.device,
            read_labels=read_labels,
            as_tensor=True,
            contextual_embed=True,
        )

        # Pretrained word embeddings are stored as a sparse matrix on disk;
        # densify before handing them to the model.
        pretrainWE = scipy.sparse.load_npz(
            os.path.join(DATA_DIR, args.dataset, "word_embeddings.npz")
        ).toarray()

        model = NeuroMax(
            vocab_size=dataset.vocab_size,
            num_topics=args.num_topics,
            num_groups=args.num_groups,
            dropout=args.dropout,
            pretrained_WE=pretrainWE if args.use_pretrainWE else None,
            weight_loss_ECR=args.weight_ECR,
            alpha_ECR=args.alpha_ECR,
            weight_loss_InfoNCE=args.weight_InfoNCE,
            beta_temp=args.beta_temp,
            weight_loss_topic_cohesion=args.weight_loss_topic_cohesion,
            top_n_words=args.top_n_words,
            num_hard_negatives=args.num_hard_negatives,
            use_infonce_loss=args.use_infonce_loss,
            use_topic_cohesion_loss=args.use_topic_cohesion_loss,
        )
        # NOTE(review): weight_loss_ECR is already passed to the constructor
        # above; this re-assignment looks redundant but is kept in case
        # NeuroMax stores it under a different name — confirm and drop.
        model.weight_loss_ECR = args.weight_ECR
        model = model.to(args.device)

        # Create a trainer for the configured schedule.
        trainer = basic_trainer.BasicTrainer(
            model,
            epochs=args.epochs,
            learning_rate=args.lr,
            batch_size=args.batch_size,
            lr_scheduler=args.lr_scheduler,
            lr_step_size=args.lr_step_size,
            log_interval=10,
        )

        # Train the model.
        trainer.train(dataset, verbose=True)

        # Persist beta, theta and top-word lists. Only the values consumed by
        # the metrics below are bound to locals; the other save_* calls are
        # invoked purely for their file output. (The original also computed
        # train/test theta argmaxes that were never used — removed.)
        trainer.save_beta(current_run_dir)
        train_theta, test_theta = trainer.save_theta(dataset, current_run_dir)
        top_words_10 = trainer.save_top_words(dataset.vocab, 10, current_run_dir)
        trainer.save_top_words(dataset.vocab, 15, current_run_dir)
        top_words_20 = trainer.save_top_words(dataset.vocab, 20, current_run_dir)
        trainer.save_top_words(dataset.vocab, 25, current_run_dir)

        # Topic diversity on the top-10 words, topic coherence on the top-20.
        TD_10 = evaluations.compute_topic_diversity(top_words_10, _type="TD")
        _per_topic_cv, score = evaluations.topic_coherence.compute_topic_coherence(
            dataset.test_texts, dataset.vocab, top_words_20
        )
        print(f"TC_20: {score:.5f}")

        # One-row DataFrame holding this run's metrics.
        df = pd.DataFrame()
        df["Seed"] = [args.seed]
        df["TD"] = [round(TD_10, 5)]
        df["CV"] = [round(score, 5)]

        # Clustering quality of the test document-topic distributions.
        if read_labels:
            clustering_results = evaluations.evaluate_clustering(
                test_theta, dataset.test_labels
            )
            df["NMI"] = [round(clustering_results["NMI"], 5)]
            df["Purity"] = [round(clustering_results["Purity"], 5)]

        df["time"] = [current_time]
        print(df)

        # Append this run's row to the aggregate CSV (renamed from the
        # misleading `dest_dir` — this is a file path, not a directory).
        results_csv = os.path.join(
            RESULT_DIR,
            f"{args.dataset}_results_{args.num_topics}_cos_{args.use_topic_cohesion_loss}_infonce_{args.use_infonce_loss}.csv",
        )
        # mode="a" creates the file if missing; emit the header only on the
        # first write so the original create-or-append branching collapses.
        df.to_csv(
            results_csv,
            mode="a",
            header=not os.path.exists(results_csv),
            index=False,
        )