import argparse
import os
import evaluations
import datasethandler

# Directory where experiment outputs are written (kept for parity with training scripts).
RESULT_DIR = "results"
# Root directory holding the preprocessed datasets loaded by BasicDatasetHandler below.
DATA_DIR = "datasets"

if __name__ == "__main__":
    # Hyper-parameters mirroring the training configuration; wrapped in an
    # argparse.Namespace purely so downstream code can use attribute access
    # (args.dataset, args.device, ...) — no command-line parsing happens here.
    default_config = {
        "dataset": "NYT",
        "num_topics": 50,
        "num_groups": 20,
        "dropout": 0.2,
        "use_pretrainWE": False,
        "weight_ECR": 40.0,
        "alpha_ECR": 20.0,
        "weight_InfoNCE": 50.0,
        "beta_temp": 0.2,
        "epochs": 300,
        "batch_size": 200,
        "lr": 0.002,
        "device": "cuda",
        "seed": 0,
        "lr_scheduler": "StepLR",
        "lr_step_size": 125,
        "weight_loss_topic_cohesion": 1.0,
        "top_n_words": 10,
        "num_hard_negatives": 5,
        "use_topic_cohesion_loss": True,
        "use_infonce_loss": True,
    }
    args = argparse.Namespace(**default_config)
    read_labels = False  # labels are not needed for coherence evaluation

    # Load a preprocessed dataset; only test_texts and vocab are used below.
    dataset = datasethandler.BasicDatasetHandler(
        os.path.join(DATA_DIR, args.dataset),
        device=args.device,
        read_labels=read_labels,
        as_tensor=True,
        contextual_embed=True,
    )

    # NOTE(review): hard-coded absolute path to a previous run's output —
    # consider making this a CLI argument or deriving it from RESULT_DIR.
    txt_path = r"D:\DeskTop\NeuTM\results\NYT\2025-06-30_17-11-35\top_words_20.txt"

    # One topic (a line of top words) per line. Strip trailing newlines and
    # drop blank lines so a trailing newline in the file does not produce an
    # empty topic. Reading happens entirely inside the `with` block; the
    # evaluation below runs after the file handle is closed.
    with open(txt_path, "r", encoding="utf-8") as f:
        top_words_20 = [line.strip() for line in f if line.strip()]

    print(top_words_20)

    # NOTE(review): _coherence is a private helper of
    # evaluations.topic_coherence — prefer a public wrapper if one exists.
    cv_per_topic, score = evaluations.topic_coherence._coherence(
        dataset.test_texts, dataset.vocab, top_words_20
    )
    print(f"TC_20: {score:.5f}")
