import topmost
from topmost import eva


# ---- Runtime configuration ----
device = "cuda"  # or "cpu"
# Directory containing the preprocessed dynamic-topic-model dataset (NYT).
dataset_dir = "./datasets/NYT"

# Whether to compute contextual document embeddings when loading the dataset.
contextual_embed = True
# Path to a local sentence-embedding model used for document embeddings.
# NOTE(review): hard-coded absolute Windows path — adjust for your machine,
# or point it at the Hugging Face id "sentence-transformers/all-MiniLM-L6-v2".
doc_embed_model = r"D:\DeskTop\paper\code\topicl-model\all-MiniLM-L6-v2"


if __name__ == "__main__":
    # ---- Load data ----
    # DynamicDataset reads the time-sliced corpus; read_labels=True also loads
    # document labels, required by the clustering/classification evaluation below.
    dataset = topmost.DynamicDataset(
        dataset_dir,
        batch_size=200,
        read_labels=True,
        device=device,
        contextual_embed=contextual_embed,
        doc_embed_model=doc_embed_model,
    )

    # ---- Build model ----
    model = topmost.CFDTM(
        vocab_size=dataset.vocab_size,
        num_times=dataset.num_times,
        pretrained_WE=dataset.pretrained_WE,
        train_time_wordfreq=dataset.train_time_wordfreq,
    ).to(device)

    trainer = topmost.DynamicTrainer(model, dataset, epochs=800, verbose=True)

    # ---- Train ----
    # train() also returns a train_theta, but export_theta() below is the
    # canonical source for both train and test thetas, so discard it here
    # instead of letting it be silently shadowed.
    top_words, _ = trainer.train()

    ########################### Evaluate ####################################

    # get theta (doc-topic distributions)
    train_theta, test_theta = trainer.export_theta()

    # Time slice index of each training document, as a NumPy array for eva.
    train_times = dataset.train_times.cpu().numpy()

    # compute topic coherence
    dynamic_TC = eva.dynamic_coherence(
        dataset.train_texts, train_times, dataset.vocab, top_words
    )
    print("dynamic_TC: ", dynamic_TC)

    # compute topic diversity
    dynamic_TD = eva.dynamic_diversity(
        top_words, dataset.train_bow.cpu().numpy(), train_times, dataset.vocab
    )
    print("dynamic_TD: ", dynamic_TD)

    # evaluate clustering
    results = eva._clustering(test_theta, dataset.test_labels)
    print(results)

    # evaluate classification
    results = eva._cls(
        train_theta, test_theta, dataset.train_labels, dataset.test_labels
    )
    print(results)
