import json
import os
import time

import numpy as np


def get_topic_diversity(beta, topk):
    """Compute topic diversity: the fraction of unique words in the top-k
    words across all topics.

    Args:
        beta: (num_topics, vocab_size) array of per-topic word weights.
        topk: number of top words taken from each topic.

    Returns:
        TD, a float in (0, 1]; 1.0 means no topic shares a top word with another.
    """
    num_topics = beta.shape[0]
    list_w = np.zeros((num_topics, topk))
    for k in range(num_topics):
        # indices of the k-th topic's topk highest-weight words, descending
        idx = beta[k, :].argsort()[-topk:][::-1]
        list_w[k, :] = idx
    n_unique = len(np.unique(list_w))
    TD = n_unique / (topk * num_topics)
    # Bug fix: typo "diveristy" -> "diversity"
    print('Topic diversity is: {}'.format(TD))
    # Bug fix: the score was computed but never returned to the caller.
    return TD


def get_document_frequency(data, wi, wj=None):
    """Count document frequencies for one word or a word pair.

    Args:
        data: list of documents; each is an array-like of shape (1, doc_len)
              holding word indices (squeezed before membership tests).
        wi: word index whose document frequency is wanted.
        wj: optional second word index.

    Returns:
        If wj is None: D(wi), the number of documents containing wi.
        Otherwise: a tuple (D(wj), D(wi, wj)) — documents containing wj,
        and documents containing both wi and wj.
    """
    if wj is None:
        D_wi = 0
        for l in range(len(data)):
            doc = data[l].squeeze(0)
            # Bug fix: length-1 documents were skipped (`continue`) here but
            # counted in the pair branch below, undercounting D(wi).
            # Wrap the 0-d squeeze result in a list so `in` works uniformly.
            if len(doc) == 1:
                doc = [doc.squeeze()]
            else:
                doc = doc.squeeze()
            if wi in doc:
                D_wi += 1
        return D_wi
    D_wj = 0
    D_wi_wj = 0
    for l in range(len(data)):
        doc = data[l].squeeze(0)
        if len(doc) == 1:
            doc = [doc.squeeze()]
        else:
            doc = doc.squeeze()
        if wj in doc:
            D_wj += 1
            if wi in doc:
                D_wi_wj += 1
    return D_wj, D_wi_wj


def get_topic_coherence(beta, data, vocab):
    """Compute average pairwise topic coherence over the top 10 words of
    each topic, using the NPMI-style score
    f(wi, wj) = -1 + (log D(wi) + log D(wj) - 2 log D) / (log D(wi, wj) - log D).

    Args:
        beta: (num_topics, vocab_size) array of per-topic word weights.
        data: list of documents as word-index arrays (see
              get_document_frequency).
        vocab: vocabulary list; kept for interface compatibility (no longer
               used internally).

    Returns:
        TC, the mean per-topic coherence normalized by the number of word
        pairs per topic.
    """
    D = len(data)  # number of documents
    print('D: ', D)
    TC = []
    num_topics = len(beta)
    num_pairs = 0
    for k in range(num_topics):
        print('k: {}/{}'.format(k, num_topics))
        # Bug fix: argsort()[-11:] selected 11 words despite the "top 10"
        # intent; take exactly the 10 highest-weight word indices.
        top_10 = list(beta[k].argsort()[-10:][::-1])
        TC_k = 0
        num_pairs = 0  # pairs per topic; identical for every k
        for i, word in enumerate(top_10):
            # D(w_i)
            D_wi = get_document_frequency(data, word)
            for j in range(i + 1, len(top_10)):
                # D(w_j) and D(w_i, w_j)
                D_wj, D_wi_wj = get_document_frequency(data, word, top_10[j])
                if D_wi_wj == 0:
                    f_wi_wj = -1
                else:
                    f_wi_wj = -1 + (np.log(D_wi) + np.log(D_wj) - 2.0 * np.log(D)) / (np.log(D_wi_wj) - np.log(D))
                TC_k += f_wi_wj
                num_pairs += 1
        TC.append(TC_k)
    print('counter: ', num_pairs)
    print('num topics: ', len(TC))
    TC = np.mean(TC) / num_pairs
    print('Topic coherence is: {}'.format(TC))
    # Bug fix: the score was computed but never returned to the caller.
    return TC


def nearest_neighbors(word, embeddings, vocab):
    """Return the 20 vocabulary words whose embeddings are most
    cosine-similar to `word` (the query word itself is included and
    typically ranks first).

    Args:
        word: query word; must be present in `vocab`.
        embeddings: torch tensor of shape (vocab_size, emb_dim).
        vocab: list of vocabulary words aligned with `embeddings` rows.

    Returns:
        List of up to 20 words, most similar first.
    """
    vectors = embeddings.data.cpu().numpy()
    query = vectors[vocab.index(word)]
    # cosine similarity: (v . q) / (||q|| * ||v||)
    sims = vectors.dot(query) / (np.linalg.norm(query) * np.linalg.norm(vectors, axis=1))
    # Idiom fix: replaced a list comprehension used only for its
    # `append` side effect with direct slicing of the sorted indices.
    top_indices = sims.argsort()[::-1][:20]
    return [vocab[idx] for idx in top_indices]


def save_model_info(args, train_re):
    """Persist model hyper-parameters and training progress to
    ./results/model_info.json.

    If an entry with the same `model_name` already exists, only its
    `update_time` and `train_info` fields are refreshed; otherwise a new
    entry is appended. The file is created on first use.

    Args:
        args: argparse-style namespace with the model's hyper-parameters.
        train_re: training-progress string, e.g. "epoch/epochs".
    """
    now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    print("model have been saved on {}".format(now))
    file = "./results/model_info.json"
    new_model_info = {'model_name': args.model_name,
                      'dataset': args.dataset,
                      'update_time': now,
                      'train_info': train_re,  # r"{}/{}".format(epoch, args.epochs)
                      'loss_ration': {'recon_ration': args.ratio_recon,
                                      'kl_ratio': args.ratio_KL,
                                      'latent_ratio': args.ratio_latent},
                      'epochs': args.epochs,
                      'num_topics': args.num_topics,
                      't_hidden_size': args.t_hidden_size,
                      'optimizer': args.optimizer,
                      'clip': args.clip,
                      'theta_act': args.theta_act,
                      'lr': args.lr,
                      'batch_size': args.batch_size,
                      'rho_size': args.rho_size,
                      'train_embeddings': args.train_embeddings
                      }
    if not os.path.exists(file):
        result = {"help": "Created on {}".format(now),
                  "model_info": [new_model_info],
                  "attention": ""}
    else:
        with open(file) as f:
            result = json.load(f)
        all_model_info = result["model_info"]
        # Update the existing entry in place if the model was saved before;
        # the for/else appends only when no matching name is found.
        for model in all_model_info:
            if model["model_name"] == args.model_name:
                model['update_time'] = now
                model['train_info'] = train_re
                break
        else:
            all_model_info.append(new_model_info)
    # Single write path (the original duplicated json.dump in two places);
    # "w" replaces the unnecessary read-write mode "w+".
    with open(file, "w") as f:
        json.dump(result, f, ensure_ascii=False, indent=4)

def etm_print(info, separator=False):
    """Append a log line to ./results/etm_print.txt.

    The log file is truncated (restarted) when the line marks the first
    epoch (contains "Epoch-->1 "); otherwise lines are appended.

    Args:
        info: text to write (a newline is added).
        separator: if True, also write a 100-underscore divider line.
    """
    # "w"/"a" instead of "w+"/"a+": the read capability was never used.
    mode = 'w' if "Epoch-->1 " in info else 'a'
    with open(r'./results/etm_print.txt', mode) as file_object:
        file_object.write(info + "\n")
        if separator:
            file_object.write('_' * 100 + "\n")
