# coding: utf-8

import common
from gensim.models.doc2vec import Doc2Vec, LabeledSentence, TaggedDocument
import numpy as np
from sklearn.cluster import KMeans, MiniBatchKMeans

'''
1. Official doc2vec demo notebook (with extensive commentary):
   https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/doc2vec-lee.ipynb
'''


def get_kmeans_model(tfidf_array, k, use_mini_batch=False, random_state=None):
    """Fit a k-means clustering model on the given feature matrix.

    Args:
        tfidf_array: 2-D array-like of shape (n_samples, n_features). Despite
            the name, any dense feature matrix works (the caller in this file
            passes doc2vec vectors).
        k: number of clusters (centroids).
        use_mini_batch: if True, use MiniBatchKMeans, which trades a little
            accuracy for much faster fitting on large corpora.
        random_state: optional int seed for centroid initialization. With
            n_init=1 the result depends heavily on the initial centroids, so
            pass a seed for reproducible clustering. The default (None)
            preserves the original non-deterministic behavior.

    Returns:
        The fitted KMeans or MiniBatchKMeans estimator.
    """
    if use_mini_batch:
        km = MiniBatchKMeans(n_clusters=k, init='k-means++', n_init=1,
                             init_size=1000, batch_size=100, max_iter=1000,
                             verbose=False, random_state=random_state)
    else:
        km = KMeans(n_clusters=k, init='k-means++', max_iter=100, n_init=1,
                    verbose=False, random_state=random_state)
    km.fit(tfidf_array)
    return km


if __name__ == '__main__':
    # Load the training corpus and tokenize each document on single spaces.
    raw_docs = common.get_train_set(max_count=55000)
    tokenized_docs = [text.split(' ') for text in raw_docs]

    # gensim expects TaggedDocument objects; tag each one with its index so
    # similarity results can be mapped back to the original text.
    tagged_corpus = [TaggedDocument(tokens, tags=[idx])
                     for idx, tokens in enumerate(tokenized_docs)]

    # Passing the corpus to the constructor trains the model immediately
    # (for `epochs` passes); no separate train() call is needed.
    doc2vec = Doc2Vec(tagged_corpus, min_count=1, window=5, vector_size=100,
                      sample=1e-3, workers=4, epochs=70, dbow_words=1)

    # Infer a vector for every document and stack them into a 2-D array.
    inferred_vectors = np.array([doc2vec.infer_vector(tokens)
                                 for tokens in tokenized_docs])

    # Number of cluster centroids.
    k = 2
    km = get_kmeans_model(tfidf_array=inferred_vectors, k=k)

    # For each centroid, print the documents whose vectors sit closest to it.
    for cluster_id in range(k):
        print(cluster_id)
        for doc_index, similarity in doc2vec.docvecs.most_similar([km.cluster_centers_[cluster_id]]):
            print(similarity, raw_docs[doc_index])

