import gensim
from gensim.models.doc2vec import Doc2Vec
from sklearn.cluster import KMeans

# Alias for gensim's TaggedDocument (NOTE: the misspelling "TaggededDocument"
# is kept as-is because other code in this project may reference it).
TaggededDocument = gensim.models.doc2vec.TaggedDocument

def get_datasest(docs):
    """Convert raw space-tokenised text lines into TaggedDocument objects.

    Args:
        docs: list of strings, one document per entry, words separated by
              single spaces (typically lines read from a file, so the last
              token may carry a trailing newline).

    Returns:
        list of TaggedDocument instances, each tagged with its index in
        ``docs`` (doc2vec requires every document to carry a tag).
    """
    x_train = []
    for i, text in enumerate(docs):
        word_list = text.split(' ')
        # Lines read from a file usually end with '\n'; strip it off the
        # final token so it does not pollute the vocabulary.
        word_list[-1] = word_list[-1].strip()
        x_train.append(TaggededDocument(word_list, tags=[i]))
    return x_train

def train(x_train, size=200, epoch_num=100, min_count=5):
    """Train a Doc2Vec model on the tagged documents.

    Args:
        x_train: list of TaggedDocument instances (see get_datasest).
        size: dimensionality of the document vectors (mapped to the
              ``vector_size`` parameter — the ``size`` keyword was removed
              in gensim 4.x).
        epoch_num: number of training epochs.
        min_count: ignore all words with total frequency lower than this.

    Returns:
        The trained Doc2Vec model (also saved to 'model_dm.doc2vec').
    """
    # Build the model WITHOUT a corpus so that training happens exactly once
    # below — passing documents to the constructor would already run a full
    # training pass, and the explicit .train() call would then train again.
    model_dm = Doc2Vec(min_count=min_count, window=3, vector_size=size,
                       sample=1e-3, negative=5, workers=4)
    model_dm.build_vocab(x_train)
    model_dm.train(x_train, total_examples=model_dm.corpus_count,
                   epochs=epoch_num)
    model_dm.save('model_dm.doc2vec')
    return model_dm

def getVec(x_train, model_dm):
    """Infer a document vector for every tagged document.

    Args:
        x_train: iterable of (words, tags) pairs, e.g. TaggedDocument
                 objects as produced by get_datasest.
        model_dm: a trained Doc2Vec model.

    Returns:
        list of inferred vectors, one per input document, in input order.
    """
    # Each item unpacks to (words, tags); only the word list is needed
    # for inference.
    return [model_dm.infer_vector(words) for words, _tags in x_train]

def cluster(infered_vectors_list, n_clusters):
    """Run k-means over the inferred document vectors.

    Args:
        infered_vectors_list: sequence of document vectors (from getVec).
        n_clusters: number of clusters to form.

    Returns:
        Tuple of (per-sample cluster labels, cluster centre coordinates).
    """
    km = KMeans(n_clusters=n_clusters)
    km.fit(infered_vectors_list)
    return km.labels_, km.cluster_centers_
