from gensim.models import Word2Vec
from gensim.models import Doc2Vec
import numpy as np
import pandas as pd
import gensim

# Load the preprocessed train/test data and dump every description to a flat
# text file (one description per line) for gensim's TaggedLineDocument below.
df_train = pd.read_csv('../data/train_processed.csv')
df_test = pd.read_csv('../data/test_processed.csv')
all_description = pd.concat([df_train['description'], df_test['description']], axis=0)
# header=False matters: modern pandas writes the Series name ("description")
# as a header row by default, which TaggedLineDocument would then treat as
# document 0 — shifting every doc vector off by one relative to the rows.
all_description.to_csv('../data/description.txt', index=False, header=False,
                       encoding='utf-8')

# Train a PV-DBOW Doc2Vec model (dm=0) on the description corpus, one tagged
# document per line of description.txt.
docs = gensim.models.doc2vec.TaggedLineDocument('../data/description.txt')
# Build the model WITHOUT passing the corpus to the constructor: supplying
# `docs` there immediately runs a full training pass (with the default epoch
# count), so the explicit train() call below would train the model a second
# time. Construct empty, build the vocabulary, then train exactly once.
model = Doc2Vec(size=100, window=5, min_count=3, workers=4, dm=0, dm_mean=1)
model.build_vocab(docs)
model.train(docs, total_examples=model.corpus_count, epochs=100)
model.save('../model/doc_vec.model')
# Per-document vectors, row-aligned with the line order of description.txt.
# NOTE(review): `docvecs.doctag_syn0` is the gensim 3.x accessor; under
# gensim 4.x this is `model.dv.vectors` — confirm the installed version.
docvectors = model.docvecs.doctag_syn0
np.save("../model/doc_vectors_2row.npy", docvectors)