from gensim.models import Word2Vec
import os
import gensim
from gensim.models.doc2vec import TaggedDocument, Doc2Vec
import multiprocessing
import numpy as np

# Ensure the output directory for trained models/features exists.
# BUG FIX: the original checked "../text2vec" but created "../text2vect"
# (typo), so every later save into "../text2vec" would fail with a
# missing-directory error on a fresh checkout.
if not os.path.exists("../text2vec"):
	os.mkdir("../text2vec")

# Train the word2vec model if it has not been trained yet.
if not os.path.exists("../text2vec/word2vec.model"):
	# Each input line is "label\ttext"; keep only the text column.
	# encoding="utf-8" makes the read independent of the platform's
	# locale default (the data file is English community content).
	with open("../data/en_community_content_clean.csv", "r", encoding="utf-8") as fr:
		tokenized = [line.strip().split('\t')[1].split() for line in fr]
	# size=100 is the gensim 3.x keyword (renamed vector_size in 4.x);
	# kept as-is because the rest of this script uses the 3.x API.
	model = Word2Vec(tokenized, size=100, min_count=1, workers=4)
	model.save("../text2vec/word2vec.model")

# Train the doc2vec model if it has not been trained yet.
if not os.path.exists("../text2vec/doc2vec.model"):
	# Each input line is "label\ttext"; keep only the text column.
	with open("../data/en_community_content_clean.csv", "r", encoding="utf-8") as fr:
		sentences = [line.strip().split('\t')[1] for line in fr]
	# Tag each document with its line index so vectors can be looked up later.
	train_corpus = [TaggedDocument(gensim.utils.simple_preprocess(line), [i])
	                for i, line in enumerate(sentences)]
	cores = multiprocessing.cpu_count()
	# dm=0 selects the PV-DBOW training algorithm.
	# NOTE(review): sample=3 looks suspicious — gensim's subsampling
	# threshold is normally ~1e-3..1e-5; kept unchanged to preserve the
	# trained model, but worth confirming with the original author.
	model = Doc2Vec(dm=0, size=100, negative=5, hs=0, sample=3, min_count=1, workers=cores)
	model.build_vocab(train_corpus)
	model.train(train_corpus, total_examples=model.corpus_count, epochs=20)
	model.save("../text2vec/doc2vec.model")

# Extract averaged word2vec embedding features for every document.
with open("../data/en_community_content_clean.csv", "r", encoding="utf-8") as fr:
	# Each row becomes [label, message].
	dataset = [line.strip().split('\t') for line in fr]

model = Word2Vec.load('../text2vec/word2vec.model')
word2vecs = []
for label, message in dataset:
	tokens = message.split()
	vec = np.zeros(100)
	for w in tokens:
		# model.wv[...] is the supported lookup (bare model[w] is the
		# deprecated gensim-3 form and was removed in gensim 4.x).
		vec += model.wv[w]
	# BUG FIX: the original divided unconditionally, which produced
	# NaN vectors (0/0) for empty messages; write a zero vector instead.
	if tokens:
		vec /= len(tokens)
	word2vecs.append("%s\t%s\n" % (label, ' '.join(map(str, vec))))
with open("../text2vec/word2vec.txt", "w") as fw:
	fw.writelines(word2vecs)

# Extract doc2vec embedding features for every document.
model = Doc2Vec.load('../text2vec/doc2vec.model')
doc2vecs = []
for label, message in dataset:
	# BUG FIX: infer_vector expects a list of word tokens; the original
	# passed the raw string, so gensim iterated it character by character
	# and inferred vectors from single letters instead of words.
	vec = model.infer_vector(message.split())
	doc2vecs.append("%s\t%s\n" % (label, ' '.join(map(str, vec))))
with open("../text2vec/doc2vecs.txt", "w") as fw:
	fw.writelines(doc2vecs)

