import re
import pandas as pd
import jieba
import gensim
from gensim.models import Word2Vec  # Word2Vec model for training word vectors
from gensim.models.word2vec import LineSentence

# --- Data preprocessing: load, deduplicate, tokenize, remove stopwords ---
data = pd.read_csv('../data/new.csv', header=None)
data.columns = ['code', 'contents']
temp = data.contents
# Drop duplicate documents before tokenizing.
data_dup = temp.drop_duplicates()
# Tokenize each document with jieba.
data_cut = data_dup.astype('str').apply(lambda x: list(jieba.cut(x)))
# Load the stopword list; sep='ooo' never matches, so each line is read
# whole as one stopword (file is GBK-encoded).
stopword = pd.read_csv('../data/stopword.txt', sep='ooo', encoding='gbk',
                       header=None, engine='python')
# Use a set for O(1) membership tests; also drop the plain space,
# the ideographic space (U+3000) and the non-breaking space (U+00A0).
stopset = set(stopword[0]) | {' ', '\u3000', '\xa0'}
data_qustop = data_cut.apply(lambda x: [i for i in x if i not in stopset])
# Discard documents that became empty after filtering.
data_qustop = data_qustop[data_qustop.apply(bool)]
data_qustop.to_csv('../tmp/data_qustop.csv')
def get_word2vec_trainwords(docs=None, path='../tmp/word2vec_train_words.txt'):
    """Write tokenized documents to a text file, one document per line.

    Each document (a list of tokens) is space-joined and written on its
    own line, the format expected by gensim's LineSentence.

    Args:
        docs: iterable of token lists; defaults to the module-level
            ``data_qustop`` series for backward compatibility.
        path: output file path (UTF-8).
    """
    if docs is None:
        docs = data_qustop
    # 'with' guarantees the file is closed exactly once, after the loop.
    # (The original called f.close() inside the loop, so every write
    # after the first document raised "I/O operation on closed file".)
    with open(path, 'w', encoding='utf-8') as f:
        for i, text in enumerate(docs, start=1):
            f.write(' '.join(text) + '\n')
            # Progress report every 200 documents.
            if i % 200 == 0:
                print('Saved ' + str(i) + ' articles')
# Build the training corpus file first — the original never called the
# function, so the file below might not exist on a fresh run.
get_word2vec_trainwords()
# Train CBOW (sg=0) word vectors over the corpus. LineSentence accepts a
# file path directly, which avoids leaking an open file handle.
# NOTE(review): size= and model[word] are the gensim < 4.0 API that this
# script already uses; gensim >= 4.0 needs vector_size= and model.wv[word].
model = Word2Vec(LineSentence('../tmp/word2vec_train_words.txt'),
                 sg=0, size=192, window=5, min_count=5, workers=9)
print(model['新年'])