#! -*- coding:utf-8 -*-
'''pip install  gensim'''
from __future__ import print_function

import codecs
import logging

import jieba
import numpy as np
from gensim import corpora, models, similarities
from pymongo import MongoClient
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# on modern scikit-learn this import must come from sklearn.model_selection.
from sklearn.cross_validation import train_test_split
from sklearn.feature_extraction.text import CountVectorizer

# Toy corpus for the bag-of-words / word2vec demos below.
sentences = [
    'hello world done hello',
    'me hello',
    'ok well'
]
# Per-sentence class labels; defined but not used further in this script.
y = ['1', '1', '2']

# Build a term/document count matrix.  min_df=1 keeps every term that
# appears in at least one document.
vectorizer = CountVectorizer(min_df=1)
X = vectorizer.fit_transform(sentences)

print(vectorizer.get_feature_names())
print("X:", X, X.toarray())

# Out-of-vocabulary tokens ('aredsda') are silently dropped by transform(),
# so only the 'hello' column is non-zero here.
print("hello aredsda:", vectorizer.transform(['hello aredsda']).toarray())

# Whitespace-tokenize each sentence: word2vec expects lists of word
# tokens, not raw strings.
train_x = [z.split() for z in sentences]
print("train_x", train_x)

# Train a small word2vec model (10-dim vectors, keep every word).
# NOTE(review): despite the variable name this is Word2Vec, not Doc2Vec;
# the name is kept so the rest of the script still works.
doc2vecmodel = models.word2vec.Word2Vec(size=10, min_count=1)  # dm/alpha args of the old Doc2Vec experiment removed
doc2vecmodel.build_vocab(train_x)
doc2vecmodel.train(train_x)

print("vocab:", doc2vecmodel.vocab)

vocab = ' '.join(list(doc2vecmodel.vocab.keys()))
print(vocab)

# NOTE(review): LineSentence expects a *file path* (one sentence per line),
# not a literal sentence.  This only "works" because LineSentence is lazy --
# printing the object is harmless, but iterating `sent` would raise an
# IOError since no file named 'hello ameraca' exists.  Confirm intent.
sent = models.word2vec.LineSentence('hello ameraca')
print("sent:", sent)

print("well:", doc2vecmodel['well'])

# Extra training passes over the corpus.
# BUG FIX: the original called doc2vecmodel.train(sentences), feeding raw
# strings -- gensim then iterates each string character by character, so
# the model was being trained on single letters, not words.
for i in range(30):
    doc2vecmodel.train(train_x)
