#! -*- coding:utf-8 -*-
'''pip install  gensim'''
from gensim import corpora, models, similarities
import logging
from pymongo import MongoClient
import jieba
import codecs


#logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
#documents = ["Shipment of gold damaged in a fire","Delivery of silver arrived in a silver truck","Shipment of gold arrived in a truck"]
#model = Doc2Vec.load_word2vec_format('train.txt', binary=False)  # C text format
#print model

# Training corpus; initialised empty, reassigned below to a LineSentence
# stream over the pre-segmented file.
sentences = []

# --- One-off corpus build (disabled) ---
# This block pulled clinical 'illness' texts from MongoDB, segmented them
# with jieba and dumped space-separated tokens to thefile.txt (one document
# per line), which the live code below consumes.
# NOTE(review): hard-coded DB host/credentials should not live in source
# control — move to config/env before re-enabling.
# NOTE(review): word.replace('\n', '') discards its result (str is
# immutable), so newlines were never actually stripped.
'''
conn = MongoClient("okzor.com",27017)
db = conn.health
db.authenticate("hou","hou@123")
mydoclist =[]
content = db.training.find({})
file_object = codecs.open('thefile.txt', 'a','utf-8')

for i in content:
    word=i['illness']
    word.replace('\n', '')
    seg_list = jieba.cut(word) #, cut_all=True)
    line=' '.join(seg_list)+"\n"
    file_object.writelines(line)
    mydoclist.append(line)
file_object.close( )
'''

# Disabled experiment: wrapping each in-memory line in its own LineSentence.
# Abandoned in favour of one LineSentence over the whole file below.
'''for  line in mydoclist:
    tt=models.word2vec.LineSentence(line)
    sentences.append(tt) #models.doc2vec.LabeledSentence(words=line.split(),tags= ['SENT_'+ str(uid)]))
    print line
print len(sentences)
'''
# Lazily iterate thefile.txt: each line becomes a list of
# whitespace-separated tokens, the format Word2Vec expects.
sentences=models.word2vec.LineSentence('thefile.txt')
print sentences

doc2vecmodel =  models.word2vec.Word2Vec(sentences,size = 100, window = 5, min_count = 1) #, dm = 1,alpha=0.025, min_alpha=0.025)
vocab =' '.join( list(doc2vecmodel.vocab.keys()))
print vocab
sent= models.word2vec.LineSentence(u"食纳增加，精神状况好转，偶感乏力，上方去黄芩、贝母，7剂，水煎服，一日一剂，分两次温服。hello ")
print "sent:",sent

# Leftover doc2vec-style training experiment (disabled): manual epoch loop
# with an explicitly decaying learning rate, the pattern the old gensim
# Doc2Vec API recommended.
#doc2vecmodel.build_vocab(sentences)
#for epoch in range(10):
    #doc2vecmodel.train(sentences)
    #doc2vecmodel.alpha -= 0.002  # decrease the learning rate
    #doc2vecmodel.min_alpha = doc2vecmodel.alpha  # fix the learning rate, no decay

# Vocabulary dump (disabled).
#for key in doc2vecmodel.vocab.keys():
    #print key


# = models.Doc2Vec(sentences, size = 100, window = 5, min_count = 0, dm = 1)
# Sanity-check the embeddings: print nearest neighbours of 腹胀
# ("abdominal distension") by cosine similarity.
# NOTE(review): the label below ("word junk") is stale copy-paste from a
# tutorial; it no longer matches what is printed.
print "Initial word vector for word junk:"
#print doc2vecmodel["hello"]
print doc2vecmodel.most_similar(u"腹胀") #positive=['口干', '黄'], negative=['腹'])


jieba.add_word('去黄芩')
jieba.add_word('桂枝')
word=u"食纳增加，精神状况好转，偶感乏力，口苦咽干，头晕乏力，上方去黄芩、贝母，7剂，水煎服，2日一剂，加桂枝汤，分两次温服。hello"
seg_list = jieba.cut_for_search(word) #, cut_all=True)
print ' '.join(seg_list)
print ' '.join( jieba.cut(word))


for i in range(30):
    #print "Round: " + str(i)
    doc2vecmodel.train(sentences)
#print "predict:",doc2vecmodel.predict(u"腹胀")
#print "Trained doc vector for 1st document"
print "Shipment and silver",doc2vecmodel.most_similar([u"腹胀",u"苔白",u"冲剂"])
