from snownlp import SnowNLP
import pandas as pd
import pylab as pl
from cls.orm.main import PG


# txt = open('D:/a.txt')
# text = txt.readlines()
# txt.close()
# print('读入成功')
# sentences = []
# senti_score = []
# for i in text:
#     a1 = SnowNLP(i)
#     a2 = a1.sentiments
#
#     print(a2)
#     sentences.append(i)  # 语序...
#     senti_score.append(a2)
#
# table = pd.DataFrame(sentences, senti_score)
# print(table)
# # table.to_excel('F:/_analyse_Emotion.xlsx', sheet_name='Sheet1')
# # ts = pd.Series(sentences, senti_score)
# # ts = ts.cumsum()
# # print(table)
# x = [1, 2, 3, 4, 5, 6, 7, 8]
# pl.mpl.rcParams['font.sans-serif'] = ['SimHei']
# pl.plot(x, senti_score)
# exit(2)
# pl.title(u'心 灵 捕 手 网 评')
# pl.xlabel(u'评 论 用 户')
# pl.ylabel(u'情 感 程 度')
# pl.show()
#

from snownlp import seg
# Train a SnowNLP word-segmentation model from the corpus file and persist it.
# NOTE(review): 'data.txt' is resolved relative to the current working
# directory — confirm the expected corpus location before running.
seg.train('data.txt')
# Serialize the trained model; SnowNLP appends the '.3' suffix convention
# internally when loading marshal files.
seg.save('seg2.marshal')
print('ok')
# NOTE(review): this exit() intentionally stops the script here (debug
# scaffolding?) — everything below this line is currently unreachable.
exit(11)

# res = db.execValue('select id from words where id = 1')
# word-segmentation frequency content




# db.exec(sql)
def test():
    """Segment and POS-tag a sample sentence, recording each tagged word.

    Runs SnowNLP over a hard-coded Chinese text and passes every
    (word, tag) pair to ``updateWord``.

    NOTE(review): ``updateWord`` is not defined anywhere in this view of
    the file — confirm it is provided elsewhere, otherwise this raises
    ``NameError`` at runtime.
    """
    # Sentiment scores below 0.5 are negative, 0.5 and above are positive
    # (only relevant to the commented-out sentiment lines below).
    text1 = u'问题二：为何有这种不同    我想最重要的原因是节约了不少的内存吧。Python的运行效率和编译类型的语言自然是没法比，但是能优化就优化一点吧    ~谁不想有更高的追求呢。'
    a1 = SnowNLP(text1)
    # a2 = SnowNLP(text2)
    # print('Chinese word segmentation:')
    # print(a1.words)

    # .tags yields (word, part-of-speech) tuples.
    for word in a1.tags:
        updateWord(word)

    # print(a2.words)
    # print(text1, a1.sentiments)
    # print(text2, a2.sentiments)


# NOTE(review): unreachable while the exit(11) earlier in the file remains
# in place; remove that call to actually run the tagging test.
test()
exit(6)


