from pyhanlp import *
import jieba.analyse
from gensim.models import word2vec
import gensim.models
import logging


# Experiment 1: n-gram
# Segment a short biography of the Song-dynasty poet Liu Kezhuang with
# jieba in precise mode, then join the tokens with spaces for inspection.
data="刘克庄(1187～1269) 南宋诗人、词人、诗论家。字潜夫，号后村。福建莆田人。宋末文坛领袖，" \
     "辛派词人的重要代表，词风豪迈慷慨。在江湖诗人中年寿最长，官位最高，成就也最大。晚年致力于辞赋创作，" \
     "提出了许多革新理论。"
words = " ".join(jieba.cut(data, cut_all=False))
print(words)

# Java classes bridged from HanLP via pyhanlp's SafeJClass:
# CorpusLoader parses a pre-segmented corpus file into sentence objects;
# NatureDictionaryMaker counts unigram/bigram statistics and writes them
# out as dictionary files.
CorpusLoader = SafeJClass("com.hankcs.hanlp.corpus.document.CorpusLoader")
NatureDictionaryMaker = SafeJClass("com.hankcs.hanlp.corpus.dictionary.NatureDictionaryMaker")  # dictionary-model Java module (counts unigram and bigram statistics)
corpus_path = "C:\\Users\\12036\\Desktop\\documents\\自然语言处理\\分词后.txt"
sentences=CorpusLoader.convert2SentenceList(corpus_path)

model_path="C:\\Users\\12036\\Desktop\\documents\\自然语言处理\\model"  # where the trained language model is saved

# NatureDictionaryMaker requires every word to carry a part-of-speech
# label; default any unlabeled word to "n" (noun) before training.
for sent in sentences:
    print(sent)
    for word in sent:
        if word.label is None:
            word.setLabel("n")
maker =NatureDictionaryMaker()
maker.compute(sentences)      # count 1-gram and 2-gram frequencies over the corpus
maker.saveTxtTo(model_path)   # writes model.txt (unigrams) and model.ngram.txt (bigrams)

# Point HanLP at the freshly trained model files. These paths must be set
# BEFORE the dictionary classes below are loaded, because class
# initialization reads them.
HanLP.Config.CoreDictionaryPath = model_path + ".txt"  # unigram dictionary
HanLP.Config.BiGramDictionaryPath = model_path + ".ngram.txt"  # bigram dictionary
CoreDictionary = SafeJClass("com.hankcs.hanlp.dictionary.CoreDictionary")
CoreBiGramTableDictionary = SafeJClass("com.hankcs.hanlp.dictionary.CoreBiGramTableDictionary")

# Query the trained model: unigram frequency of "核酸" and bigram
# frequency of the pair ("核酸", "检测").
print(CoreDictionary.getTermFrequency("核酸"))
print(CoreBiGramTableDictionary.getBiFrequency("核酸", "检测"))


# Experiment 2: word2vec
# Re-run the jieba segmentation demo (same text as experiment 1) before
# moving on to training a word2vec model.
data="刘克庄(1187～1269) 南宋诗人、词人、诗论家。字潜夫，号后村。福建莆田人。宋末文坛领袖，" \
     "辛派词人的重要代表，词风豪迈慷慨。在江湖诗人中年寿最长，官位最高，成就也最大。晚年致力于辞赋创作，" \
     "提出了许多革新理论。"
words = " ".join(jieba.cut(data, cut_all=False))
print(words)

# Enable INFO logging so gensim reports training progress.
logging.basicConfig(format='%(asctime)s:%(levelname)s: %(message)s', level=logging.INFO)
# Load the pre-segmented corpus and train a Word2Vec model on it with
# gensim's default hyperparameters.
corpus = word2vec.Text8Corpus(u"C:\\Users\\12036\\Desktop\\documents\\自然语言处理\\分词后B.txt")
model = gensim.models.Word2Vec(corpus)

# Task 1: print the learned embedding vectors of two words.
vec_lightning = model.wv["闪电"]
vec_helicopter = model.wv["直升机"]
print("\"闪电\"的词向量:", vec_lightning)
print("\n\"直升机\"的词向量:", vec_helicopter)

# Task 2: cosine similarity between "球形" and "闪电".
# Bug fix: the original except-branch assigned a dead variable (y1 = 0)
# and left `result` undefined, so the print below raised NameError
# whenever either word was out of vocabulary. Fall back to 0 instead.
try:
    result = model.wv.similarity(u"球形", u"闪电")
except KeyError:
    result = 0
print(" \n\"球形\"和\"闪电\"的相似度为：", result)

# Task 3: the 10 nearest neighbours of "爆炸" by cosine similarity.
print("\n与爆炸最相似的10个词:")
for neighbour, score in model.wv.most_similar(u"爆炸", topn=10):
    print(neighbour, score)