# -*- coding: utf-8 -*-
"""
@author:
@time: 2021/11/2
@file: build_custom_dict.py
@desc:
"""
import codecs
import re
import os
import pandas as pd
from pyhanlp import *

# Suppress part-of-speech tags in segmentation output (terms only).
HanLP.Config.ShowTermNature = False
# Java classes bridged via JPype: stop-word filter and user dictionary.
CoreStopWordDictionary = JClass("com.hankcs.hanlp.dictionary.stopword.CoreStopWordDictionary")
CustomDictionary = JClass("com.hankcs.hanlp.dictionary.CustomDictionary")
CustomDictionary.add("2018年国防战略")  # dynamically add custom terms at runtime
CustomDictionary.add("美国网络司令部")
CustomDictionary.add("网络司令部")

def JavaArrayListToPythonList(ArrayList):
    """Convert a Java ArrayList-like object into a list of Python strings.

    Each element is stringified via ``str``. Returns ``None`` when the
    input itself is ``None``.
    """
    if ArrayList is None:
        return None
    return [str(ArrayList.get(idx)) for idx in range(ArrayList.size())]





def segment_to_words(raw_document):
    """Segment a document into a flat list of words, dropping stop words.

    Each line of *raw_document* is segmented with HanLP, filtered through
    the core stop-word dictionary, and the surviving terms are collected
    in order of appearance.
    """
    collected = []
    for line in raw_document.split('\n'):
        filtered_terms = CoreStopWordDictionary.apply(HanLP.segment(line))
        converted = JavaArrayListToPythonList(filtered_terms)
        if converted is not None:
            collected.extend(converted)
    return collected


def count_frequency(words):
    """Count how often each word occurs.

    Parameters
    ----------
    words : list of str
        Words to tally.

    Returns
    -------
    pandas.Series
        Mapping word -> occurrence count, ordered from most to least
        frequent (the natural order of ``value_counts``).
    """
    # The original computed value_counts() twice and built an unused
    # (word, count) list; a single call gives the same returned Series.
    return pd.Series(words).value_counts()

# Largest IDF value seen while loading the dictionary; used later as the
# fallback weight for words that have no IDF entry.
idf_max = 0
idf_path = r"F:\test\idf.txt"


def build_idf_dict(idf_path):
    """Load an IDF dictionary from a UTF-8 text file.

    The file is expected to contain one ``word idf_value`` pair per line,
    separated by a single space. Malformed or blank lines are skipped.
    As a side effect, the module-level ``idf_max`` is raised to the
    largest IDF value encountered.

    Parameters
    ----------
    idf_path : str
        Path to the IDF dictionary file.

    Returns
    -------
    dict
        Mapping word -> IDF value; empty if the file cannot be read.
    """
    idf_dict = {}
    global idf_max
    try:
        with codecs.open(idf_path, 'r', 'utf-8') as rfd:
            # splitlines() handles \n, \r\n and \r uniformly; the previous
            # split('\r\n') produced one giant "line" on Unix line endings.
            for idf_line in rfd.read().splitlines():
                parts = idf_line.split(' ')
                if len(parts) < 2:
                    continue  # skip blank/malformed lines instead of aborting
                try:
                    value = float(parts[1])
                except ValueError:
                    continue  # skip lines whose value is not numeric
                idf_dict[parts[0]] = value
                if idf_max < value:
                    idf_max = value
    except (OSError, UnicodeDecodeError) as e:
        # Best effort: an unreadable file yields an empty dictionary.
        print(str(e))
    return idf_dict


idf_dict = build_idf_dict(idf_path)


# NOTE(review): CoreStopWordDictionary is already bound near the top of the
# file; this rebinding is redundant (same class) but harmless.
CoreStopWordDictionary = JClass("com.hankcs.hanlp.dictionary.stopword.CoreStopWordDictionary")
# text = "小区居民有的反对喂养流浪猫，而有的居民却赞成喂养这些小宝贝"
# words = segment_to_words(text)
# count_frequency(words)

# Training document: plain-text extraction of a PDF (hard-coded local path).
train_docs = r"F:\test\5.pdf.trans.txt"



with codecs.open(train_docs, 'r', 'utf-8') as rfd:
    data = rfd.read()
    words = segment_to_words(data)
    frequency = count_frequency(words)
    # TF-IDF weighting: scale each term's count by its IDF.  Terms absent
    # from the IDF dictionary get idf_max (treated as maximally
    # informative).  A vectorized multiply avoids assigning floats into an
    # integer Series one element at a time.
    weights = [idf_dict.get(word, idf_max) for word in frequency.index]
    frequency = frequency * weights
    frequency = frequency.sort_values(ascending=False)
    # Keep only the ten highest-weighted terms; the original stored the
    # entire ranked list despite the variable name.
    top_10 = frequency.index.tolist()[:10]
    print(top_10)
#
#     print('---pyhanlp---')
#     TermFrequency = JClass('com.hankcs.hanlp.corpus.occurrence.TermFrequency')
#     TermFrequencyCounter = JClass('com.hankcs.hanlp.mining.word.TermFrequencyCounter')
#     counter = TermFrequencyCounter()
#     counter.add(data)  # first document
#     for termFrequency in counter:  # iterate over each term and its frequency
#         print("%s=%d" % (termFrequency.getTerm(), termFrequency.getFrequency()))
#     print(counter.top(50))  # take the top N

    # Extract keywords by raw term frequency
    # print(TermFrequencyCounter.getKeywordList("女排夺冠，观众欢呼女排女排女排！", 3))


CustomDictionary.insert("白富美", "nz 1024")  # force-insert with POS "nz", frequency 1024
#CustomDictionary.remove("攻城狮"); # remove a word (try commenting this out)
CustomDictionary.add("单身狗", "nz 1024 n 1")  # add with two POS tags: nz/1024 and n/1

custom_words_set = set()
# Match text wrapped in fullwidth quotes “…”, ASCII parentheses (…), or
# book-title marks 《…》.  The lazy quantifier (+?) stops at the FIRST
# closing mark: with the original greedy \S+, adjacent pairs such as
# “a”x“b” matched once as a”x“b instead of twice as a and b.
entity_pattern = re.compile(r"(?<=“)\S+?(?=”)|(?<=\()\S+?(?=\))|(?<=《)\S+?(?=》)")
# with codecs.open(train_docs, 'r', 'utf-8') as rfd:
#     data = rfd.read().split('\n')
#     for line in data:
#         words = list(HanLP.segment(line))
#         for item in words:
#             print(item)



custom_words_set = set()
# Collect every quoted/parenthesized/book-titled entity from the training
# document, one line at a time.
with codecs.open(train_docs, 'r', 'utf-8') as rfd:
    for text_line in rfd.read().split('\n'):
        custom_words_set.update(entity_pattern.findall(text_line))

for entity in custom_words_set:
    print(entity)
print('OK')

# Print the expected n-gram result file path(s) under conf/
# (currently only the 3-gram file, since range(3, 4) yields just 3).
for ngram_order in range(3, 4):
    file_name = os.path.join("conf", "result.%dgram" % ngram_order)
    print(file_name)