import os
import re
import sys
import getopt
import nltk

# Module-level configuration and shared state.
# NOTE(review): getopt is imported and several of these look like CLI option
# placeholders ("0" apparently meaning "unset"), but no option parsing is
# visible in this file — confirm against the full project.
n = -1  # row limit used by printResult; -1 means print every entry
charPath = "0"
filePath = "0"
dir_s = "0"
dir_d = "0"
stopPath = "0"
verbPath = "0"
wordStop = {}  # stop-word lookup consulted by Statistics
# wordVerb = {}  # (the real table is assigned at module bottom via readVerb)
verbFlag = True  # when True, verbs are mapped to their base form via wordVerb
phraseLength = 1  # phrase-length filter passed to interceptPhrase by phraseFile


# Load the verb inflection table.
def readVerb(verbPath):
    """Read the verb table at *verbPath* and return a dict mapping every
    inflected form (including the base form itself) to its base form.

    File format: one verb per line, "<prefix>*<base> <inflection> ...";
    everything before the first '*' is discarded. Blank lines are skipped.

    Fixes: the file is now closed via a context manager (the original
    leaked the handle), and lines without a '*' separator are skipped
    instead of raising IndexError.
    """
    Verb = {}
    with open(verbPath, "r", encoding="utf-8") as f:
        for line in f:
            line = line.rstrip("\n")
            # Skip blank lines and malformed lines without the '*' separator.
            if not line or "*" not in line:
                continue
            forms = line.split("*")[1].split(" ")
            original = forms[0]  # first token is the base form
            for word in forms:
                Verb[word] = original
    return Verb


# Order the statistics table.
def sortWord(source):
    """Return a new dict with the entries of *source* ordered by
    descending ["number"], ties broken by lexicographic key order.

    *source* maps word/phrase -> {"number": int, ...}; the value dicts
    are carried over by reference, not copied.

    Fix: the original raised StopIteration on an empty input dict; an
    empty result is now returned instead. The original's two-stage
    "collect equal counts, then sort each group" pass is replaced by a
    single sort on the composite key (-count, key), which produces the
    identical ordering.
    """
    return {
        k: v
        for k, v in sorted(source.items(), key=lambda kv: (-kv[1]["number"], kv[0]))
    }


# Attach percentages to the statistics table.
def Calc(result):
    """Add a "p" percentage field (0-100) to every entry of *result*,
    computed against the sum of all "number" fields. Prints the total
    count and returns the (mutated) dict.
    """
    total = float(sum(entry["number"] for entry in result.values()))
    for word in result:
        result[word]["p"] = float(result[word]["number"] * 100.00 / total)
    print("总数：", total)
    return result


# Print the statistics table (word, count, percentage).
def printResult(result):
    """Print each entry of *result* as "<word>\\t<count>\\t<pp.pp%>".

    Reads the module global ``n``: when n == -1 every entry is printed,
    otherwise only the first n entries (in insertion order) are shown.

    Fix: the row-formatting logic was duplicated four times in the
    original; it is factored out into _printRow.
    """
    printed = 0
    for item in result:
        if n != -1 and printed >= n:
            break
        _printRow(item, result[item])
        printed += 1


# Format and print one row of the statistics table.
def _printRow(word, stats):
    # Words shorter than 8 characters get a second tab to keep columns aligned.
    tabs = "\t" if len(word) >= 8 else "\t\t"
    print(word + tabs + str(stats["number"]) + "\t" + "{:.2f}".format(stats["p"]) + "%")


# Count one word occurrence into the running totals.
def Statistics(word, Word_all, verbFlag):
    """Record one occurrence of *word* in *Word_all* and return it.

    When *verbFlag* is True the word is first mapped to its base form
    through the module-global wordVerb table; words present in the
    module-global wordStop lookup are not counted at all.
    """
    if verbFlag is True and word in wordVerb:
        word = wordVerb[word]
    if word in wordStop:
        return Word_all
    entry = Word_all.setdefault(word, {"number": 0})
    entry["number"] = entry["number"] + 1
    return Word_all


# Build phrase statistics for one document.
def processPhrase(phrasePath):
    """Read the file at *phrasePath* line by line, sentence-split and
    POS-tag each line with NLTK, and accumulate noun-phrase counts via
    noun_phrase_chunking.

    Returns a dict of phrase -> {"number": count, "length": word count}.

    Fix: the file is now opened with a context manager so the handle is
    always closed (the original leaked it).
    """
    phraseall = {}
    with open(phrasePath, "r", encoding="utf-8") as f:
        for line in f:
            # All tagged tokens of one line are chunked together.
            data = []
            for sent in nltk.sent_tokenize(line):
                data = data + nltk.pos_tag(nltk.word_tokenize(sent))
            phraseall = noun_phrase_chunking(data, phraseall)
    return phraseall


# Filter the phrase table by phrase length.
def interceptPhrase(phrase_all, phraseLength):
    """Return the entries of *phrase_all* whose ["length"] field equals
    *phraseLength*. When phraseLength < 2 the input dict is returned
    unfiltered (1 acts as "no length filter" in this script).

    Fix: removed the stray debug print that dumped the entire phrase
    dict to stdout before filtering.
    """
    if phraseLength < 2:
        return phrase_all
    return {p: s for p, s in phrase_all.items() if s["length"] == phraseLength}


# Full phrase-statistics pipeline for one input file.
def phraseFile(phrasePath):
    """Extract phrases from the file at *phrasePath*, sort them, attach
    percentages, filter by the module-global phraseLength, and print
    the resulting table.
    """
    stats = processPhrase(phrasePath)
    stats = Calc(sortWord(stats))
    printResult(interceptPhrase(stats, phraseLength))


# Noun-phrase chunking: parse one POS-tagged token list with a regexp chunk
# grammar and tally every extracted phrase of two or more words.
def noun_phrase_chunking(sentence, all):
    """Chunk *sentence* and accumulate phrase counts into *all*.

    sentence: list of (token, POS-tag) pairs (output of nltk.pos_tag).
    all: dict of phrase -> {"number": count, "length": word count};
         mutated in place and also returned.

    Reads module globals verbFlag and wordVerb to normalize a phrase's
    leading verb to its base form.
    """
    # Example input shape:
    # sentence = [("the", "DT"), ("little", "JJ"), ("yellow", "JJ"), ("dog", "NN"),
    #             ("barked", "VBD"), ("at", "IN"), ("the", "DT"), ("cat", "NN")]
    # Grammar: regular-expression chunk rules — e.g. an optional determiner (DT)
    # followed by any number of adjectives (JJ) and a noun (NN) forms an NP chunk.
    # Rule groups below: verb phrases (VB..+JJ+TO, VB..+RB..+IN, VB..+RB..,
    # VB..+IN), noun sequences (NN..+), and prepositional/noun phrases
    # (IN+DT?JJ*NN+, DT?JJ*NN+). All are labeled "NP".
    grammar = r"""
    NP:{<VB[A-Z]+><JJ><TO>}
    {<VB[A-Z]+><RB[A-Z]+><IN>+}
    {<VB[A-Z]+><RB[A-Z]+>}
    {<VB[A-Z]+><IN>}
    {<NN[A-Z]+>+}
    {<IN>+<DT>?<JJ>*<NN>+}
    {<DT>?<JJ>*<NN>+}
    """

    # Build a chunk parser from the grammar.
    cp = nltk.RegexpParser(grammar)
    # Parse the tagged tokens into a chunk tree.
    result = cp.parse(sentence)
    for item in result:
        # Subtrees are matched chunks; plain (token, tag) tuples are unchunked.
        if type(item).__name__ == "Tree":
            # Lower-cased tokens of this chunk.
            listPhares = []
            for leaves in item:
                listPhares.append(leaves[0].lower())
            # Only multi-word chunks are counted as phrases.
            if len(listPhares) >= 2:
                if verbFlag is True:
                    # Normalize the leading verb to its base form.
                    if listPhares[0] in wordVerb:
                        # NOTE(review): looks like leftover debug output — confirm.
                        print("\n" + listPhares[0] + "\n")
                        listPhares[0] = wordVerb[listPhares[0]]
                # The space-joined token list is the phrase key.
                phares = " ".join(listPhares)
                if phares not in all:
                    all[phares] = {}
                    all[phares]["number"] = 1
                    all[phares]["length"] = len(listPhares)
                else:
                    all[phares]["number"] = all[phares]["number"] + 1
    return all


# Script entry: load the verb base-form table, then run the phrase-statistics
# pipeline on the test document.
# NOTE(review): both paths are hard-coded Windows locations; getopt is imported
# but never used here — presumably CLI parsing was planned. Confirm.
wordVerb = readVerb("C:\\OStest\\verb.txt")
phraseFile("C:\\OStest\\test\\test.txt")
