import sys
sys.path.append('.//Tools')
from nltk import word_tokenize, pos_tag
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
import Constant
import os


# Map a Penn Treebank POS tag to the corresponding WordNet POS constant.
def get_wordnet_pos(tag):
    """Return the WordNet POS for Treebank *tag*, or None if unmapped."""
    mapping = {
        'J': wordnet.ADJ,   # adjectives (JJ, JJR, JJS)
        'V': wordnet.VERB,  # verbs (VB, VBD, ...)
        'N': wordnet.NOUN,  # nouns (NN, NNS, ...)
        'R': wordnet.ADV,   # adverbs (RB, RBR, RBS)
    }
    # tag[:1] is the tag's leading letter; '' or an unknown letter yields None,
    # matching the original startswith() chain.
    return mapping.get(tag[:1])
# Lemmatization helper.
def resume(sentence):
    """Tokenize *sentence*, skip punctuation, and return the lemmatized words."""
    lemmatizer = WordNetLemmatizer()
    lemmas = []
    # pos_tag yields (token, treebank_tag) pairs for the tokenized sentence.
    for word, treebank_tag in pos_tag(word_tokenize(sentence)):
        if Constant.IsPunc(word):
            continue  # punctuation carries no lexical content
        # Fall back to NOUN when the tag has no WordNet equivalent.
        pos = get_wordnet_pos(treebank_tag) or wordnet.NOUN
        lemmas.append(lemmatizer.lemmatize(word, pos=pos))
    return lemmas
# Load the frequency dictionary
# word -> record list, populated from dictionary.txt by rdic().
dic = {}
# Highest total frequency seen in the dictionary; used by wordAnalyzer as a
# normalization denominator. (Was `int(0)` — the int() call was redundant.)
maxFreq = 0
def rdic():
    """Load dictionary.txt (tab-separated) into the global `dic`.

    Each line is expected to look like:
        POS \t word \t total \t spoken \t fiction \t magazine \t academic
    Also tracks the largest total frequency in the global `maxFreq`.
    """
    global maxFreq
    path = os.path.join(os.path.dirname(__file__), 'dictionary.txt')
    # `with` guarantees the file is closed even if parsing raises;
    # the original leaked the handle on any exception.
    with open(path, 'r', encoding='utf-8') as fr:
        for line in fr:
            line = line.replace("\n", '').replace(' ', '')
            v = line.split('\t')
            if len(v) < 3:
                continue  # skip blank or malformed lines (original raised IndexError)
            if v[2].isnumeric() and int(v[2]) > maxFreq:
                maxFreq = int(v[2])
            dic[v[1]] = v
# Dictionary lookup helper.
def find_word(word):
    """Return the record list for *word* from the global `dic`, or 0 if absent."""
    return dic.get(word, 0)
# Word-statistics analyser.
def wordAnalyzer(my_list):
    """Compute a 6-element feature vector for a list of lemmatized words.

    data[0]  fraction of unknown words (length > 2) among counted tokens
    data[1]  mean total frequency, normalized by the global maxFreq
    data[2..5]  mean share of spoken / fiction / magazine / academic frequency

    Unknown tokens of length <= 2 are dropped from the denominator entirely.
    """
    data = [0, 0, 0, 0, 0, 0]
    counted = len(my_list)
    for word in my_list:
        value = find_word(word)
        # record layout: [POS, word, total, spoken, fiction, magazine, academic]
        if value == 0:
            if len(word) > 2:
                data[0] += 1      # unknown "real" word
            else:
                counted -= 1      # short unknown token: ignore completely
            continue
        total = int(value[2])     # hoisted: original converted this 5 times
        if total <= 0:
            continue              # guard: zero-frequency record would divide by zero
        # max(..., 1) guards maxFreq == 0 (dictionary not loaded yet).
        data[1] += total / max(maxFreq, 1)
        data[2] += int(value[3]) / total
        data[3] += int(value[4]) / total
        data[4] += int(value[5]) / total
        data[5] += int(value[6]) / total

    known = max(counted - data[0], 1)   # avoid /0 when nothing was recognized
    for i in range(1, len(data)):
        data[i] /= known
    data[0] /= max(counted, 1)
    return data
def GetWordVector(s):
    """Lemmatize sentence *s* and return its word-statistics feature vector."""
    words = resume(s)
    return wordAnalyzer(words)
rdic()  # populate the dictionary at import time so GetWordVector works immediately
# Main entry point: interactive sentence-analysis loop.
def main():
    """Repeatedly read an English sentence, print its feature vector.

    Returns 0 when the user asks to quit.
    """
    rdic()
    while True:
        sentence = input("请输入需要分析的英语句子：")
        print(wordAnalyzer(resume(sentence)))
        flag = input("是否退出，如果退出请输入1，继续请输入0:")
        # Original had a redundant `else: continue`; the loop continues anyway.
        if flag == '1':
            return 0


# Bug fix: main() was defined but never invoked anywhere in the file.
if __name__ == "__main__":
    main()