import os
import re
from collections import Counter

from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer

stop_words = stopwords.words("english")
wordnet_lem = WordNetLemmatizer()

myPath = os.path.dirname(__file__)
txtPath = f"{myPath}/origin"    # Directory holding the files to process; multiple files allowed.

# Initialisation: drop macOS junk from the input directory (must happen BEFORE
# the listdir below, or .DS_Store would show up in `files`) and clear any
# previous aggregate output.  os.remove + ignore-missing replaces the former
# unquoted shell "rm -rf", which broke on paths containing spaces.
for _junk in (f"{txtPath}/.DS_Store", f"{myPath}/result/data.txt"):
    try:
        os.remove(_junk)
    except FileNotFoundError:
        pass  # same best-effort semantics as rm -rf
files = os.listdir(txtPath)

def delBlankline(infile, outfile):
    """Copy *infile* to *outfile*, dropping blank / whitespace-only lines.

    Args:
        infile:  path of the UTF-8 text file to read.
        outfile: path of the UTF-8 text file to write (overwritten).
    """
    # Context managers guarantee both handles are closed even on error
    # (the original open/close pairs leaked on any exception in between).
    with open(infile, 'r', encoding="utf-8") as fin, \
         open(outfile, 'w', encoding="utf-8") as fout:
        for line in fin:
            # line.split() is falsy for empty/whitespace-only lines.
            if line.split():
                fout.write(line)
    
def getData():
    """Aggregate every file in ``origin`` into result/data.txt and strip blank lines.

    Appends the raw contents of each input file to result/data.txt, runs
    delBlankline over the aggregate, then atomically swaps the cleaned file
    into place.  os.replace supersedes the previous unquoted shell
    ``rm && mv``, which broke on paths containing spaces.
    """
    data_path = f"{myPath}/result/data.txt"
    cleaned_path = f"{myPath}/result/handle_data.txt"
    # Open the output once, not once per input file as before.
    with open(data_path, 'a+') as out:
        for name in files:
            with open(f"{txtPath}/{name}") as src:
                out.write(src.read())  # no longer shadows the builtin `str`
    delBlankline(data_path, cleaned_path)
    os.replace(cleaned_path, data_path)  # atomic rename; overwrites data.txt
    
def getText():
    """Read result/data.txt, lowercase it, and map special characters to spaces.

    Returns:
        The lowercased text with every punctuation mark / digit in the set
        below replaced by a single space.
    """
    with open(f"{myPath}/result/data.txt", 'r') as f:
        txt = f.read()
    # Character set kept byte-identical to the original (it includes
    # full-width punctuation and a stray 'ш' that was already present).
    specials = '''!"“”#$£%&(（）)*+,，-—.．。/:;；<=>?@[\]^_＿'‘‘’ш {|}~1234567890'''
    # str.translate does one C-level pass instead of ~60 chained .replace calls.
    return txt.lower().translate(str.maketrans(specials, " " * len(specials)))
        
def getCounts():
    """Lemmatize the text and rank words by frequency.

    Each word is lemmatized first as a verb (normalising tense) and then with
    the default noun POS.  Words in `excludes`, NLTK English stop words, and
    words shorter than 3 characters are discarded.

    Returns:
        A list of (word, count) tuples sorted by count, descending.
    """
    excludes = {'bob', 'tom', 'john'}
    counts = Counter()
    for word in getText().split():
        word = wordnet_lem.lemmatize(word, pos='v')  # normalise verb tense
        word = wordnet_lem.lemmatize(word)           # then default (noun) form
        if word in excludes or word in stop_words or len(word) < 3:
            continue  # skip excluded / stop / too-short words
        counts[word] += 1
    # most_common() sorts by count descending with stable tie order, matching
    # the original stable list.sort(key=..., reverse=True).
    return counts.most_common()

def main():
    """Run the pipeline: aggregate the data, count words, write the ranking."""
    getData()
    ranked = getCounts()
    with open(f'{myPath}/result/rank.txt', 'w+') as f:
        for word, count in ranked:
            # The original passed end="\n" to str.format, where it was silently
            # ignored as an unused keyword; the newline belongs in the template.
            f.write(f"{word:<15}{count:>10}\n")


if __name__ == '__main__':
    main()
