from bs4 import BeautifulSoup
import nltk
import os
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
 
# nltk.download("stopwords")
# nltk.download("wordnet")

class HTML():
    """Small NLP pipeline over a local HTML file.

    Reads the file, strips markup to plain text, tokenizes, removes
    English stop words, and offers frequency counting, Porter stemming
    and WordNet lemmatization helpers.
    """

    def __init__(self, file_path):
        # Path of the HTML file to process.
        self.file_path = file_path

    def read_file(self):
        """Read the HTML file and return its content as a string.

        Uses a context manager so the file handle is always closed
        (the original leaked the handle). Undecodable bytes are ignored.
        """
        with open(self.file_path, 'r', encoding="utf-8", errors="ignore") as f:
            return f.read()

    def clean_text(self, html):
        """Parse *html* with lxml and return the visible text, tags stripped."""
        soup = BeautifulSoup(html, "lxml")
        return soup.get_text()

    def take_tokens(self, text):
        """Tokenize *text* into word tokens (maximal runs of \\w characters)."""
        # Raw string: '\w' in a plain literal is an invalid escape sequence
        # (DeprecationWarning today, an error in future Python versions).
        return nltk.regexp_tokenize(text, pattern=r'\w+')

    def delete_stop(self, tokens):
        """Return *tokens* with English stop words removed, order preserved.

        A set gives O(1) membership tests; the original looped with
        list.remove(), which is O(n) per hit (O(n^2) overall) and scans
        a stop-word *list* for every token. Output is identical: every
        occurrence of a stop word is dropped, everything else keeps its
        relative order.
        """
        stop_set = set(stopwords.words('english'))
        return [token for token in tokens if token not in stop_set]

    def word_freq(self, word):
        """Plot the 20 most frequent tokens and return them as (token, count) pairs."""
        freq = nltk.FreqDist(word)
        freq.plot(20, cumulative=False)  # requires matplotlib and a display
        return freq.most_common(20)

    def word_stemmer(self, word):
        """Return the Porter stem of *word*."""
        stemmer = PorterStemmer()
        return stemmer.stem(word)

    def word_lemmatizer(self, word, prop):
        """Lemmatize *word* with WordNet; *prop* is the POS tag ('n', 'v', ...)."""
        lemmatizer = WordNetLemmatizer()
        return lemmatizer.lemmatize(word, pos=prop)

    def percentage(self, voc, total_text):
        """Return how often substring *voc* occurs in *total_text*,
        as a percentage of the text's length.

        Raises ZeroDivisionError on empty *total_text* (unchanged from
        the original contract).
        """
        return 100 * total_text.count(voc) / len(total_text)
 
 
if __name__ == '__main__':
    html_path = "D:/Chrome/Dl/python362/library/math.html"
    html = HTML(html_path)

    # Read the raw HTML file.
    html_text = html.read_file()

    # Dump the raw HTML to txts/<name>.txt; create the directory if missing
    # (the original crashed with FileNotFoundError when txts/ did not exist).
    file_name = os.path.splitext(os.path.split(html_path)[1])[0]
    os.makedirs("txts", exist_ok=True)
    # encoding="utf-8": the text was decoded as UTF-8, so write it back the
    # same way — the locale default codec can raise UnicodeEncodeError on
    # Windows for non-ASCII content.
    with open("txts/{}.txt".format(file_name), "w", encoding="utf-8") as f:
        f.write(html_text)
    os.system("pause")  # Windows-only: wait for a keypress before continuing

    # Strip HTML markup down to plain text.
    clean_text = html.clean_text(html_text)

    # Tokenize the cleaned text.
    tokens = html.take_tokens(clean_text)

    # Remove English stop words.
    clean_tokens = html.delete_stop(tokens)

    # Top-20 frequency plot (needs matplotlib + a display, so left disabled):
    # fre_list = html.word_freq(clean_tokens)

    # Stemming example on a single token.
    stem = html.word_stemmer(str(clean_tokens[4]))

    # Lemmatization example on a single token, treated as a verb.
    lemmatizer = html.word_lemmatizer(str(clean_tokens[9]).lower(), 'v')