import re
from pathlib import Path

import pandas as pd
import requests
from bs4 import BeautifulSoup
'''
Purpose:
    For a batch of English text, generate an English-Chinese translation
    vocabulary list (word book) and provide it as an Excel download.
'''
def fanyi(
    url="https://pandas.pydata.org/docs/user_guide/indexing.html",
    stop_words_path="../datas/stop_words/stop_words.txt",
    dict_path=r"C:\Users\huangziyan\Downloads\ECDICT-master\stardict.csv",
    output_path="./file/word.xlsx",
):
    """Build an English-Chinese vocabulary workbook from a web page.

    Downloads the page at *url*, extracts its visible text, tokenizes it
    into words, filters out stop words / numbers / single characters,
    counts word frequencies, joins the result with the ECDICT dictionary
    CSV (which must contain a ``word`` column) and writes the merged
    table to an Excel file.

    All parameters default to the original hard-coded values, so calling
    ``fanyi()`` with no arguments behaves as before.

    Raises:
        requests.HTTPError: if the page download fails.
        FileNotFoundError: if the stop-word file or dictionary CSV is missing.
    """
    # 1. Fetch the page. A timeout prevents the script from hanging forever,
    #    and raise_for_status surfaces HTTP errors instead of parsing an
    #    error page as if it were content.
    response = requests.get(url, timeout=30)
    response.raise_for_status()

    # 2. Strip the HTML markup, keeping only the visible text. Naming an
    #    explicit parser avoids BeautifulSoup's "no parser specified"
    #    warning and makes parsing deterministic across environments.
    soup = BeautifulSoup(response.text, "html.parser")
    soup_text = soup.get_text()

    # 3. Tokenize: split on whitespace and common punctuation.
    #    Raw string avoids invalid-escape-sequence warnings; inside a
    #    character class only ']' and '-' need escaping.
    word_list = re.split(r"""[ ,.()/\n|\-:=$\["']""", soup_text)

    # 4. Load the stop-word list (one word per line).
    with open(stop_words_path, encoding="utf-8") as fin:
        stop_words = set(fin.read().split("\n"))

    # 5. Clean: drop empty tokens, pure numbers, single characters and
    #    stop words; normalize everything to lower case.
    word_list_clean = []
    for word in word_list:
        word = str(word).lower().strip()
        if not word or word.isnumeric() or len(word) <= 1 or word in stop_words:
            continue
        word_list_clean.append(word)

    # 6. Count word frequencies, most frequent first.
    df_words = pd.DataFrame({"word": word_list_clean})
    df_words = (
        df_words.groupby("word")["word"]
        .agg(count="size")
        .reset_index()
        .sort_values(by="count", ascending=False)
    )

    # 7. Join with the ECDICT dictionary to attach translations; an inner
    #    merge keeps only words present in the dictionary.
    df_dict = pd.read_csv(dict_path)
    df_merge = pd.merge(left=df_dict, right=df_words, on="word")

    # 8. Write the workbook, creating the output folder if needed so the
    #    export does not fail on a fresh checkout.
    Path(output_path).parent.mkdir(parents=True, exist_ok=True)
    df_merge.to_excel(output_path)

if __name__ == "__main__":
    # Script entry point: build the vocabulary workbook with default settings.
    fanyi()