# coding:utf-8

import pickle
import re
from collections import Counter
from concurrent.futures import ThreadPoolExecutor, as_completed

import requests
from bs4 import BeautifulSoup
from docx import Document


def get_youdao_src(word: str):
    """Fetch the Youdao dictionary page for *word*.

    Returns the page HTML as a string, or None when Youdao reports the
    word as unknown (the page contains the "error-wrapper" marker).
    """
    url = r'https://dict.youdao.com/w/eng/{0}'.format(word)
    # timeout keeps the 500-word lookup loop from hanging on a stalled
    # connection (consistent with get_html, which also uses timeout=10)
    r = requests.get(url, timeout=10)
    if "error-wrapper" not in r.text:
        return r.text
    print(word, "可能不是特别规范~")
    return None


def parse_word(src):
    """Extract the word, pronunciations, translations and example block
    from a Youdao dictionary page.

    Returns a dict with keys 'word', 'pron', 'trans' (and 'example' when
    an examples section exists); an empty dict when the page layout does
    not match; or None when *src* is falsy.
    """
    if not src:
        return None
    word_dict = {}
    try:
        soup = BeautifulSoup(src, 'lxml')
        word = soup.find('span', class_="keyword").text
        pron_lst = soup.find_all('span', class_="pronounce")
        trans = soup.find('div', attrs={"class": "trans-container"}).find_all('li')
        example = soup.find('div', attrs={"class": "examples"})
        word_dict['word'] = word or ''
        # collapse CR/LF/space noise inside each pronunciation entry
        word_dict['pron'] = '\t\t'.join(
            pron.text.replace('\r', '').replace('\n', '').replace(' ', '')
            for pron in pron_lst)
        word_dict['trans'] = '\n'.join(tran.text for tran in trans) or ''
        if example:
            word_dict['example'] = example.text
    except AttributeError:
        # a .find() returned None for an expected element (page layout
        # differs, e.g. no translation list) -- return what was parsed
        pass
    return word_dict


def get_html(url):
    """Download *url* and return its HTML with CR/LF characters removed."""
    headers = {
        "Accept-Encoding": "Gzip",  # 使用gzip压缩传输数据让访问更快
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
        "Connection": "close"
    }
    requests.adapters.DEFAULT_RETRIES = 5
    resp = requests.get(url, headers=headers, stream=False, timeout=10)
    # plain str.replace: clearer and faster than re.sub for literal chars
    html = resp.text.replace('\r', '').replace('\n', '')
    return html


def parse_item_page(html):
    """Extract the per-article link URLs from the news list page.

    Returns the list of URLs found (possibly empty).

    The old try/except was dead code: re.findall on a str cannot raise,
    and swallowing an error would have returned None, breaking the
    iteration in get_words_count. An empty list keeps callers safe.
    """
    return re.findall(r'"LinkUrl":"(.*?)","', html)


def writefile(html):
    """Dump *html* to the debug file ``1.txt`` (UTF-8)."""
    with open('1.txt', 'w', encoding='utf-8') as out:
        out.write(html)


def parse_article(url):
    """Fetch one news article and return a Counter of the lower-cased
    words in its title and paragraphs."""
    non_word = re.compile(r'\W+')
    page = get_html(url)
    title = re.findall(r'<h1 class="Btitle">(.*?)</h1>', page)[0]
    paragraphs = re.findall(r'<p>(.*?)</p>', page)
    text = title + ''.join(paragraphs)
    return Counter(non_word.split(text.lower()))


def get_words_count(local=True):
    """Return a Counter of word frequencies from the crawled articles.

    local=True: load the cached counter from ``word_count.bin``
    (returns an empty Counter when the cache is missing).
    local=False: crawl every URL in the module-level ``article_urls``
    in parallel, drop black-listed stop words, cache and return.
    """
    words_count = Counter()
    if local:
        try:
            with open('word_count.bin', 'rb') as f:
                # NOTE(review): pickle on a locally-written cache only --
                # never load untrusted data this way
                words_count = pickle.load(f)
        except FileNotFoundError:
            print("word_count.bin not exist")
    else:
        black_words = ['a', 'an', 'and', 'are', 'as', 'at', 'be', 'beijing', 'by', 'china',
                       'chinese', 'dec', 'enditem', 'font', 'for', 'has', 'have', 'he', 'in',
                       'is', 'it', 'id', 'its', 'nov', 'of', 'on', 'one', 's', 'she', 'that',
                       'the', 'this', 'to', 'two', 'u', 'us', 'wang', 'was', 'we', 'will',
                       'with', 'xi', 'xinhua', 'img', '1', 'src', 'jpg', 'via', 'up', 'not', 'jinping',
                       'laos', 'been', 'years', '2021', '19', 'covid', 'taiwan', 'africa', 'countries', 'li', 'i', 'â',
                       '000', '10', '11', 'wuhan', ' ', '10pt', '2', '2020', 'russia', 'asia', 'i', 'african', 'asean',
                       'or', 'but', 'so', 'his']
        with ThreadPoolExecutor(max_workers=10) as executor:
            futures = [executor.submit(parse_article, url) for url in article_urls]
            for future in as_completed(futures):
                try:
                    words_count += future.result()
                except Exception as exc:
                    # one failed article should not abort the whole crawl,
                    # but do report what went wrong
                    print("Exception Occur! ", exc)
        for bw in black_words:
            # pop with a default: plain pop(bw) raises KeyError when a
            # black-listed word never appeared in any article
            words_count.pop(bw, None)

        with open('word_count.bin', 'wb') as f:
            pickle.dump(words_count, f)
    return words_count


def saveto_docx(infos):
    """Write the word-info dicts to ``englishwords.docx``: a numbered
    heading for the 'word' field, plain paragraphs for the rest."""
    doc = Document()
    for num, entry in enumerate(infos, start=1):
        for field, text in entry.items():
            if field == 'word':
                doc.add_heading('{0}. {1}'.format(num, text), 0)
            else:
                doc.add_paragraph(text)
    doc.save('englishwords.docx')


if __name__ == '__main__':
    # crawl the news index, build (or load) the word-frequency table,
    # then look up the 500 most common words on Youdao
    url = 'http://da.wa.news.cn/nodeart/page?nid=11230449&cnt=1000'
    html = get_html(url)
    article_urls = parse_item_page(html)
    words_count = get_words_count(local=True)
    infos = []
    for index, (word, _freq) in enumerate(words_count.most_common(500), start=1):
        print('处理第{}个单词{}...'.format(index, word))
        info = parse_word(get_youdao_src(word))
        if info:
            infos.append(info)
        # re-save after every word so progress survives an interruption
        saveto_docx(infos)
