import threading
from concurrent.futures import ThreadPoolExecutor

import requests
from lxml import etree

headers = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36",
}


def save(word, _lock=threading.Lock()):
    """Append *word* as one line to ``word.txt``.

    Args:
        word: The dictionary word to persist.
        _lock: Private, do not pass. The default ``threading.Lock`` is
            created once at definition time and shared by every call, so
            concurrent writes from the worker threads are serialised and
            cannot interleave inside the file.
    """
    with _lock:
        # "a" (not "a+"): we only ever append, the read capability was unused.
        with open("word.txt", "a", encoding="utf-8") as fp:
            fp.write(word + "\n")
            print(word, "已经添加成功")  # "added successfully"


def get_detail(url):
    """Fetch one letter-index page and save every word listed on it.

    Downloads *url*, extracts the word from each ``<li>`` in the browse
    list and appends it to word.txt via ``save``.

    Args:
        url: Absolute URL of a Collins browse page.
    """
    html = requests.get(url=url, headers=headers).text
    soup = etree.HTML(html)
    li_list = soup.xpath('//ul[@class="columns2 bL"]/li')
    for li in li_list:
        # Explicitly handle the empty-result case instead of a bare
        # except around an IndexError.
        texts = li.xpath('a/text()')
        if texts:
            # Call save directly: the original spun up (and immediately
            # joined) a fresh ThreadPoolExecutor per word, which added
            # pure overhead with zero extra concurrency — this function
            # already runs inside main()'s worker pool.
            save(texts[0])
        else:
            print("word获取有问题")


def main(url):
    """Crawl one top-level letter page and fan out detail pages.

    Fetches the letter index at *url*, then submits each sub-page link to
    a thread pool running ``get_detail``.

    Args:
        url: URL of a "words-starting-with-<letter>" index page.
    """
    # Note: removed `global href` — the loop variable was being written to
    # module scope for no reason, racing between the threads running main().
    html = requests.get(url=url, headers=headers).text
    soup = etree.HTML(html)
    li_list = soup.xpath('//ul[@class="columns2 bL"]/li')
    with ThreadPoolExecutor(max_workers=30) as pool:
        for li in li_list:
            # Guard the empty-xpath-result case explicitly rather than
            # catching everything with a bare except.
            hrefs = li.xpath('a/@href')
            if hrefs:
                pool.submit(get_detail, hrefs[0])
            else:
                print("href获取有问题")

if __name__ == '__main__':
    # One browse page per letter of the alphabet, crawled concurrently.
    alphabet = [chr(code) for code in range(ord("a"), ord("z") + 1)]
    start_url_list = [
        f"https://www.collinsdictionary.com/browse/english/words-starting-with-{letter}"
        for letter in alphabet
    ]
    with ThreadPoolExecutor(max_workers=20) as executor:
        for start_url in start_url_list:
            executor.submit(main, start_url)
