import requests
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor
import os

# url = "https://www.kanxiaojiejie.com/page/1"
# Target directory for all downloaded images.
dirName = r'D:\爬虫数据\beauty'
# makedirs(..., exist_ok=True) replaces the exists()+mkdir() pair:
# it is race-free and also creates missing parent directories
# (os.mkdir would raise if D:\爬虫数据 did not already exist).
os.makedirs(dirName, exist_ok=True)


def get_url(url, timeout=30):
    """Fetch *url* and return the response body decoded as text.

    Parameters
    ----------
    url : str
        Page (or image page) URL to fetch.
    timeout : float, optional
        Seconds to wait for the server; without it a single stalled
        connection would hang the whole crawl indefinitely.

    Returns
    -------
    str
        The decoded HTML of the page.

    Raises
    ------
    requests.HTTPError
        If the server answers with a 4xx/5xx status.
    requests.RequestException
        On network failure or timeout.
    """
    resp = requests.get(url, timeout=timeout)
    # Fail loudly on error responses instead of silently parsing an
    # error page as if it were a gallery listing.
    resp.raise_for_status()
    # Decode the raw bytes (UTF-8 by default), matching the original
    # .content.decode() behaviour rather than trusting resp.text's
    # header-guessed charset.
    return resp.content.decode()


# print(page_html)
def parse_one_page(url):
    """Download every image linked from one listing page into ``dirName``.

    Parameters
    ----------
    url : str
        URL of a listing page that contains
        ``<a class="entry-thumbnail">`` links to individual galleries.

    Side effects
    ------------
    Writes each image to ``dirName``, named after the last path
    segment of the image URL. Existing files are overwritten.
    """
    page_html = get_url(url)
    soup = BeautifulSoup(page_html, 'html.parser')
    for a in soup.find_all('a', class_='entry-thumbnail'):
        pic_href = a.get('href')
        if not pic_href:
            # Anchor without a target — nothing to follow.
            continue
        pic_page = get_url(pic_href)
        soup_pic = BeautifulSoup(pic_page, 'html.parser')
        entry = soup_pic.find('div', class_='entry themeform')
        if entry is None:
            # Layout changed or empty page; original code would have
            # crashed here with AttributeError on .find('p').
            continue
        p = entry.find('p')
        if p is None:
            continue
        for img in p.find_all('img'):
            src_html = img.get('src')
            if not src_html:
                continue
            src_resp = requests.get(src_html).content
            img_name = src_html.split('/')[-1]
            # os.path.join is portable; the original concatenated
            # with a hard-coded '/'.
            with open(os.path.join(dirName, img_name), mode='wb') as f:
                f.write(src_resp)


def main():
    """Crawl listing pages 1 through 15, one after another."""
    first_page, last_page = 1, 15
    for page_no in range(first_page, last_page + 1):
        page_url = f"https://www.kanxiaojiejie.com/page/{page_no}"
        print(f'正在下载第{page_no}页。。。')
        parse_one_page(page_url)
        print(f'第{page_no}已下载完成！！！')


if __name__ == '__main__':
    # url = "https://www.kanxiaojiejie.com/page/1"
    # NOTE(review): a 50-worker pool running a single submitted task
    # gains no parallelism over calling main() directly — presumably
    # the intent was one task per page; verify before restructuring.
    with ThreadPoolExecutor(50) as t:
        # The original discarded the Future, so any exception raised
        # inside main() was silently swallowed. .result() re-raises
        # it in the main thread so failures are visible.
        t.submit(main).result()
    # main()
