import requests
from lxml import etree
import parsel
import os       # 创建文件夹


# Scrape listing pages 1-19 of kanxiaojiejie.com: for every article found,
# follow its detail link, collect all image URLs, and save each image under
# img/<article title>/<original file name>.
# Characters that are illegal in file names on Windows (and '/' on Unix);
# stripped from article titles before they are used as directory names.
_ILLEGAL_NAME_CHARS = '\\/:*?"<>|'

for page in range(1, 20):
    print(f'====================开始爬取第{page}页=============================')
    url = f'https://www.kanxiaojiejie.com/page/{page}'
    try:
        # timeout prevents a stalled server from hanging the whole run;
        # raise_for_status turns 4xx/5xx into an exception instead of
        # silently parsing an error page.
        resp = requests.get(url, timeout=10)
        resp.raise_for_status()
    except requests.RequestException as err:
        # Best-effort batch scrape: log and move on to the next page.
        print(f'page {page} request failed: {err}')
        continue

    selector = parsel.Selector(resp.text)
    # Listing markup: <article><div><h2><a href="detail-url">title</a>
    url_list = selector.css('article > div > h2 > a::attr(href)').getall()
    titles = selector.css('article > div > h2 > a::text').getall()

    for detail_url, title in zip(url_list, titles):
        # Sanitize the title so it is a valid directory name.
        safe_title = ''.join(c for c in title if c not in _ILLEGAL_NAME_CHARS).strip()
        if not safe_title:
            continue

        # Fetch the detail page for this article.
        try:
            resp_detail = requests.get(detail_url, timeout=10)
            resp_detail.raise_for_status()
        except requests.RequestException as err:
            print(f'detail page request failed ({detail_url}): {err}')
            continue

        # Extract every image URL from the article body.
        selector_detail = parsel.Selector(resp_detail.text)
        img_src_list = selector_detail.css('div.entry-content > div > p > img::attr(src)').getall()

        # Each article gets its own folder; makedirs also creates the
        # missing 'img/' parent and tolerates re-runs (exist_ok).
        os.makedirs(f'img/{safe_title}', exist_ok=True)

        for img_url in img_src_list:
            try:
                img_data = requests.get(img_url, timeout=10).content
            except requests.RequestException as err:
                print(f'image download failed ({img_url}): {err}')
                continue

            # Keep the server's original file name for the saved image.
            img_name = img_url.split('/')[-1]
            with open(f'img/{safe_title}/{img_name}', mode='wb') as f:
                f.write(img_data)

            print(img_name, '成功爬取！！！')

    print(f'=====================================第{page}页爬取完成！============================')













