'''
request 请求：
    url：http://www.521609.com/daxuexiaohua/list31.html
         http://www.521609.com/daxuexiaohua/list32.html

    请求方法：GET
    请求头：
        cookie
        user-agent Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36
        referer
    请求体


response
    状态响应码
        200 成功
        302 重定向
    响应头
'''
from requests_html import HTMLSession
import os
import re


class spider():
    """Scraper for the campus-photo galleries on www.521609.com.

    Walks the list pages, follows each gallery link, and downloads every
    image of each gallery (including paginated follow-up pages) into
    ``111/<gallery name>/<page number>.jpg``.
    """

    def __init__(self):
        self.session = HTMLSession()
        # The site serves pages with only a User-Agent; no cookie/referer needed.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'
        }

    def get_index_url(self):
        """Yield the three list-page URLs to crawl.

        Page 1 is the bare index; pages 2-3 are list32.html / list33.html.
        """
        for page in range(1, 4):
            if page == 1:
                yield 'http://www.521609.com/daxuexiaohua/'
            else:
                yield 'http://www.521609.com/daxuexiaohua/list3%s.html' % page

    def _fetch_photo(self, page_url):
        """GET one gallery page and extract its photo.

        Returns ``(img_url, img_name, response)``; ``img_url``/``img_name``
        are ``None`` when the page has no ``.picbox img`` element (the
        original code raised IndexError in that case).
        """
        r = self.session.get(url=page_url, headers=self.headers)
        r.encoding = 'GBK'  # HTML pages on this site are GBK-encoded
        img_tags = r.html.find('.picbox img')
        if not img_tags:
            return None, None, r
        img_url = 'http://www.521609.com' + img_tags[0].attrs['src']
        img_name = img_tags[0].attrs['alt']
        return img_url, img_name, r

    def get_img_name(self, index_url):
        """Yield ``(page_no, img_url, img_name)`` for every photo reachable
        from one list page.

        :param index_url: a list-page URL produced by :meth:`get_index_url`.
        """
        r = self.session.get(url=index_url, headers=self.headers)
        r.encoding = 'GBK'
        for element in r.html.find('.index_img a'):
            href = element.attrs['href']  # e.g. '/daxuexiaohua/11112.html'
            img_url, img_name, r1 = self._fetch_photo('http://www.521609.com' + href)
            if img_url is None:
                continue
            yield 1, img_url, img_name
            # The first <li> of the pager holds the total page count
            # (text looks like "共N页" — TODO confirm exact wording).
            pager = r1.html.find('.dede_pages ol>li', first=True)
            if not pager:
                continue
            # \d+ extracts the count for any number of digits. The original
            # len()>4 switch between \d\d and \d mis-parsed 2-digit counts
            # on short pager texts and skipped all follow-up pages.
            counts = re.findall(r"\d+", pager.text)
            if not counts:
                continue
            # Follow-up pages are '<id>_2.html', '<id>_3.html', ...
            for page_no in range(2, int(counts[0]) + 1):
                page_url = 'http://www.521609.com' + href[:-5] + '_' + str(page_no) + '.html'
                img_url, img_name, _ = self._fetch_photo(page_url)
                if img_url is None:
                    continue
                yield page_no, img_url, img_name

    def save_img(self, img_num, img_url, img_name):
        """Download ``img_url`` and store it as ``111/<img_name>/<img_num>.jpg``.

        :param img_num: page number inside the gallery, used as file name.
        :param img_url: absolute URL of the image.
        :param img_name: gallery title, used as directory name.
        """
        # Send the same headers as every other request for consistency.
        r = self.session.get(url=img_url, headers=self.headers)
        # No r.encoding here: the image is written from r.content (bytes),
        # so a text encoding is irrelevant.
        img_dir = "111/{}".format(img_name)
        # makedirs also creates the parent '111' directory; the original
        # os.mkdir raised FileNotFoundError when '111' did not exist.
        os.makedirs(img_dir, exist_ok=True)
        img_path = os.path.join(img_dir, "{}.jpg".format(img_num))
        with open(img_path, 'wb') as f:
            f.write(r.content)
        print('%s下载完毕' % img_name)

    def run(self):
        """Crawl every list page and download every photo found."""
        for index_url in self.get_index_url():
            for img_num, img_url, img_name in self.get_img_name(index_url):
                self.save_img(img_num, img_url, img_name)


if __name__ == '__main__':
    # Entry point: build the scraper and start the full crawl.
    spider().run()
