# -*- coding: utf-8 -*-
import scrapy
from myspider.items import MeizituItem
import requests
import os
import re


class MeizituSpider(scrapy.Spider):
    """Crawl mzitu.com listing pages and download every gallery's images
    into a per-gallery directory under ``dirs``.

    Flow: start_requests -> parse (listing page) -> page_parse (gallery
    index) -> img_parse (single image page, saved to disk).
    """

    name = 'meizitu'
    # Throttle requests so the site is less likely to block the crawler.
    custom_settings = {'DOWNLOAD_DELAY': 0.5}
    allowed_domains = ['mzitu.com']
    start_urls = ['https://www.mzitu.com/']
    # Root directory on disk; one sub-directory is created per gallery.
    dirs = r'E:\图片\xuexi'
    # Characters illegal in Windows file names, replaced with '_'.
    # Compiled once at class level instead of per listing item.
    _ILLEGAL_CHARS = re.compile(r"[\/\\\:\*\?\"\<\>\|]")

    def start_requests(self):
        # Only the first listing page is crawled; widen the range to
        # cover more pages.  (The original used a while-True loop that
        # broke after one iteration — same effect, stated directly.)
        for page in range(1, 2):
            url = self.start_urls[0] + 'page/' + str(page) + '/'
            yield scrapy.Request(url, callback=self.parse)

    def parse(self, response: scrapy.http.response.Response):
        """Extract each gallery's title and link from a listing page,
        create its target directory and request the gallery index."""
        title_links = response.xpath('//*[@id="pins"]/li/span[1]')
        for title_links_item in title_links:
            title = title_links_item.xpath('a/text()').extract_first()
            link = title_links_item.xpath('a/@href').extract_first()
            if not title or not link:
                # Malformed listing entry: nothing usable to follow.
                continue
            # Sanitize the title so it is a valid directory name.
            title = self._ILLEGAL_CHARS.sub('_', title)
            dir_path = os.path.join(self.dirs, title)
            # exist_ok: re-running the spider (or duplicate titles) must
            # not crash with FileExistsError, as bare os.mkdir did.
            os.makedirs(dir_path, exist_ok=True)
            meta = {'dir_path': dir_path}
            yield scrapy.Request(url=link, callback=self.page_parse, meta=meta)

    def page_parse(self, response: scrapy.http.response.Response):
        """Read the gallery's page count and request every image page."""
        page_max = response.xpath('/html/body/div[2]/div[1]/div[4]/a[5]/span/text()').extract_first()
        if page_max is None or not page_max.isdigit():
            # Layout changed or anti-scraping page served; int() would crash.
            self.logger.warning('No page count found at %s', response.url)
            return
        for page in range(1, int(page_max) + 1):
            url = response.url + '/' + str(page)
            # Fresh dict per request — mutating the shared response.meta
            # inside the loop is fragile.
            meta = {'dir_path': response.meta['dir_path'], 'page': page}
            yield scrapy.Request(url=url, callback=self.img_parse, meta=meta)

    def img_parse(self, response: scrapy.http.response.Response):
        """Locate the image URL on a gallery page and save it to disk as
        ``<dir_path>/<page>.jpg``."""
        img_url = response.xpath('/html/body/div[2]/div[1]/div[3]/p/a/img/@src').extract_first()
        if not img_url:
            self.logger.warning('No image found at %s', response.url)
            return
        # The image CDN rejects requests without a same-site Referer.
        headers = {'referer': response.url,
                   'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36'}
        dir_path = response.meta['dir_path']
        page = response.meta['page']
        # NOTE(review): requests is synchronous and blocks Scrapy's event
        # loop; the idiomatic fix is a scrapy.Request + images pipeline.
        # Kept, but with a timeout and status check so a dead socket or
        # error page cannot hang the crawl / write a garbage .jpg.
        r = requests.get(img_url, headers=headers, timeout=30)
        r.raise_for_status()
        with open(os.path.join(dir_path, str(page) + '.jpg'), 'wb') as f:
            f.write(r.content)
