import scrapy
from scrapy.spiders.crawl import CrawlSpider
from meizitu.items import MeizituItem


class mezitu(CrawlSpider):
    """Spider for meizitu.com that crawls one listing page per run.

    The number of the next listing page to fetch is persisted in
    ``page_file`` between runs, so each invocation advances one page.
    """

    name = "meizitu"
    allowed_domains = ['meizitu.com']
    start_urls = ['http://meizitu.com/a/more_%s.html']
    # Raw string: "\P" / "\p" are invalid escape sequences in a normal
    # string literal (DeprecationWarning, SyntaxError in future Pythons).
    page_file = r"E:\PythonProject\page.txt"

    def start_requests(self):
        """Yield a request for the next listing page and bump the counter.

        Falls back to page "1" when the state file is missing (first run)
        or empty, then persists the following page number.
        """
        url = 'http://meizitu.com/a/more_%s.html'
        page = self.readFile()
        if page is None or not page.strip():
            page = "1"  # first run, or empty state file
        yield scrapy.Request(url=url % page, callback=self.parse)
        # Persist the next page number for the following run.
        self.writeFile(str(int(page) + 1))

    def parse(self, response):
        """Follow every gallery (theme) link found on the listing page."""
        # extract() always returns a list (possibly empty), never None,
        # so iterating directly is safe.
        for url in response.xpath("//div[@class='pic']/a/@href").extract():
            yield scrapy.Request(url, callback=self.detail)

    def detail(self, response):
        """Collect the gallery title and all of its image URLs into an item."""
        item = MeizituItem()
        item['img_dir'] = response.xpath(
            "//div[@class='metaRight']/h2/a/text()").extract_first()
        item['url'] = response.xpath(
            "//div[@id='picture']/p/img/@src").extract()
        # Use the spider's logger instead of bare print() so output goes
        # through Scrapy's logging configuration.
        self.logger.debug("img_dir=%s urls=%s", item['img_dir'], item['url'])
        yield item

    # Read the persisted page number.
    def readFile(self):
        """Return the stored page number as a string, or None when the
        state file does not exist yet (first run)."""
        try:
            with open(self.page_file, encoding="utf-8") as fh:
                return fh.read()
        except FileNotFoundError:
            # Makes the ``page is None`` fallback in start_requests reachable;
            # the original version raised here on a missing file.
            return None

    # Rewrite the persisted page number.
    def writeFile(self, page):
        """Persist *page* (a string) as the next listing page to crawl."""
        with open(self.page_file, 'w', encoding="utf-8") as fh:
            fh.write(page)
