# -*- coding: utf-8 -*-
import scrapy
from gakkiDouban.items import GakkidoubanItem
import requests


class ImagesSpider(scrapy.Spider):
    """Crawl a Douban celebrity's photo gallery and yield one item per image."""

    name = 'images'
    allowed_domains = ['movie.douban.com']
    # Alternative start pages for other celebrities — uncomment one to switch.
    # Gakki
    # start_urls = ['https://movie.douban.com/celebrity/1018562/photos/?type=C&start=0&sortby=like&size=a&subtype=a']
    # Choi
    # start_urls = ['https://movie.douban.com/celebrity/1314646/photos/?type=C&start=0&sortby=like&size=a&subtype=a']
    # Hamabe_Minami
    # start_urls = ['https://movie.douban.com/celebrity/1324483/photos/?type=C&start=0&sortby=like&size=a&subtype=a']
    # Asuka
    start_urls = ['https://movie.douban.com/celebrity/1329640/photos/']
    # Hori
    # start_urls = ['https://movie.douban.com/celebrity/1351608/photos/']
    # Mai
    # start_urls = ['https://movie.douban.com/celebrity/1339098/photos/']

    def parse(self, response):
        """Parse one gallery page.

        Yields one GakkidoubanItem per ``.cover`` image, carrying the
        current page label (``.thispage`` text) and the image ``src`` URL,
        then follows the paginator's "next" link back into this callback.
        """
        root = response.css('.article')
        page = root.css('.thispage::text').extract_first()
        for cover in root.css('.cover'):
            # Build a fresh item per image: reusing one mutable item across
            # yields lets later loop iterations clobber the 'url' of items
            # the pipeline has not consumed yet.
            item = GakkidoubanItem()
            item['page'] = page
            # extract_first() returns None instead of raising IndexError
            # when a cover block has no <img>.
            item['url'] = cover.css('img::attr("src")').extract_first()
            yield item

        # On the last page the "next" selector yields None; guard so we do
        # not call urljoin(None) / schedule a bogus request and crash.
        next_url = response.css('.paginator .next a::attr("href")').extract_first()
        if next_url:
            yield scrapy.Request(url=response.urljoin(next_url), callback=self.parse)
