# -*- coding: utf-8 -*-
# coding: UTF-8
import logging
from scrapy.spiders import CrawlSpider
from scrapy.selector import Selector
from scrapy.http import Request

from ..items import DbdyItem


class dbdySpider(CrawlSpider):
    """Crawl the Douban Movie Top 250 listing and yield rank/name items."""

    name = 'dbdySpider'
    # host = 'https://movie.douban.com/'
    # %s is filled from the start_urls entries (empty string -> base Top250 page).
    host = 'https://movie.douban.com/top250%s'
    start_urls = [
        ''
    ]

    # Set the requests library's log level to WARNING to silence its noise.
    # NOTE(review): configuring logging in the class body runs once at import
    # time as a side effect; consider moving this into project settings.
    logging.getLogger("requests").setLevel(logging.WARNING)
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
        datefmt='%a, %d %b %Y %H:%M:%S',
        filename='cataline.log',
        filemode='w')

    # test = True
    def start_requests(self):
        """Issue one request per start_urls entry (used as the %s suffix of host)."""
        for db_type in self.start_urls:
            yield Request(url=self.host % db_type,
                          callback=self.parse_db_key)

    def parse_db_key(self, response):
        """Parse one Top250 listing page: yield an item per movie, then follow
        the 'next page' link (if present) back into this callback."""
        # Lazy %-style args: the message is only formatted if DEBUG is enabled.
        logging.debug('request url:------>%s', response.url)

        # One <div class="item"> per movie entry on the listing page.
        for div in response.xpath('//div[@class="item"]'):
            dbItem = DbdyItem()
            # extract() keeps list-of-strings values (original behavior);
            # switch to extract_first() only if pipelines expect plain strings.
            dbItem['rank'] = div.xpath('div[@class="pic"]/em/text()').extract()
            dbItem['name'] = div.xpath('div[@class="pic"]/a/img/@alt').extract()
            yield dbItem

        # Pagination: the href is page-relative (e.g. '?start=25&filter=');
        # extract_first() returns None when there is no next page.
        next_href = response.xpath('//span[@class="next"]/a/@href').extract_first()
        if next_href:
            yield Request(response.urljoin(next_href), callback=self.parse_db_key)
