# -*- coding: utf-8 -*-
import scrapy

from ArticleSpider.items import DoubanTop250Item
class DoubanTop250Spider(scrapy.Spider):
    """Crawl the Douban Movie Top 250 list and yield one item per movie.

    Starts at the first list page and follows the "next page" link until
    the paginator runs out (10 pages x 25 movies).
    """
    name = 'douban_top250'
    allowed_domains = ['movie.douban.com']
    start_urls = ['https://movie.douban.com/top250']

    def parse(self, response):
        """Parse one list page: yield a populated item per movie entry,
        then schedule a request for the next page if one exists.
        """
        for li in response.css("div.article ol.grid_view li"):
            douban_item = DoubanTop250Item()
            # Ranking number (1-250) rendered in the <em> tag.
            douban_item['serial_num'] = li.css("em::text").extract_first("")
            douban_item['movie_name'] = li.css("div.info div.hd span.title::text").extract_first("")
            # First description line; collapse all whitespace runs so the
            # multi-space/newline layout becomes a single compact string.
            douban_item['introduce'] = ''.join(li.css("div.info div.bd p::text").extract_first("").split())
            douban_item['star'] = li.css("div.star span.rating_num::text").extract_first("")
            # One-line tagline shown under the rating.
            douban_item['describe'] = li.css("div.info > div.bd > p.quote > span::text").extract_first("")
            douban_item['comment'] = li.css("div.info > div.bd > div > span::text").extract_first("")
            yield douban_item

        # Pagination: the "next" control is an <a> element inside span.next,
        # not a <link> element — the original `link::attr(href)` selector
        # matched nothing, so only the first page was ever crawled.
        next_page = response.css("div.article > div.paginator > span.next > a::attr(href)").extract_first("")
        if next_page:
            # urljoin resolves the relative "?start=25&filter=" href against
            # the current page URL, avoiding fragile string concatenation.
            yield scrapy.Request(response.urljoin(next_page), callback=self.parse)


