# -*- coding: utf-8 -*-

import scrapy
from doubantop.items import DoubantopItem


class DoubantopSpiderSpider(scrapy.Spider):
    """Spider that scrapes the Douban Top 250 movie chart.

    Yields one ``DoubantopItem`` per movie on each chart page, then
    follows the "next page" link until the last page is reached.
    """

    # Unique spider name, used by `scrapy crawl doubantop_spider`.
    name = 'doubantop_spider'
    # Restrict the crawl to the Douban movie domain.
    allowed_domains = ['movie.douban.com']
    # Entry URL handed to the scheduler.
    start_urls = ['https://movie.douban.com/top250']

    def parse(self, response):
        """Parse one chart page.

        Args:
            response: the downloaded Top-250 chart page.

        Yields:
            DoubantopItem: one populated item per movie entry.
            scrapy.Request: a request for the next chart page, if any.
        """
        # Each <li> under the grid_view list is one movie entry.
        movie_list = response.xpath(
            "//div[@class='article']//ol[@class='grid_view']/li")
        for i_item in movie_list:
            douban_item = DoubantopItem()
            # Rank number shown next to the poster.
            douban_item['serial_number'] = i_item.xpath(
                ".//div[@class='pic']/em/text()").extract_first()
            # First title span (the Chinese title).
            douban_item['movie_name'] = i_item.xpath(
                ".//div[@class='hd']/a/span[1]/text()").extract_first()
            # The info paragraph spans multiple text nodes (director/cast
            # line, year/country/genre line).  Collapse the internal
            # whitespace of each node and keep ALL of them.  BUG FIX: the
            # original loop assigned 'information' inside the loop, so
            # every iteration overwrote the previous one and only the
            # last text node was kept — despite the stated intent of
            # capturing the complete content.
            content = i_item.xpath(
                ".//div[@class='bd']/p[1]/text()").extract()
            douban_item['information'] = ";".join(
                "".join(part.split()) for part in content)
            # Numeric rating, e.g. "9.7".
            douban_item['star'] = i_item.xpath(
                ".//span[@class='rating_num']/text()").extract_first()
            # Evaluation count, e.g. "1234567人评价".
            douban_item['evaluate'] = i_item.xpath(
                ".//div[@class='star']/span[4]/text()").extract_first()
            # One-line quote/tagline (may be absent for some entries).
            douban_item['description'] = i_item.xpath(
                ".//p[@class='quote']/span/text()").extract_first()
            # Hand the item to the item pipelines.
            yield douban_item
        # Pagination: <span class="next"> carries a <link> whose href is a
        # relative query string such as "?start=25&filter=".
        next_link = response.xpath(
            "//span[@class='next']/link/@href").extract_first()
        if next_link:
            yield scrapy.Request("https://movie.douban.com/top250" + next_link,
                                 callback=self.parse)
