# -*- coding: utf-8 -*-
import scrapy

# 导入要保存的数据项类 MaoyanItem
from ..items import MaoyanItem


class MaoyanSpider(scrapy.Spider):
    """Spider for the Maoyan Top-100 movie board (10 movies per page)."""

    name = 'maoyan'
    allowed_domains = ['maoyan.com']
    start_urls = ['https://maoyan.com/board/4?offset=0']
    offset = 0  # current value of the `offset` query-string parameter

    def parse(self, response):
        """Extract one MaoyanItem per movie, then schedule the next page.

        Yields:
            MaoyanItem: name, stars and release time for each movie entry.
            scrapy.Request: the next board page while offset < 100.
        """
        # Each movie entry is a <dd> under <dl class="board-wrapper">.
        dd_list = response.xpath('//dl[@class="board-wrapper"]/dd')
        for dd in dd_list:
            item = MaoyanItem()
            item['name'] = dd.xpath('./a/@title').get()
            # `or ''` guards against a missing node: .get() returns None
            # then, and None.strip() would raise AttributeError.
            item['stars'] = (
                dd.xpath('./div/div/div[1]/p[2]/text()').get() or ''
            ).strip()
            # <p class="releasetime">
            item['atime'] = dd.xpath('./div/div/div[1]/p[3]/text()').get()
            yield item  # hand the item to the pipelines

        # After this page is done, compute the next-page URL for the scheduler.
        # NOTE(review): self.offset is shared spider state; this works only
        # because each parse() schedules exactly one follow-up request, so the
        # callbacks form a sequential chain — confirm if concurrency is raised.
        self.offset += 10
        if self.offset < 100:
            url = 'https://maoyan.com/board/4?offset={}'.format(self.offset)
            # Enqueue the next page with the scheduler.
            yield scrapy.Request(url, callback=self.parse)
