#!/usr/bin/env python
# encoding: utf-8

import scrapy
from ..items import MaoyanreyingItem


class MaoyanSpider(scrapy.Spider):
    """Spider that scrapes the Maoyan movie-board page.

    For each ``<dd>`` entry on the board it extracts the rank index,
    title, star cast, release time and score, yielding one populated
    ``MaoyanreyingItem`` per movie.
    """

    # Spider name used to launch the crawl: `scrapy crawl maoyan_spider`.
    name = 'maoyan_spider'
    # Only links under these domains are followed; off-site links are ignored.
    allowed_domains = ['maoyan.com']
    # Initial request URL(s); each response is passed to parse().
    start_urls = ['https://maoyan.com/board/']

    def parse(self, response):
        """Parse the board page and yield one item per movie entry.

        :param response: the downloaded board page to extract data from
        :return: generator of MaoyanreyingItem
        """
        for dd in response.xpath(".//*[@class='board-wrapper']/dd"):
            item = MaoyanreyingItem()
            item['index'] = dd.xpath(
                ".//*[contains(@class,'board-index')]/text()").get()
            item['title'] = dd.xpath(".//*[@class='name']/a/text()").get()
            # get(default='') guards against a missing node: the legacy
            # extract_first() returned None here, making .strip() raise
            # AttributeError and abort the whole page.
            item['star'] = dd.xpath(
                ".//*[@class='star']/text()").get(default='').strip()
            item['release_time'] = dd.xpath(
                ".//*[@class='releasetime']/text()").get()
            # The score is split into integer and fractional parts in the
            # markup; default both to '' so a missing part can't raise
            # TypeError on the string concatenation.
            integer_part = dd.xpath(
                ".//*[@class='integer']/text()").get(default='')
            fraction_part = dd.xpath(
                ".//*[@class='fraction']/text()").get(default='')
            item['score'] = integer_part + fraction_part
            yield item
