import scrapy
# 引入可直接写相对路径 ..表示上级目录 此处items在当前文件的上一级
from ..items import DoubanItem


def replace_special_str(strings):
    """Strip whitespace junk left over after joining scraped text nodes.

    Removes the literal ``&nbsp;`` entity, newlines, non-breaking spaces
    (``\\xa0``) and ordinary spaces.

    @param strings: text to clean; ``None``/empty input is tolerated
    @return: the cleaned string ('' when input is falsy)
    """
    if not strings:
        # extract_first() may return None when an XPath matches nothing;
        # return '' instead of raising AttributeError on .replace.
        return ''
    return strings.replace('&nbsp;', '').replace('\n', '').replace('\xa0', '').replace(' ', '')


class DoubanSpiderSpider(scrapy.Spider):
    """Spider for the Douban Top-250 movie chart.

    Run with ``scrapy crawl douban_spider`` (items are handed to the
    configured pipelines, e.g. MongoDB), or export directly with
    ``scrapy crawl douban_spider -o movie.json`` / ``-o movie.csv``.
    """

    name = 'douban_spider'
    allowed_domains = ['movie.douban.com']
    start_urls = ['https://movie.douban.com/top250']

    def parse(self, response, **kwargs):
        """Parse one Top-250 listing page.

        Yields one ``DoubanItem`` per movie on the page, then schedules
        the next listing page (when present) with this same callback.

        @param response: downloaded listing page
        @return: generator of DoubanItem / scrapy.Request
        """
        movie_list = response.xpath("//div[@class='article']//ol//li")

        for movie in movie_list:
            douban_item = DoubanItem()
            # Ranking number shown in the poster block.
            douban_item['ids'] = movie.xpath(".//div[@class='pic']//em/text()").extract_first()

            # Title fragments (main title / original title / aliases) are
            # separate text nodes; join them, then strip entities/whitespace.
            name = ''.join(movie.xpath(".//div[@class='info']//div[@class='hd']//a//text()").extract())
            douban_item['name'] = replace_special_str(name)

            # Director / cast / year line. normalize-space() collapses layout
            # whitespace; guard with '' because extract_first() can return
            # None when the node is missing.
            people = movie.xpath(
                "normalize-space(.//div[@class='info']//div[@class='bd']//p//text())").extract_first()
            douban_item['people'] = replace_special_str(people or '')

            # Rating score, e.g. "9.7" (field name 'start' kept as-is for
            # compatibility with the existing DoubanItem definition).
            douban_item['start'] = movie.xpath(
                ".//div[@class='star']//span[@class='rating_num']/text()").extract_first()

            # Ratings count text (last span in the star block).
            douban_item['comment'] = movie.xpath(".//div[@class='star']//span[last()]/text()").extract_first()

            # One-line quote; absent for some entries, so this may be None.
            douban_item['describe'] = movie.xpath(".//div[@class='bd']//span[@class='inq']/text()").extract_first()

            yield douban_item

        # Follow pagination: the "next" href is relative (e.g. "?start=25&filter=").
        next_link = response.xpath("//div[@class='paginator']//span[@class='next']//a//@href").extract_first()
        if next_link:
            # urljoin resolves the relative href against the current page URL
            # instead of relying on a hard-coded base-URL string.
            yield scrapy.Request(response.urljoin(next_link), callback=self.parse)
