import scrapy
from ScraDM.items import ScradmItem

class DoubantopSpider(scrapy.Spider):
    """Spider for the Douban Top 250 movie chart.

    Yields one ``ScradmItem`` per movie (ranking, name, introduce,
    comments, describe) and follows the "next page" link until the
    last page of the chart.
    """

    # Spider name (run with `scrapy crawl movie`)
    name = 'movie'
    # Restrict the crawl to the Douban movie domain
    allowed_domains = ['movie.douban.com']
    # Entry-point URL
    start_urls = ['https://movie.douban.com/top250']

    def parse(self, response, **kwargs):
        """Parse one Top 250 list page.

        :param response: downloaded list-page response
        :yields: populated ``ScradmItem`` objects for the item pipeline,
            then one ``scrapy.Request`` for the next page (if any)
        """
        movies_list = response.xpath("//ol[@class='grid_view']/li")
        # Walk each movie entry and extract the required fields.
        for movie in movies_list:
            item = ScradmItem()
            # FIX: original xpath lacked /text() and returned the raw
            # <em> element markup instead of the ranking number.
            item['ranking'] = movie.xpath('./div/div[1]/em/text()').extract_first()
            item['name'] = movie.xpath('.//span[@class="title"]/text()').extract_first()
            # The intro paragraph spans several text nodes; strip all
            # whitespace from each and concatenate into one string.
            text = movie.xpath(".//div[@class='bd']/p[1]/text()").extract()
            item['introduce'] = "".join("".join(s.split()) for s in text)
            item['comments'] = movie.xpath('./div/div[2]/div[2]/div/span[4]/text()').extract_first()
            # FIX: added /text() so the one-line quote is extracted,
            # not the surrounding <span> element.
            item['describe'] = movie.xpath('./div/div[2]/div[2]/p[2]/span/text()').extract_first()
            # Hand the finished item to the Item pipeline.
            yield item

        # FIX: pagination was inside the movie loop, scheduling the same
        # next-page request once per movie; it belongs after the loop.
        next_link = response.xpath("//span[@class='next']/a[1]/@href").extract_first()
        if next_link:
            # urljoin resolves the relative '?start=NN&filter=' href
            # against the current page URL (no hard-coded base needed).
            yield scrapy.Request(response.urljoin(next_link), callback=self.parse)
