from sys import modules
from typing import Counter
import scrapy
import sys
sys.path.append("/Users/zxkkk/gitrepo/spider/douban")

from douban.items import DoubanItem
class MovieSpider(scrapy.Spider):
    """Scrapy spider that crawls the Douban Top-250 movie list.

    For each movie entry it yields a ``DoubanItem`` with rank, title,
    introduction, rating, vote count and tagline, then follows the
    "next page" link until the last page.
    """

    name = 'movie'  # spider name used by `scrapy crawl movie`
    # Bare domain only: a path here would make OffsiteMiddleware reject
    # the follow-up page requests (original had 'movie.douban.com/top250').
    allowed_domains = ['movie.douban.com']
    start_urls = ['http://movie.douban.com/top250/']  # crawl entry point

    def parse(self, response):
        """Parse one Top-250 listing page.

        Yields one populated ``DoubanItem`` per movie on the page, then a
        ``scrapy.Request`` for the next page when a "next" link exists.
        """
        movie_list = response.xpath("//*[@id='content']/div/div[1]/ol/li")
        for i_item in movie_list:
            item = DoubanItem()
            item["serial_number"] = i_item.xpath(
                ".//div[@class='pic']/em/text()").extract_first("")
            item["movie_name"] = i_item.xpath(
                ".//div[@class='hd']//span[@class='title']/text()").extract_first("")
            # Join ALL introduction text nodes (director / year / genre lines),
            # stripping internal whitespace from each. The original kept only
            # the last line, and raised NameError (or reused a stale value)
            # when the node list was empty.
            intros = i_item.xpath(".//div[@class='bd']/p[1]/text()").extract()
            item["movie_introduce"] = ";".join(
                "".join(intro.split()) for intro in intros if intro.strip())
            item["star"] = i_item.xpath(
                ".//div[@class='star']//span[@class='rating_num']/text()").extract_first("")
            item["evaluate_number"] = i_item.xpath(
                ".//div[@class='star']//span[4]/text()").extract_first("")
            item["describe"] = i_item.xpath(
                ".//p[@class='quote']/span/text()").extract_first("")
            yield item

        # Follow pagination: the "next" span holds a relative href like
        # '?start=25&filter='. Absent on the final page, ending the crawl.
        next_link = response.xpath(
            "//*[@id='content']//span[@class='next']/a/@href").extract()
        if next_link:
            link = next_link[0]  # first (only) next-page href
            yield scrapy.Request(
                "https://movie.douban.com/top250" + link,
                callback=self.parse,
                dont_filter=True)
