import scrapy
from ..items import *


class DoubanSpider(scrapy.Spider):
    """Scrape movie titles and ratings from Douban's Top 250 list.

    The list is paginated 25 movies per page via the ``start`` query
    parameter; the spider crawls the first 5 pages (125 movies).
    """

    name = 'douban'
    allowed_domains = ['douban.com']
    # BUG FIX: the original code built these 5 paginated URLs in a loop
    # and then immediately reassigned start_urls to a single URL, so only
    # one page was ever crawled. Keep the generated list.
    start_urls = [
        f'https://movie.douban.com/top250?start={page * 25}'
        for page in range(5)
    ]

    def parse(self, response):
        """Yield one DoubanItem per movie found on a Top 250 page.

        Pairs the Nth title with the Nth rating positionally; this
        assumes both XPath result lists stay aligned — TODO confirm
        every ``.item`` div always carries a rating span.
        """
        names = response.xpath("//div[@class='item']//a/span[1]/text()").extract()
        scores = response.xpath("//div[@class='star']/span[2]/text()").extract()
        for name, score in zip(names, scores):
            movie = DoubanItem()
            movie['movie_name'] = name
            movie['movie_core'] = score
            yield movie

        # Run with: scrapy crawl douban -o save_data.json
