import hashlib
import os
import sys
from pathlib import Path
from typing import Iterable

import scrapy
from scrapy import Request
from scrapy.cmdline import execute
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait

from douban.items import MovieItem

class MovieSpider(scrapy.Spider):
    """Spider for the Douban Top 250 movie chart.

    Crawls the paginated chart (25 movies per page), extracts summary
    fields from each list entry, then follows the entry's detail link
    before emitting the item.
    """

    name = "movie"
    allowed_domains = ["movie.douban.com"]
    # Number of 25-movie chart pages to request (the full chart has 10).
    # Kept at 1 to preserve the original crawl scope; raise to crawl more.
    max_pages = 1

    def start_requests(self) -> Iterable[Request]:
        """Yield one request per chart page, offset by 25 movies each."""
        for page in range(self.max_pages):
            yield scrapy.Request(f'https://movie.douban.com/top250?start={page * 25}&filter=')

    def parse(self, response):
        """Parse a chart page: build a MovieItem per list entry and follow
        its detail link so the item is emitted from parse_detail.
        """
        # NOTE: renamed from `list` — the original shadowed the builtin.
        entries = response.xpath('//div[@id="content"]/div/div[1]/ol/li')
        for entry in entries:
            movie = MovieItem()
            movie['img_src'] = entry.css('div.pic > a > img::attr(src)').extract_first()
            movie['title'] = entry.css('span.title::text').extract_first()
            movie['quote'] = entry.css('span.inq::text').extract_first()
            movie['score'] = entry.css('span.rating_num::text').extract_first()
            # The last <span> under div.star carries the comment count.
            # Guard the [-1] index so a layout change yields None instead of
            # crashing the whole page parse with IndexError.
            star_texts = entry.css('div.star > span::text').extract()
            movie['comments'] = star_texts[-1] if star_texts else None
            movie['remark'] = entry.css('div.bd > p::text').extract_first()
            movie['href'] = response.urljoin(entry.css('div.info > div.hd > a::attr(href)').extract_first())

            yield Request(movie['href'], callback=self.parse_detail, cb_kwargs={'movie': movie})

    def parse_detail(self, response, **kwargs):
        """Emit the item built in parse() once the detail page has loaded."""
        yield kwargs['movie']

if __name__ == '__main__':
    # Make this file's directory importable when the script is run directly,
    # so the `douban` project package resolves.
    sys.path.append(os.path.dirname(os.path.abspath(__file__)))
    # Launch the crawl through Scrapy's CLI entry point — equivalent to
    # running `scrapy crawl movie` ('movie' is MovieSpider.name above).
    execute(['scrapy', 'crawl', 'movie'])


