import scrapy
from fake_useragent import UserAgent
from scrapy import Selector, Request
from scrapy.crawler import CrawlerProcess
from scrapy.http import HtmlResponse
from scrapy.utils.project import get_project_settings
import openpyxl
import os


# Item class for one movie entry
class MovieItem(scrapy.Item):
    """Item holding one scraped movie entry: title, rating ('rank') and tagline ('subject')."""

    title = scrapy.Field()    # movie title text
    rank = scrapy.Field()     # rating score text
    subject = scrapy.Field()  # one-line tagline / quote


# Custom item pipeline
class MoviePipeline1:
    """Item pipeline that collects scraped movies into an Excel workbook."""

    def __init__(self):
        # Build the workbook up front and write the header row once.
        workbook = openpyxl.Workbook()
        sheet = workbook.active
        sheet.title = 'Movie Top250'
        sheet.append(('标题', '评分', '主题'))
        self.wb = workbook
        self.ws = sheet

    def process_item(self, item, spider):
        """Append one movie as a worksheet row and pass the item along."""
        row = (item['title'], item['rank'], item['subject'])
        self.ws.append(row)
        return item

    def close_spider(self, spider):
        # Persist everything to disk once the crawl has finished.
        self.wb.save('k1电影数据.xlsx')

# TODO: add cookie handling (not implemented)



# Spider class: fetches and parses the chart pages
class DoubanSpider(scrapy.Spider):
    """Spider that crawls the Douban Top250 movie chart and yields MovieItem objects."""

    # Spider name is derived from this file's name, so the module can be
    # renamed without touching the code.
    name = os.path.basename(__file__).replace('.py', '')
    allowed_domains = ['movie.douban.com']
    # start_urls = ['https://movie.douban.com/top250']

    # Per-spider settings that override the project settings file.
    custom_settings = {
        # NOTE(review): the random user agent is picked once at class
        # definition time, not per request — confirm this is intended.
        'USER_AGENT': str(UserAgent().random),
        'ROBOTSTXT_OBEY': True,
        'CONCURRENT_REQUESTS': 2,
        'RANDOMIZE_DOWNLOAD_DELAY': True,
        'DOWNLOAD_DELAY': 3,
        # Pipeline configuration: smaller numbers run earlier.  The pipeline
        # class lives in this same module, hence the f'{name}.' prefix.
        'ITEM_PIPELINES': {
            f'{name}.MoviePipeline1': 1
        }
        # Downloader middleware configuration (request interception):
        # 'DOWNLOADER_MIDDLEWARES': {
        #     'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
        # }
    }

    def start_requests(self):
        """Build the ten paginated chart requests (25 movies per page)."""
        for page in range(10):
            yield Request(url=f'https://movie.douban.com/top250?start={page * 25}&filter=')

    def parse(self, response: HtmlResponse, **kwargs):
        """Parse one chart page and yield a MovieItem per <li> entry."""
        sel = Selector(response)
        list_items = sel.css('#content > div > div.article > ol > li')
        for list_item in list_items:
            movie_item = MovieItem()
            movie_item['title'] = list_item.css('span.title::text').extract_first()
            # BUG FIX: the original queried `list_items` (the whole result
            # set) here, so every movie received the first movie's rating.
            movie_item['rank'] = list_item.css('span.rating_num::text').extract_first()
            movie_item['subject'] = list_item.css('span.inq::text').extract_first()
            yield movie_item

        # Alternative pagination strategy, kept for reference:
        # href_list = sel.css('div.paginator > a::attr(href)')
        # for href in href_list:
        #     url = response.urljoin(href.extract())
        #     yield Request(url=url)


if __name__ == '__main__':
    # Launch the spider in-process with the project's Scrapy settings;
    # start() blocks until the crawl completes.
    crawler_process = CrawlerProcess(get_project_settings())
    crawler_process.crawl(DoubanSpider)
    crawler_process.start()

    # # 获取settings.py模块的设置
    # settings = get_project_settings()
    # # 使用随机代理
    # settings.setdict({'USER_AGENT': str(UserAgent().random)})
    # # 遵守Robot规则
    # settings.setdict({'ROBOTSTXT_OBEY': True})
    # # 设置最大并发
    # settings.setdict({'CONCURRENT_REQUESTS': 2})
    # # 打开随机化
    # settings.setdict({'RANDOMIZE_DOWNLOAD_DELAY': True})
    # # 设置下载延迟
    # settings.setdict({'DOWNLOAD_DELAY': 3})
    #
    # # settings.setdict({'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36'})
    # process = CrawlerProcess(settings=settings)
    #
    # # 可以添加多个spider
    # process.crawl(DoubanSpider)
    #
    # # 启动爬虫，会阻塞，直到爬取完成
    # process.start()

    # 获取当前文件名
    # print(os.path.basename(__file__))

    # 启动爬虫
    # cmdline.execute(f'scrapy runspider {os.path.basename(__file__)}'.split())
