import re

import scrapy
from fake_headers import Headers
from ..items import DouBanMovies


class DouBanSpider(scrapy.Spider):
    """Spider that scrapes the Douban Top-250 movie chart (10 pages x 25 movies)."""

    name = 'DouBanSpider'
    allowed_domains = ['douban.com']

    # Per-spider settings override: throttle requests and enable only the
    # pipeline that persists DouBanMovies items.
    custom_settings = {
        "DOWNLOAD_DELAY": 1,
        "ITEM_PIPELINES": {
            'ScrapyDemo.pipelines.DouBanMoviesPipeline': 300,
        }
    }

    # Matches any run of whitespace. Compiled once at class level instead of
    # re-parsing the pattern for every scraped entry; raw string avoids the
    # invalid "\s" escape warning on Python 3.12+.
    _WHITESPACE_RE = re.compile(r'\s+')

    def __init__(self):
        super().__init__()
        # Random header generator to disguise the crawler as a browser.
        # NOTE(review): not referenced by start_requests — kept for compatibility.
        self.header = Headers(os='win', browser='chrome', headers=True)
        # Minimal headers actually attached to every request (see start_requests).
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36"
        }
        # Full captured browser header set, including a session Cookie.
        # NOTE(review): also unused by start_requests — presumably kept as a
        # fallback when the minimal headers get blocked; verify before removing.
        self.custom_header = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'Cookie': 'douban-fav-remind=1; gr_user_id=9756402d-8c22-4857-9b68-cd20882358f4; bid=u3CfAaEoENs; viewed="2129650"; ll="118183"; _vwo_uuid_v2=DCA2874595BA46E318C0B1806FBDF09DB|de99a6d8d465d220b60306522c4fe8fe; __utmz=30149280.1648706973.15.4.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided); __utmz=223695111.1648706973.7.2.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided); __utmc=30149280; __utmc=223695111; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1648714057%2C%22https%3A%2F%2Fwww.google.com.hk%2F%22%5D; _pk_ses.100001.4cf6=*; __utma=30149280.1006018540.1550385442.1648706973.1648714057.16; __utmb=30149280.0.10.1648714057; __utma=223695111.1006152872.1603198199.1648706973.1648714057.8; __utmb=223695111.0.10.1648714057; ap_v=0,6.0; _pk_id.100001.4cf6=b9346b6fa7a74df8.1603198199.8.1648715891.1648708991.',
            'Host': 'movie.douban.com',
            'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="99", "Google Chrome";v="99"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none',
            'Sec-Fetch-User': '?1',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.84 Safari/537.36'
        }

    def start_requests(self):
        """Yield one request per Top-250 page (10 pages, paginated by start=25*k)."""
        url_format = 'https://movie.douban.com/top250?start={page}&filter='
        for page in range(10):
            url = url_format.format(page=page * 25)
            yield scrapy.Request(url=url, callback=self.parse, headers=self.headers)

    def parse(self, response, **kwargs):
        """Extract one DouBanMovies item per entry on a Top-250 list page.

        When no entries are found (empty li_list), the raw body is dumped for
        debugging — typically this means Douban served an anti-crawler page.
        """
        li_list = response.xpath("//ol[@class='grid_view']/li")
        if not li_list:
            # No entries at all: almost certainly blocked / captcha page.
            # (The original required exactly 25 entries, which also discarded
            # legitimate partial pages; any non-empty list is now parsed.)
            print(response.text)
            return
        for li in li_list:
            hd_div_a = li.xpath(".//div[@class='hd']/a")
            # Movie id is the second-to-last path segment of the detail URL,
            # e.g. https://movie.douban.com/subject/1292052/ -> "1292052".
            # Renamed from `id` to avoid shadowing the builtin.
            movie_id = hd_div_a.xpath("@href").get().split("/")[-2]
            # Concatenate all title fragments and strip every whitespace run.
            title = self._WHITESPACE_RE.sub('', hd_div_a.xpath("string(.)").get())
            p_text = li.xpath(".//div[@class='bd']/p/text()").extract()
            # First text line holds director/actors; \xa0 separates the fields.
            director_actor = p_text[0]
            director_actor_split = director_actor.replace("\n", "").replace(" ", "").replace("\xa0", "|").split("|")
            director = director_actor_split[0]
            actor = director_actor_split[-1]

            # Second text line: "year / country / categories".
            year_cry_cat = self._WHITESPACE_RE.sub("", p_text[1]).split("/")
            year = year_cry_cat[0]
            country = year_cry_cat[1]
            category = year_cry_cat[2]
            rate = li.xpath(".//span[@class='rating_num']/text()").get()
            comments = li.xpath(".//div[@class='star']/span[last()]/text()").get()
            # Some entries have no quote; .get() then yields None.
            quote = li.xpath(".//p[@class='quote']/span/text()").get()
            yield DouBanMovies(
                id=movie_id,
                title=title,
                director=director,
                actor=actor,
                year=year,
                country=country,
                category=category,
                rate=rate,
                comments=comments,
                quote=quote,
            )
