import json
import scrapy
import urllib.parse
from douban_spider.items import DoubanSpiderItem


class DoubanSpider(scrapy.Spider):
    """Crawl Douban's top-list JSON API for several movie categories.

    For each category, the first three result pages (20 movies each) are
    fetched from the ``top_list`` endpoint; each movie's detail page is then
    followed to fill in runtime, vote count and summary before the item is
    yielded.
    """

    name = "douban"
    allowed_domains = ["movie.douban.com"]
    # Example endpoint pages (start increments by the page size, 20):
    # https://movie.douban.com/j/chart/top_list?type=13&interval_id=100%3A90&action=&start=0&limit=20
    # https://movie.douban.com/j/chart/top_list?type=13&interval_id=100%3A90&action=&start=20&limit=20
    # type=11
    # interval_id=100:90
    # Category type codes: comedy, action, romance, sci-fi
    movie_category = ['24', '5', '13', '17']

    def start_requests(self):
        """Yield list-page requests (start=0, 20, 40) for every category."""
        cookie_str = '_vwo_uuid_v2=D4561490FBB976292457BEC436C9EB29C|749f7262dcf561efc7f4ad06dfb9a200; bid=SMzGW0d9Oqg; ll="118281"; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1713776060%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3DG_1C13A25AQtk4EdamXgOoOoCibBAFKku0xP4__Vn0iVNakFA1JHlgrh0i3C0kRr%26wd%3D%26eqid%3D87ac4a1b0004f9a300000004662625b7%22%5D; _pk_id.100001.4cf6=6db665b36b6c53ac.1713776060.; _pk_ses.100001.4cf6=1; ap_v=0,6.0; __utma=30149280.1891444727.1713776060.1713776060.1713776060.1; __utmb=30149280.0.10.1713776060; __utmc=30149280; __utmz=30149280.1713776060.1.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utma=223695111.256148559.1713776060.1713776060.1713776060.1; __utmb=223695111.0.10.1713776060; __utmc=223695111; __utmz=223695111.1713776060.1.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __yadk_uid=zGFDqHexvf3HvOFpP4hiLGVdEllXvfuV; __gads=ID=0704c761f4414a56:T=1713776061:RT=1713776061:S=ALNI_MZTpirLgGWwHTGwgplS9nq7P1iGnQ; __gpi=UID=00000df5cb9cf4de:T=1713776061:RT=1713776061:S=ALNI_MbOZvBRVjDUX3VcBGT0mWoNHCtxKw; __eoi=ID=dc6cf29253ecf097:T=1713776061:RT=1713776061:S=AA-AfjaDxN9rqX_Pq1JWP6BlcZ3l'
        # FIX: the original comprehension called .strip('=') on the whole
        # cookie string (and ignored the loop variable), producing a single
        # bogus entry.  Split each "name=value" pair on the FIRST '=' only,
        # because cookie values themselves may contain '='.
        cookie_dic = {
            pair.split('=', 1)[0]: pair.split('=', 1)[-1]
            for pair in cookie_str.split('; ')
        }
        for category in self.movie_category:
            # FIX: range(1, 4) skipped the first page (start=0); range(3)
            # requests start=0, 20, 40 as the example URLs above intend.
            for page in range(3):
                url = (
                    'https://movie.douban.com/j/chart/top_list'
                    f'?type={category}&interval_id=100%3A90&action=&start={page * 20}&limit=20'
                )
                yield scrapy.Request(url=url, callback=self.parse, cookies=cookie_dic)

    def parse(self, response):
        """Parse one JSON list page and follow each movie's detail URL.

        The response body is a JSON array of movie dicts; a partially filled
        ``DoubanSpiderItem`` is passed to ``parse_main`` via request meta.
        """
        datas = json.loads(response.text)
        for data in datas:
            item = DoubanSpiderItem()
            # Movie title
            item['title'] = data['title']
            # Keep at most the first four actors
            item['actors'] = ','.join(data['actors'][:4])
            # Movie genres
            item['types'] = ','.join(data['types'])
            # Only the first release region
            item['regions'] = ','.join(data['regions'][:1])
            # Rating score
            item['score'] = data['score']
            # Poster image URL
            item['cover_url'] = data['cover_url']
            # Release date
            item['release_date'] = data['release_date']

            # Follow the detail page to complete the remaining fields.
            yield scrapy.Request(
                url=data['url'],
                callback=self.parse_main,
                meta={
                    'item': item
                }
            )

    def parse_main(self, response):
        """Fill detail-page fields on the item passed via meta, then yield it."""
        item = response.meta['item']
        # Runtime in minutes (from the @content attribute)
        item['run_time'] = response.xpath('//span[@property="v:runtime"]/@content').extract_first()
        # Number of ratings/votes
        item['review_number'] = response.xpath('//span[@property="v:votes"]/text()').extract_first()
        # Plot summary text
        item['content'] = response.xpath('//span[@property="v:summary"]/text()').extract_first()
        yield item
