# -*- coding: utf-8 -*-
import scrapy
from douban_movie.items import DoubanMovieCommentItem
# Douban movie id (the numeric "subject" id in the movie URL)
MOVIE_ID = 26794435

class CommentsSpider(scrapy.Spider):
    """Spider that scrapes the short comments (短评) of one Douban movie.

    Crawls ``/subject/<MOVIE_ID>/comments`` page by page, yielding one
    ``DoubanMovieCommentItem`` per comment and following the "next page"
    link until it disappears.
    """
    name = 'comments'
    allowed_domains = ['movie.douban.com']
    start_urls = ['https://movie.douban.com/subject/%d/comments?status=P' % MOVIE_ID]

    def start_requests(self):
        """Issue the initial requests with explicit login cookies.

        Douban only serves comment pages beyond the first few to
        logged-in users, so the login cookies should be supplied here
        (the ``__main__`` helper at the bottom of this file converts a
        raw Cookie header string into this dict format).
        """
        # TODO: fill in with the cookies of a logged-in session.
        cookies = {}
        for url in self.start_urls:
            yield scrapy.Request(url=url, callback=self.parse, cookies=cookies)

    def parse(self, response):
        """Parse one page of Douban movie short comments.

        :param response: response for a comment-list page.
        :return: yields one ``DoubanMovieCommentItem`` per comment on the
            page, plus a ``scrapy.Request`` for the next page if present.
        """
        comment_list = response.xpath('//div[@id="comments"]/div[@data-cid]')
        for comment in comment_list:
            item = DoubanMovieCommentItem()
            # Assign directly into the item; avoids shadowing the `id` builtin.
            item['id'] = comment.xpath('@data-cid').extract_first()
            item['movie_id'] = MOVIE_ID
            item['avatar'] = comment.xpath('div[@class="avatar"]/a/img/@src').extract_first()
            # Use '' as the extract_first default so a missing node does not
            # raise AttributeError on None.strip().
            item['nick'] = comment.xpath('div[@class="comment"]/h3/span[@class="comment-info"]/a/text()').extract_first('').strip()
            # Default title means "not rated".
            item['level'] = comment.xpath('div[@class="comment"]/h3/span[@class="comment-info"]/span[contains(@class, "rating")]/@title').extract_first('未评分')
            item['vote'] = comment.xpath('div[@class="comment"]/h3/span[@class="comment-vote"]/span[@class="votes"]/text()').extract_first('0').strip()
            item['content'] = comment.xpath('div[@class="comment"]/p/span[@class="short"]/text()').extract_first('').strip()
            yield item

        # Follow pagination: the "next" anchor holds a relative query string
        # (e.g. "?start=20&...") that is appended to the comments base URL.
        next_param = response.xpath('//div[@id="paginator"]/a[@class="next"]/@href').extract_first()
        if next_param:
            next_url = 'https://movie.douban.com/subject/%d/comments%s' % (MOVIE_ID, next_param)
            yield scrapy.Request(url=next_url, callback=self.parse)

if __name__ == '__main__':
    cookie_str = '放入登陆后的请求头的cookie信息'
    cookie_params = cookie_str.split(';')
    cookie_o = {}
    for cookie_param in cookie_params:
        cookie_o[cookie_param.split('=')[0]] = cookie_param.split('=')[1]
    print(cookie_o)