# coding:utf-8

from scrapy.spiders import CrawlSpider, Rule
from scrapy.selector import Selector
from scrapy.linkextractors import LinkExtractor
from multi.items import MovieItem,MovieComment
from scrapy import log

import re

# Regexes for detail-page fields that appear only as plain text inside the
# #info block (no dedicated xpath-addressable element on Douban pages).
AREA = re.compile(r"制片国家/地区:</span> (.+?)<br>")  # production country/region
LANGUAGE = re.compile(r"语言:</span> (.+?)<br>")  # spoken language(s)
LENGTH = re.compile(r"片长:</span> (.+?)<br>")  # runtime, e.g. "142分钟"
# NOTE(review): unused in this file; the trailing lazy group `(.+?)` has no
# following anchor, so it matches exactly one character -- confirm intent.
MOMENT_TIME = re.compile(r"title=(.+?)")

class MovieSpiderClass(CrawlSpider):
    """Crawl the Douban Top-250 movie list.

    Starts from the ten paginated list pages and follows links matching
    ``/subject/<id>/`` to each movie's detail page.  The active rule
    yields ``MovieComment`` items via :meth:`parse_comment`;
    :meth:`parse_movie` (yielding ``MovieItem``) is kept available but
    is not currently wired into ``rules``.
    """

    name = 'movie'

    allowed_domains = ['movie.douban.com']

    # The Top-250 list is paginated in steps of 25 (start=25 .. 225);
    # the first page has no query string.
    start_urls = ['https://movie.douban.com/top250'] + [
        'https://movie.douban.com/top250?start=%d&filter=' % offset
        for offset in range(25, 250, 25)
    ]

    rules = (
        Rule(LinkExtractor(allow=r"/subject/\d+/$"), callback="parse_comment", follow=True),
    )

    def parse_movie(self, response):
        """Parse one movie detail page into a :class:`MovieItem`.

        :param response: detail-page response (``/subject/<id>/``).
        :yields: a single populated ``MovieItem``.

        Any unexpected parsing failure is logged (not re-raised) so one
        bad page does not abort the crawl.
        """
        item = MovieItem()

        try:
            # Ranking badge, rendered as "No.1" -> keep only the digits.
            item['Movie_Id'] = "".join(
                response.xpath('//*[@id="content"]/div/span[@class="top250-no"]/text()').extract()
            ).replace("No.", "")

            item['Movie_name'] = "".join(
                response.xpath('//*[@id="content"]/h1/span[@property="v:itemreviewed"]/text()').extract())

            # Year is rendered as "(1994)" -- strip the parentheses.
            # (A missing span simply yields "", so no try/except needed.)
            item['Movie_year'] = "".join(
                response.xpath('//*[@id="content"]/h1/span[@class="year"]/text()').extract()
            ).replace("(", "").replace(")", "")

            item['Movie_director'] = "/".join(
                response.xpath('//*[@id="info"]/span/span/a[@rel="v:directedBy"]/text()').extract())

            item['Movie_screenWriter'] = "/".join(
                response.xpath('//*[@id="info"]/span[2]/span[2]/a/text()').extract())

            item['Movie_performer'] = "/".join(
                response.xpath("//a[@rel='v:starring']/text()").extract())

            item['Movie_type'] = "/".join(
                response.xpath('//*[@id="info"]/span[@property="v:genre"]/text()').extract())

            # Country and language appear as bare text inside #info, so
            # they are pulled out with the module-level regexes.  Extract
            # the block's HTML once and reuse it (was extracted twice).
            info_html = "".join(response.xpath("//div[@id='info']").extract())

            area_match = AREA.search(info_html)
            item['Movie_nation'] = (
                "/".join(part.strip() for part in area_match.group(1).split("/"))
                if area_match is not None else "")

            language_match = LANGUAGE.search(info_html)
            item['Movie_language'] = (
                "/".join(part.strip() for part in language_match.group(1).split("/"))
                if language_match is not None else "")

            item['Movie_releaseDate'] = "/".join(
                response.xpath('//*[@id="info"]/span[@property="v:initialReleaseDate"]/text()').extract())

            item['Movie_Movielength'] = "".join(
                response.xpath('//*[@id="info"]/span[@property="v:runtime"]/text()').extract())

            # .extract()/join never raise here; the old try/except could
            # never trigger.  Fall back to "0" on an *empty* result instead.
            item['Movie_rating'] = "".join(response.xpath(
                '//*[@class="rating_self clearfix"]/strong/text()').extract()) or "0"
            item['Movie_ratedCount'] = "".join(response.xpath(
                '//*[@class="rating_self clearfix"]/div/div[@class="rating_sum"]/a/span/text()').extract()) or "0"

            # Prefer the structured v:summary span; fall back to any span
            # under #link-report (long synopses are split differently).
            introduction = response.xpath(
                '//*[@id="link-report"]/span[@property="v:summary"]/text()').extract()
            if introduction:
                item['Movie_synopsis'] = "".join(introduction).strip().replace(
                    "\r\n", "").replace("\u3000", "")
            else:
                item['Movie_synopsis'] = "".join(
                    response.xpath('//*[@id="link-report"]/span/text()').extract()
                ).strip().replace("\r\n", " ")

            yield item

        except Exception as error:
            # scrapy.log is a module, not a callable -- the original
            # log(error) raised TypeError.  Use the spider's logger.
            self.logger.error("parse_movie failed for %s: %s", response.url, error)

    def parse_comment(self, response):
        """Parse the hot-comments section of one movie detail page.

        :param response: detail-page response (``/subject/<id>/``).
        :yields: one :class:`MovieComment` per entry in the
            ``#hot-comments`` list; a failing entry is logged and skipped.
        """
        movie_id = "".join(
            response.xpath('//*[@id="content"]/div/span[@class="top250-no"]/text()').extract()
        ).replace("No.", "")

        movie_name = "".join(
            response.xpath('//*[@id="content"]/h1/span[@property="v:itemreviewed"]/text()').extract())

        # Hot-comment entries on the detail page.
        comment_list = response.xpath(
            '//*[@id="content"]/div[@class="grid-16-8 clearfix"]/div[@class="article"]'
            '/div[@id="comments-section"]/div[@class="mod-bd"]/div[@class="tab-bd"]'
            '/div[@id="hot-comments"]/div[@class="comment-item"]')

        for comment_item in comment_list:
            try:
                item = MovieComment()

                item['Movie_comment_movie_id'] = movie_id
                item['Movie_comment_movie_name'] = movie_name

                item['Movie_comment_userName'] = "".join(comment_item.xpath(
                    './/*[@class="comment"]/h3/span[@class="comment-info"]/a/text()').extract())

                item['Movie_comment_content'] = "".join(comment_item.xpath(
                    './/*[@class="comment"]/p/span[@class="short"]/text()').extract())

                # NB: the trailing space in "comment-time " matches the
                # site's actual class attribute -- do not "fix" it.
                item['Movie_comment_Time'] = "".join(comment_item.xpath(
                    './/*[@class="comment"]/h3/span[@class="comment-info"]'
                    '/span[@class="comment-time "]/@title').extract())

                item['Movie_comment_Votes'] = "".join(comment_item.xpath(
                    './/*[@class="comment"]/h3/span[@class="comment-vote"]'
                    '/span[@class="votes"]/text()').extract())

                yield item

            except Exception as error:
                # See parse_movie: scrapy.log is not callable.
                self.logger.error("parse_comment failed for %s: %s", response.url, error)