# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
import json

# Module-level accumulator: one list of movie dicts per scraped page.
# NOTE(review): shared mutable global state — `parse` appends and dumps it
# to jsonFile1.json, but the append in `next_page` is commented out, so as
# written only the first page's data is persisted; confirm intent.
result_movies = []
class DoubanSpider(CrawlSpider):
    """Scrape the Douban Top 250 movie list (https://movie.douban.com/top250).

    For each movie the spider collects director / year / country / type /
    actor and, where the page provides them, name / appraise (star rating) /
    inq (one-line quote).  Results accumulate in the module-level
    ``result_movies`` list and are written to ``jsonFile1.json`` once the
    crawl finishes.
    """
    name = 'douban'
    allowed_domains = ['movie.douban.com']
    start_urls = ['https://movie.douban.com/top250']

    def start_requests(self):
        """Issue the request for page 1 of the Top 250 list."""
        yield scrapy.Request(
            self.start_urls[0],
            callback=self.parse,
        )

    @staticmethod
    def _extract_movies(response):
        """Parse one Top-250 listing page into a list of movie dicts.

        Shared by ``parse`` and ``next_page`` (previously ~70 duplicated
        lines in each).  Returns the list of dicts for this page.
        """
        # Chinese title: first <span> inside each .hd link.
        name_list = [
            a.xpath('./span[1]/text()').get()
            for a in response.xpath('//*[@class="hd"]/a')
        ]

        movie_list = []
        for movie in response.xpath('//*[@class="bd"]/p'):
            # First text node looks like "导演: X ... 主演: Y";
            # second looks like "year / country / genre ...".
            # `or ""` guards against .get() returning None (empty node).
            astr = (movie.xpath('./text()').get() or "").split()
            bstr = (movie.xpath('./text()[2]').get() or "").split()
            if not astr:
                continue
            element = {
                'director': astr[1],
                'year': bstr[0],
                'country': bstr[2],
            }
            # Genre is the token immediately after the second '/' separator.
            slash_count = 0
            for i, token in enumerate(bstr):
                if token == '/':
                    slash_count += 1
                    if slash_count == 2:
                        element['type'] = bstr[i + 1]
            # Lead actor follows the "主演:" marker; default when absent.
            element['actor'] = "未知"
            for i, token in enumerate(astr):
                if '主演:' in token:
                    element['actor'] = astr[i + 1]
            movie_list.append(element)

        # Star rating: second <span> in each .bd > div; skip missing ones.
        star_list = [
            s for s in (
                div.xpath('./span[2]/text()').get()
                for div in response.xpath('//*[@class="bd"]/div[1]')
            ) if s is not None
        ]
        inq = response.xpath('//*[@class="inq"]/text()').getall()

        # Some movies lack an .inq quote, so (as in the original code) only
        # the first len(inq) entries get name/appraise/inq attached.
        for i in range(len(inq)):
            movie_list[i]['name'] = name_list[i]
            movie_list[i]['appraise'] = star_list[i]
            movie_list[i]['inq'] = inq[i]
        return movie_list

    def parse(self, response):
        """Parse page 1, then schedule requests for pages 2-10."""
        result_movies.append(self._extract_movies(response))
        # Pages 2-10: 25 entries per page, start offsets 25, 50, ..., 225.
        # BUG FIX: the hard-coded URL list had "start=10" where "start=100"
        # was intended, so entries 101-125 were skipped and 25 entries were
        # scraped twice.
        for start in range(25, 250, 25):
            yield scrapy.Request(
                url="https://movie.douban.com/top250?start=%d&filter=" % start,
                callback=self.next_page,
            )

    def next_page(self, response):
        """Parse one follow-up page and accumulate its movies."""
        # BUG FIX: this append was commented out, so pages 2-10 were parsed
        # but never persisted anywhere.
        result_movies.append(self._extract_movies(response))

    def closed(self, reason):
        """Scrapy hook: write all accumulated results when the crawl ends.

        BUG FIX: the original dumped the file inside ``parse`` — before any
        follow-up responses had arrived — and opened it in append mode, so
        re-runs produced concatenated, invalid JSON.  Writing once here in
        'w' mode keeps the output complete and valid.
        """
        with open('jsonFile1.json', 'w', encoding='utf-8') as f:
            json.dump(result_movies, f, ensure_ascii=False, indent=4)