import scrapy
from scrapy_splash import SplashRequest

from jayce.items import Ssr1Item


class Spa1Spider(scrapy.Spider):
    """Spider for https://spa1.scrape.center/ (a JS-rendered SPA).

    Every request is routed through Splash's ``render.html`` endpoint so the
    JavaScript-built content exists in the response before parsing. Flow:
    start page -> read total page count -> each listing page -> each film
    detail page -> yield one ``Ssr1Item`` per film.
    """

    name = 'spa1'
    allowed_domains = ['spa1.scrape.center']
    start_urls = ['https://spa1.scrape.center/']
    # Arguments sent with every Splash render.html request.
    splash_args = {
        'wait': 10,           # seconds to let the SPA finish rendering
        'http_method': 'GET',
        'render_all': 1,      # render the whole page, not just the viewport
    }

    # Lua script for Splash's ``execute`` endpoint that simulates clicking
    # the "next page" button via injected JS.
    # NOTE(review): currently unused — pagination is driven by direct
    # /page/<n> URLs in parse_total_pages instead. Kept for reference.
    script = """
    function main(splash,args)
      splash.images_enabled = false
      assert(splash:go(args.url))
      assert(splash:wait(10))
      splash:runjs(args.script)
      assert(splash:wait(10))
      return {html=splash:html()}
    end
    """

    def start_requests(self):
        """Render each start URL with Splash and hand the result to
        :meth:`parse_total_pages`."""
        for url in self.start_urls:
            yield SplashRequest(url=url, endpoint='render.html',
                                args=self.splash_args,
                                callback=self.parse_total_pages)

    def parse_total_pages(self, response):
        """Read the total page count from the last pagination item, then
        schedule a Splash render of every listing page."""
        total_pages = int(response.xpath(
            '//*[@id="index"]/div[2]/div/div/div/ul/li[last()]/text()')[0].extract())

        base_page_url = 'https://spa1.scrape.center/page/'
        # Listing pages are 1-based: /page/1 .. /page/<total_pages>.
        for page_num in range(1, total_pages + 1):
            yield SplashRequest(url=base_page_url + str(page_num),
                                endpoint='render.html', args=self.splash_args,
                                callback=self.parse_item)

    def parse_item(self, response):
        """Collect every film-detail link on one listing page and schedule a
        Splash render of each detail page for :meth:`parse`."""
        links = [response.urljoin(link.extract()) for link in
                 response.xpath("//*[@id='index']/div[1]/div/div/div/div/div[2]/a/@href")]
        for link in links:
            yield SplashRequest(url=link, endpoint='render.html',
                                args=self.splash_args, callback=self.parse)

    def parse(self, response):
        """Scrape one film detail page and yield a populated ``Ssr1Item``.

        Raises IndexError if a required field is absent from the page; only
        the show-time field is treated as optional.
        """
        film_name = response.xpath(
            '//*[@id="detail"]/div[1]/div/div/div[1]/div/div[2]/a/h2/text()')[0].extract()
        film_types = [t.extract().strip() for t in
                      response.xpath("//div[@class='categories']//span/text()")]
        # The second info row's spans hold country and duration text.
        span_texts = [t.extract() for t in response.xpath(
            "//*[@id='detail']/div[1]/div/div/div[1]/div/div[2]/div[2]/span/text()")]
        film_country = span_texts[0]
        # [:-3] strips a trailing unit suffix from the duration text
        # (presumably " 分钟" — TODO confirm against a live page).
        film_duration = span_texts[2].strip()[:-3]
        # Release date is missing on some pages, so guard the lookup.
        film_show_time = response.xpath(
            '//*[@id="detail"]/div[1]/div/div/div[1]/div/div[2]/div[3]/span/text()')
        if film_show_time:
            film_show_time = film_show_time[0].extract()[:-3].strip()
        else:
            film_show_time = ''
        film_synopsis = response.xpath(
            '//*[@id="detail"]/div[1]/div/div/div[1]/div/div[2]/div[4]/p/text()')[
            0].extract().strip()
        film_director = response.xpath(
            '//*[@id="detail"]/div[2]/div/div/div/div/div/p/text()')[0].extract()
        film_score = response.xpath(
            '//*[@id="detail"]/div[1]/div/div/div[1]/div/div[3]//p/text()')[
            0].extract().strip()
        yield Ssr1Item(film_name=film_name,
                       film_types=film_types,
                       film_country=film_country,
                       film_duration=film_duration,
                       film_show_time=film_show_time,
                       film_synopsis=film_synopsis,
                       film_director=film_director,
                       film_score=film_score)
