# -*- coding: utf-8 -*-

import scrapy
from scrapy.http import Request

import execjs

import os
from urllib.parse import unquote, urljoin
import re

from ComicSpider.items import ComicspiderItem


# run eg: scrapy crawl dmzj -a book=mix -a start_chapter=61 -a end_chapter=62
class DmzjSpider(scrapy.Spider):
    """Crawl chapters of one comic on manhua.dmzj.com and yield
    ComicspiderItem objects with image URLs for the download pipeline.

    Spider arguments (passed with ``-a``):
        book:          path segment of the comic on manhua.dmzj.com (required)
        start_chapter: first chapter number to download (default 1)
        end_chapter:   last chapter number to download (default 9999999)
    """
    name = 'dmzj'
    allowed_domains = ['dmzj.com']

    url_base = "http://manhua.dmzj.com/"
    image_url = "http://images.dmzj.com"

    start_urls = (url_base,)

    # First run of digits in a chapter label / path segment — compiled once
    # instead of re-compiling inside the parse loops.
    _CHAPTER_RE = re.compile(r"(\d+)")

    def __init__(self, *args, **kwargs):
        super(DmzjSpider, self).__init__(*args, **kwargs)
        self.book = kwargs.get('book')
        if not self.book:
            # Fail fast with a clear message; otherwise the concatenation
            # below raises an opaque TypeError on None.
            raise ValueError("missing required spider argument: -a book=<name>")

        self.start_chapter = int(kwargs.get('start_chapter', 1))
        self.end_chapter = int(kwargs.get('end_chapter', 9999999))

        self.start_urls = (self.start_urls[0] + self.book,)
        self.logger.info("start_urls: %s", self.start_urls)

    def parse(self, response):
        """Parse the comic's index page and request each chapter in
        [start_chapter, end_chapter]."""
        for sel in response.xpath('//div[@class="cartoon_online_border"]/ul/li'):
            link = sel.xpath('a/@href').extract()[0]
            text = sel.xpath('a/text()').extract()[0]
            m = self._CHAPTER_RE.search(text)
            if m is None:
                # Link text without any digits (e.g. specials) — skip instead
                # of crashing on m.group().
                self.logger.warning("no chapter number in %r, skipping", text)
                continue
            chapter = int(m.group(1))
            if chapter < self.start_chapter:
                self.logger.debug("skip chapter %d", chapter)
                continue

            if chapter > self.end_chapter:
                # Chapter list is in ascending order, so stop at the first
                # chapter past the requested range.
                self.logger.debug("stop before chapter %d", chapter)
                break

            url = urljoin(self.url_base, link)
            self.logger.info("chapter url: %s", url)
            yield Request(url, callback=self.parse_image)

    def parse_image(self, response):
        """Extract the per-page image URLs from the packed JS on a chapter
        page and yield one ComicspiderItem per page."""
        body = response.body.decode('utf8')

        # The page embeds its page list in an "eval(function(...)...)" JS
        # packer on a single line; locate that line.
        s_idx = body.find("eval(function(")
        e_idx = body.find("\n", s_idx)
        if s_idx == -1 or e_idx == -1:
            self.logger.error("packed page list not found in %s", response.url)
            return

        # Skip the leading "eval(" (5 chars) so execjs returns the packed
        # expression's value; e_idx - 1 also drops the final character before
        # the newline (statement terminator) so the expression evaluates.
        pages = execjs.eval(body[s_idx + 5: e_idx - 1])
        # Strip the "var pages=pages='[" prefix (18 chars) and "]'；" suffix
        # (3 chars), and unescape JS "\/" so we get one quoted path per page.
        pages = pages[18: -3].replace('\\/', '/').split(',')
        self.logger.debug("pages: %s", pages)

        for page in pages:
            link = page.strip('"')
            # The parent directory name carries the (URL-encoded) chapter name.
            chapter = unquote(os.path.basename(os.path.dirname(link)))
            m = self._CHAPTER_RE.search(chapter)
            if m is None:
                self.logger.warning("no chapter number in %r, skipping", chapter)
                continue
            item = ComicspiderItem()
            item["book"] = self.book
            item["chapter"] = m.group(1).rjust(3, '0')  # zero-pad to 3 digits
            item["file_urls"] = [urljoin(self.image_url, link)]
            item["file_name"] = os.path.basename(link)
            # images.dmzj.com rejects requests without a matching Referer.
            item["referer"] = response.url
            yield item
