import scrapy
import json
import js2py

from scrapy_qimanwu.items import ScrapyQimanwuItem, ScrapyComicItem
# Load the Scrapy project settings
from scrapy.utils.project import get_project_settings


class QimanwuSpider(scrapy.Spider):
    """Scrape the chapter list and per-chapter image URLs for one comic
    hosted on qiman57.com.

    The comic to crawl is chosen via the ``COMIC_ID`` project setting.
    Pipeline items emitted:
      * one ``ScrapyComicItem`` carrying the comic's display name
      * one ``ScrapyQimanwuItem`` per chapter with its image list
    """

    settings = get_project_settings()
    # COMIC_ID selects which comic on the site to crawl (part of the URL path).
    comic_id = settings['COMIC_ID']
    name = 'qimanwu'
    allowed_domains = ['www.qiman57.com']
    start_urls = ['http://www.qiman57.com/{0}/'.format(comic_id)]

    def parse(self, response):
        """Parse the comic index page.

        Emits the comic-name item, collects the chapter links visible in the
        HTML, then POSTs to the AJAX endpoint that serves the remaining
        (collapsed) part of the chapter list.
        """
        name = response.xpath('//h1[@class="name_mh"]/text()').extract_first()
        yield ScrapyComicItem(comic_name=name)

        chapter_list = []
        for a in response.xpath('//div[@id="chapter-list1"]/a'):
            href = a.xpath('./@href').extract_first()
            chapter_list.append({
                # The chapter id is embedded in the link: .../<7-char id>.html
                'chapterid': href[-12:-5],
                'chaptername': a.xpath('./text()').extract_first(),
            })

        # FormRequest requires string form values; COMIC_ID may be declared
        # as an int in settings.py, so coerce explicitly.
        yield scrapy.FormRequest(
            url='http://www.qiman57.com/bookchapter/',
            formdata={'id': str(self.comic_id), 'id2': '1'},
            callback=self.parse_second,
            meta={'name': name, 'chapter_list': chapter_list},
        )

    def parse_second(self, response):
        """Merge the AJAX chapter list with the HTML one and request every
        chapter page, numbering chapters in reading order."""
        name = response.meta['name']
        chapter_list = response.meta['chapter_list']
        # The endpoint returns a JSON array of {'chapterid', 'chaptername'}
        # objects with the same shape as the HTML-scraped entries.
        chapter_list.extend(json.loads(response.text))
        # The site lists newest chapters first; crawl oldest first so the
        # sequence number `xh` matches reading order.
        chapter_list.reverse()

        for xh, chapter in enumerate(chapter_list, start=1):
            chapterid = chapter['chapterid']
            href = self.start_urls[0] + chapterid + '.html'
            yield scrapy.Request(
                url=href,
                callback=self.parse_third,
                meta={'name': name, 'chapterid': chapterid,
                      'chaptername': chapter['chaptername'],
                      'href': href, 'xh': xh},
            )

    def parse_third(self, response):
        """Extract the image URL list from the obfuscated ``eval`` <script>
        on a chapter page and emit the chapter item."""
        # BUG FIX: the original kept `str_body` in a module-level global.
        # Scrapy handles responses interleaved, so a chapter page with no
        # matching <script> would silently reuse the script of a previously
        # parsed chapter (or raise NameError on the very first page).
        # Keep the state local and skip the page explicitly instead.
        str_body = None
        for script in response.xpath('//script'):
            content = script.xpath('./text()').extract_first()
            if content is not None and 'eval' in content:
                str_body = content
        if str_body is None:
            self.logger.warning('no eval script found on %s', response.url)
            return

        # NOTE(review): this evaluates site-provided JavaScript via js2py.
        # js2py interprets the code in Python rather than a real JS engine,
        # but it is still untrusted input — acceptable for a scraper, flagged
        # for awareness.
        img_list = js2py.eval_js(str_body)
        yield ScrapyQimanwuItem(
            name=response.meta['name'],
            chapterid=response.meta['chapterid'],
            chaptername=response.meta['chaptername'],
            href=response.meta['href'],
            xh=response.meta['xh'],
            imgs=img_list,
        )
