# -*- coding: utf-8 -*-
import json

import scrapy
from lxml import etree
from u17.items import *


class ComicSpider(scrapy.Spider):
    """Spider for u17.com: scrapes the paginated comic listing via the site's
    AJAX endpoint and (optionally, currently disabled) each comic's chapter list.
    """
    name = 'comic'
    allowed_domains = ['u17.com']
    start_urls = ['http://u17.com/']
    # Highest listing page number to request (inclusive).
    max_page = 2

    def start_requests(self):
        """Build one POST request per listing page against the comic-list AJAX endpoint."""
        url = 'http://www.u17.com/comic/ajax.php?mod=comic_list&act=comic_list_new_fun&a=get_comic_list'
        headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"
        }
        # "no" appears to mean "no filter" for each facet; the meaning of
        # order=2 is not visible here — presumably a sort mode, verify on site.
        data = {"data[group_id]": "no",
                "data[theme_id]": "no",
                "data[is_vip]": "no",
                "data[accredit]": "no",
                "data[color]": "no",
                "data[comic_type]": "no",
                "data[series_status]": "no",
                "data[order]": "2",
                "data[page_num]": "1",
                "data[read_mode]": "no"}
        # End at max_page + 1 so page `max_page` itself is requested — the
        # original `range(1, self.max_page)` stopped one page short and never
        # fetched the last page.
        for page in range(1, self.max_page + 1):
            self.logger.debug('requesting comic list page %s', page)
            data["data[page_num]"] = str(page)
            # FormRequest serializes the form data at construction time, so
            # reusing and mutating `data` across iterations is safe.
            yield scrapy.FormRequest(url=url, headers=headers, formdata=data,
                                     method='POST', callback=self.parse)

    def parse(self, response):
        """Parse the JSON comic-list response and yield one U17Item per comic."""
        json_data = json.loads(response.text)
        for comic in json_data['comic_list']:
            item = U17Item()
            item['comic_id'] = comic['comic_id']
            item['name'] = comic['name']
            item['cover'] = comic['cover']
            item['line1'] = comic['line1']
            item['line2'] = comic['line2']
            yield item

            # Follow-up crawl of each comic's chapter page (intentionally disabled):
            # chap_url = f'http://www.u17.com/comic/{str(comic["comic_id"])}.html'
            # yield scrapy.Request(url=chap_url, callback=self.parse_chapter)

    def parse_chapter(self, response):
        """Parse a comic detail page and yield one U17ChapterItem per chapter link."""
        # The comic id is the URL's file stem, e.g. .../12345.html -> "12345".
        comic_id = response.url.split('/')[-1].split('.')[0]
        etree_html = etree.HTML(response.text)
        a_list = etree_html.xpath('//ul[@id="chapter"]/li/a')
        for a in a_list:
            item = U17ChapterItem()
            # NOTE(review): xpath() returns a (possibly empty) list here, so
            # `link`/`name` are stored as lists — if downstream pipelines expect
            # scalar strings, take element [0]; left as-is to preserve existing
            # behavior.
            item['link'] = a.xpath('./@href')
            item['name'] = a.xpath('./@title')
            item['comic_id'] = comic_id
            yield item
