import json

import scrapy

from biquyx.items import BiquyxItem
import requests
import os
import math


# Comic id on biquyx.com; appended to /Mh/<id>.html to build the start URL.
pageNum = '47'
# Local root directory where downloaded chapter images are stored (Windows path).
mainPath = 'E:\\project\\python\\biquyx\\biquyx\\imgs\\'
# Site base URL.  NOTE(review): name looks like a typo for "mainUrl" — kept
# as-is because the rest of this module (and possibly other modules) uses it.
mianUrl = 'https://www.biquyx.com'
# Chapter-index page for the comic; used as the spider's start URL.
downUrl = mianUrl + '/Mh/' + pageNum + '.html'
# POST endpoint for AJAX pagination (used only by the commented-out code below).
pageUrl = mianUrl + '/Book/getJino.html'
# Switch guarding the commented-out pagination logic in BiquyxSpider.parse.
fanye = True


def mkdir(title):
    """Create directory *title* (including parents) if it does not exist.

    Trailing backslashes are stripped first so ``C:\\a\\b\\`` and
    ``C:\\a\\b`` are treated as the same path.

    Returns:
        bool: True if the directory was created, False if it already existed.
    """
    path = title.rstrip("\\")
    try:
        # EAFP: attempt the creation and let the filesystem report an
        # existing path, avoiding the check-then-act race of the original
        # os.path.exists() + os.makedirs() sequence.
        os.makedirs(path)
    except FileExistsError:
        print(path + '目录已存在')
        return False
    print(path + '创建成功')
    return True


def download_img(img_url, path):
    """Download *img_url* to *path*, skipping files that already exist.

    Non-200 responses are silently ignored (no file is written), matching
    the caller's best-effort behaviour; network errors propagate to the
    caller, which removes any partial file.
    """
    if os.path.isfile(path):
        print(path + "已经下载，跳过")
        return
    r = requests.get(img_url, stream=True, timeout=15)
    if r.status_code == 200:
        # Context manager guarantees the handle is closed even if the
        # write fails; the original open(...).write(...) relied on
        # refcounting to close the file.
        with open(path, 'wb') as f:
            f.write(r.content)
        print(path + "下载成功！")


def parse_detail(response):
    """Parse one chapter page: create the chapter directory, download every
    page image into it, then yield the populated item.

    Expects ``response.meta['item']`` to carry ``title`` and ``name``.
    ``name`` is truncated just past the "話" marker; raises ValueError if
    "話" is absent — TODO confirm every chapter name contains it.
    """
    item = response.meta["item"]
    # Truncate right after the "話" character and strip whitespace ONCE so
    # the directory and the image paths agree.  (Previously mkdir() used
    # the stripped name while the image paths used the unstripped one, so
    # a name with surrounding whitespace pointed at a missing directory.)
    item['name'] = item['name'][:item['name'].index("話") + 1].strip()
    chapter_dir = mainPath + item['title'] + '\\' + item['name']
    mkdir(chapter_dir)
    imgs = response.xpath("//figure/img/@data-original").extract()
    for index, img in enumerate(imgs):
        print(img)
        if not isinstance(img, str):
            # Keep numbering in step with the page order even when an
            # entry is skipped (matches the original index += 1 placement).
            continue
        # Absolute URLs are used as-is; site-relative ones get the host.
        url = img if 'http' in img else mianUrl + img
        path = chapter_dir + '\\' + str(index) + '.png'
        try:
            download_img(url, path)
        except Exception:
            # Best-effort: drop the partial file so a later run retries it.
            if os.path.isfile(path):
                os.remove(path)
                print(path + "下载失败，跳过")
    print("完成下载")
    yield item


class BiquyxSpider(scrapy.Spider):
    """Spider that crawls one comic's chapter list on biquyx.com and
    schedules a request per chapter, each handled by ``parse_detail``."""
    name = 'biquyx'
    allowed_domains = ['biquyx.com']
    start_urls = [downUrl]

    def parse(self, response):
        """Parse the comic index page: create the comic's root directory
        and yield one request per chapter link found on the page."""
        title = response.xpath('//div[@class="container"]/div[@class="title"]/text()').extract_first()
        tr_list = response.xpath('//div[@id="html_box"]/div')
        last = response.xpath('normalize-space(//div[@id="chapters"]/div/div/span)').extract_first()
        # Create the comic's root directory once.
        mkdir(mainPath + title)
        # Number of pagination pages: take ALL digit characters from the
        # "last" label and divide by 10 per page.  The original expression
        # [int(s) for s in last if s.isdigit()][0] used only the FIRST
        # digit, so e.g. a count of "125" produced num == 1.  Hoisted out
        # of the loop since it does not depend on the row.
        digits = ''.join(s for s in (last or '') if s.isdigit())
        num = math.ceil(int(digits) / 10.0) if digits else 0
        for tr in tr_list:
            item = BiquyxItem()
            item['title'] = title
            item['name'] = tr.xpath('normalize-space(./a)').extract_first()
            item['last'] = last
            href = tr.xpath('./a/@href').extract_first()
            # Guard the RAW href: the original concatenated mianUrl + href
            # before checking the type, which raised TypeError on rows
            # without a link (extract_first() -> None).
            if isinstance(href, str):
                item['href'] = mianUrl + href
                yield scrapy.Request(item['href'], callback=parse_detail, meta={'item': item})
        print(tr_list)
        print(num)
        # if fanye:
        #     for i in range(0, num):
        #         headers = {"Content-Type": "application/x-www-form-urlencoded"}
        #         data = {"type": "mh", "id": pageNum, "p": str(i)}
        #         yield scrapy.FormRequest(url=pageUrl,
        #                                  formdata=data,
        #                                  headers=headers,
        #                                  callback=self.parse_post,
        #                                  meta={'title': title})
        # print("到这里")

    def parse_post(self, response):
        """Parse one AJAX pagination page (POST /Book/getJino.html) and
        yield a chapter request per row, skipping the hiatus notice."""
        title = response.meta["title"]
        for tr in response.xpath('//div[@class="item"]'):
            item = BiquyxItem()
            item['title'] = title
            item['name'] = tr.xpath('./a/text()').extract_first()
            if item['name'] != '休刊公告':
                item['name'] = item['name'][:item['name'].index("話") + 1]
                url = tr.xpath('./a/@href').extract_first()
                # Relative links need the site prefix; absolute ones pass
                # through unchanged (the original's `url = url` branch).
                if 'http' not in url:
                    url = mianUrl + url
                yield scrapy.Request(url, callback=parse_detail, meta={'item': item})

