#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time    : 2018/1/28 0028 14:18
# @Author  : Arliki
# @File    : comment.py

import json, re, scrapy, time
from scrapy_redis.spiders import RedisSpider
from onepiece.items import PageItem


class PageSpider(RedisSpider):
    """Distributed (scrapy-redis) spider that walks manga reader pages and
    yields one ``PageItem`` per page image.

    Start URLs are pushed into the Redis list ``onepiece:page_urls``, e.g.::

        lpush onepiece:page_urls http://www.fzdm.com/manhua/02/406/ \
            http://www.fzdm.com/manhua/02/407/ ...
    """
    name = "img_down"
    redis_key = "onepiece:page_urls"
    custom_settings = {
        'REDIS_PARAMS': {
            'db': 1,
            # NOTE(review): hard-coded credential in source; move to settings/env.
            'password': '5tgbnhy67ujm'
        }
    }

    def __init__(self, *args, **kwargs):
        # Optional domain restriction via ``-a domain=a.com,b.com``.
        domain = kwargs.pop("domain", "")
        # BUG FIX: on Python 3 ``filter()`` is a lazy iterator; Scrapy's offsite
        # middleware iterates allowed_domains repeatedly, so it must be a list
        # (the iterator would be exhausted after the first pass).
        self.allowed_domains = list(filter(None, domain.split(",")))
        super(PageSpider, self).__init__(*args, **kwargs)

    def parse(self, response):
        # Funnel every start URL through the shared down_pic callback;
        # dont_filter because this exact URL was just fetched by parse().
        yield scrapy.Request(response.url, self.down_pic, dont_filter=True)

    def down_pic(self, response):
        """Extract the page image URL from a reader page and follow the
        "next page" / "next chapter" link.

        Yields:
            PageItem: download info for the current page image.
            scrapy.Request: request for the next page/chapter, if any.
        """
        bodys = response.body.decode('utf-8')
        # Heading like "<volume>话...<page>" -> keep digits, dash out the rest.
        h1 = response.xpath('//div[@id="pjax-container"]/h1/text()').extract()[0].strip()
        h1 = re.sub(r'[\D]', '-', h1)
        item = PageItem()
        # Image-download item info (best effort: skip the item on any parse
        # failure, but log it instead of silently swallowing).
        try:
            item['urls'] = response.url
            names = re.search(r'var mhurl = "(.*?)";', bodys).group(1)
            # BUG FIX: ``getlist('Cookie')`` returns a list of byte strings;
            # indexing it with 'picHost' raised TypeError on every call, so no
            # item was ever yielded. Parse the picHost value out of the Cookie
            # header instead.
            # NOTE(review): assumes the Cookie header carries ``picHost=<host>``
            # set by earlier middleware — confirm against the cookie source.
            cookie = b'; '.join(response.request.headers.getlist('Cookie')).decode('utf-8')
            picHost = re.search(r'picHost=([^;]+)', cookie).group(1)
            # Images from recent years live on the p1 host, older ones on p0.
            if any(year in names for year in ('2015/', '2016/', '2017/', '2018/')):
                pro = 'http://' + picHost + 'p1.xiaoshidi.net/'
            else:
                pro = 'http://' + picHost + 'p0.xiaoshidi.net/'
            item['down_url'] = pro + names
            # Second digit group of the mangled heading is the page number;
            # the first page of a chapter has no explicit number.
            page = re.search(r'-*(\d*)-*(\d*)', h1).group(2)
            if len(page) == 0:
                page = '1'
            item['p_name'] = page + "页"
            item['f_path'] = re.search(r'-*(\d*)-*(\d*)', h1).group(1)
            if int(item['f_path']) < 50:
                item['f_path'] += "卷(合集)"
            yield item
        except (AttributeError, IndexError, KeyError, TypeError, ValueError):
            # AttributeError: re.search miss; IndexError: missing xpath hit;
            # ValueError: non-numeric f_path. Skip this page's item.
            self.logger.exception("failed to build PageItem for %s", response.url)
        nu = response.url
        # Non-start pages have "indexN.html" URLs and two nav buttons
        # (prev/next); the start page only has one, so extract()[1] raises.
        try:
            nu = re.search(r'(.*?)index', response.url).group(1)
            nn = response.xpath('//a[@class="pure-button pure-button-primary"]/text()').extract()[1]
            purl = response.xpath('//a[@class="pure-button pure-button-primary"]/@href').extract()[1]
        except (AttributeError, IndexError):
            nn = response.xpath('//a[@class="pure-button pure-button-primary"]/text()').extract()[0]
            purl = response.xpath('//a[@class="pure-button pure-button-primary"]/@href').extract()[0]
        # Decide what the nav button leads to.
        if nn == "下一页":
            # Next page within the same chapter: href is relative to the
            # chapter directory.
            purl = nu + purl
            yield scrapy.Request(purl, self.down_pic, dont_filter=True)
        elif nn == "下一话吧":
            # Next chapter: the site's relative "../N/" link is sometimes
            # stale, so recompute from the current chapter number.
            now_page = re.search(r'\/(\d*)\/index', response.url).group(1)
            nxt = re.search(r'\.\.\/(.*?)\/', purl).group(1)
            base = 'http:/' + '/'.join(response.url.split('/')[1:-2]) + '/'
            if int(now_page) > int(nxt):
                purl = base + nxt + '/'
            else:
                purl = base + str(int(nxt) + 1) + '/'
            yield scrapy.Request(purl, self.down_pic, dont_filter=True)
