# -*- coding: utf-8 -*-
"""
死亡公司库爬取
"""
from ..items import DeathCompanyItem
import scrapy
import json
import random
import time
import logging
# Module-import side effect: raise the ROOT logger's threshold to INFO so the
# logging.error(...) calls in the spider below are emitted.
# NOTE(review): this mutates global logging state for the whole process at
# import time; Scrapy's LOG_LEVEL setting is the usual place for this.
logging.getLogger().setLevel(logging.INFO)

# 死亡公司库爬虫
# Spider for the "dead company" (closure) database on itjuzi.com.
class DeathCompanySpider(scrapy.Spider):
    name = 'death_company'
    allowed_domains = ['www.itjuzi.com']
    # API endpoint listing closed companies
    start_url = 'https://www.itjuzi.com/api/closure'
    # Total number of pages to request
    MAX_PAGE = 630
    # Random pause (seconds) before crawling starts, to reduce the chance of
    # an IP ban. NOTE(review): evaluated once at class-definition time, so the
    # same delay applies to the entire run, not per request.
    idle_time = random.randint(0, 5)
    # Spider-specific pipeline / proxy-middleware wiring
    custom_settings = {
        'ITEM_PIPELINES': {'itorange.pipelines.DeathCompanyPipeline': 300},
        'DOWNLOADER_MIDDLEWARES': {'itorange.middlewares.ProxyMiddleware': 543},
    }

    def start_requests(self):
        """Yield one request per page of the closure API.

        The page number is carried in ``meta`` only, not in the URL.
        NOTE(review): presumably the proxy middleware or the server uses it;
        confirm — otherwise every request fetches the same page.
        """
        # Brief pause before the first request to avoid hammering the site.
        # NOTE(review): time.sleep blocks the whole Scrapy reactor; the
        # DOWNLOAD_DELAY setting is the non-blocking way to throttle.
        time.sleep(self.idle_time)

        for page in range(1, self.MAX_PAGE + 1):
            yield scrapy.Request(
                url=self.start_url,
                meta={'page': page},
                callback=self.parse,
                dont_filter=True,  # same URL is requested repeatedly
            )

    def parse(self, response):
        """Parse one API response and yield a ``DeathCompanyItem``.

        Logs and skips the page when the body is not valid JSON, when the
        API status code is not 200, or when the expected keys are missing.
        """
        # Decode the full payload; json.JSONDecodeError is a ValueError
        # subclass, so this stays narrow instead of the former bare
        # `except Exception`.
        try:
            result = json.loads(response.body)
        except ValueError as e:
            logging.error('返回报文解析错误！%s', e)
            return

        # Only proceed on an API-level success code; .get avoids an
        # unhandled KeyError when the payload lacks 'code'.
        if result.get('code') != 200:
            return

        # Extract the company records; TypeError covers data/info being
        # null instead of the expected objects.
        try:
            company_info = result['data']['info']
        except (KeyError, TypeError) as e:
            logging.error('未发现死亡公司信息！%s', e)
            return

        death_company_item = DeathCompanyItem()
        death_company_item['company_info'] = company_info
        yield death_company_item

