# -*- coding: utf-8 -*-

"""
LP数据爬取
"""
from ..items import LpItem
import scrapy
import json
import random
import time
import logging
logging.getLogger().setLevel(logging.INFO)

class LpSpider(scrapy.Spider):
    """Crawl LP (limited partner) data from the itjuzi.com JSON API.

    Posts to ``start_url`` once per page and yields one :class:`LpItem`
    per successful response, routed to ``LpPipeline`` only.
    """
    name = 'lp'
    allowed_domains = ['www.itjuzi.com']
    # API endpoint for LP data
    start_url = 'https://www.itjuzi.com/api/lp'
    # number of pages to request
    MAX_PAGE = 1
    # random pause in seconds before crawling, to avoid an IP ban
    # (evaluated once at import time, so every run of this process
    # reuses the same delay value)
    idle_time = random.randint(0, 5)
    # route items from this spider through the LP pipeline only
    custom_settings = {
        'ITEM_PIPELINES': {'itorange.pipelines.LpPipeline': 300}
    }

    def start_requests(self):
        """Yield one POST request per page, after a short anti-ban pause."""
        # brief pause to avoid hammering the site and getting the IP banned
        time.sleep(self.idle_time)

        for page in range(1, self.MAX_PAGE + 1):
            # NOTE(review): the page number is only stored in request meta;
            # nothing puts it into the POST body or query string, so the
            # server may never see it — confirm against the API contract
            # (it may need to go into `body=`).
            yield scrapy.Request(
                url=self.start_url,
                method='POST',
                meta={'page': page},
                callback=self.parse,
                dont_filter=True,
            )

    def parse(self, response):
        """Parse the JSON response and yield an LpItem on success.

        Silently returns (logging the reason) on malformed JSON, a
        non-200 application code, or a payload missing the expected keys.
        """
        # decode the full response body as JSON
        try:
            result = json.loads(response.body)
        except (ValueError, TypeError):
            # keep the traceback instead of discarding the exception object
            logging.exception('返回报文解析错误！')
            return

        # bail out unless the API reports success; .get() avoids a
        # KeyError crash when 'code' is absent from an otherwise-valid body
        if result.get('code') != 200:
            return

        # extract the LP payload; the nested keys may be missing
        try:
            lp_item = LpItem()
            lp_item['lp_info'] = result['data']['data']
            yield lp_item
        except (KeyError, TypeError):
            logging.exception('未发现LP信息！')
            return
