# -*- coding: utf-8 -*-
import scrapy
import json
from spiders.items import HuobiItem
import time


class BiBoxSpider(scrapy.Spider):
    """Spider that fetches Bibox announcements from its Zendesk help center
    and yields one ``HuobiItem`` per notice.

    NOTE(review): several values (``prefix_url``, ``author``) point at
    huobi.pro even though this spider targets bibox.com — this looks copied
    from the Huobi spider; confirm the intended host before relying on the
    ``source`` field.
    """
    # Spider name
    name = "bibox"
    # Domains the spider is allowed to crawl
    allowed_domains = ["bibox.com"]
    # Base URL template; {language} is filled in per request
    base_url = 'https://bibox.zendesk.com/hc/{language}'
    # Prefix used to build the item's ``source`` link
    prefix_url = 'https://www.huobi.pro/{language}/notice_detail'
    # Per-spider pipeline configuration
    custom_settings = {
        'ITEM_PIPELINES': {
            'spiders.pipelines.HuoBiPipeline.HuoBiPipeline': 1,
        }
    }
    # start_url mapping (currently unused — kept for interface compatibility)
    start_url = {

    }
    # Category id assigned to every scraped item
    cate_id = 1
    # Default language
    language = 'zh-cn'

    def start_requests(self):
        """Yield the initial request for the announcement listing.

        BUG FIX: the original looped ``for page in range(1, 10)`` without
        ever using ``page``, yielding the same URL nine times — scrapy's
        duplicate filter silently dropped eight of them. The URL carries no
        page parameter, so a single request is the correct behavior.
        """
        yield scrapy.Request(
            url=self.base_url.format(language=self.language),
            callback=self.parse,
        )

    def parse(self, response):
        """Parse the JSON listing response and yield one item per notice.

        Assumes the response body is JSON shaped like
        ``{"data": {"items": [...]}}`` — TODO confirm against the live API,
        since the base URL is a Zendesk HC page which normally serves HTML.
        """
        res = json.loads(response.body)
        for notice in res['data']['items']:
            item = HuobiItem()
            item['title'] = notice['title']
            # Crawl timestamp (seconds since epoch), not the notice's own date
            item['create_time'] = int(time.time())
            # BUG FIX: concatenating a raw JSON field raises TypeError when
            # ``id`` is numeric — cast explicitly (no-op when already a str).
            item['source'] = (self.prefix_url.format(language=self.language)
                              + '/id?' + str(notice['id']))
            item['content'] = notice['content']
            item['is_top'] = notice['topNotice']
            item['weight'] = notice['weight']
            item['cate_id'] = self.cate_id
            item['desc'] = ''
            item['author'] = 'huobi.pro'
            item['click'] = ''
            item['tag'] = ''
            yield item
