# -*- coding: utf-8 -*-

import scrapy
import re
import json
from snowball_crawler.items import SnowballRebalancingItem
from snowball_crawler.spiders import snowball_spider

class snowball_his_rebalancing_spider( snowball_spider.snowball_base_spider):
    '''
    Spider that crawls the full rebalancing (trade) history of every
    Xueqiu (Snowball) portfolio "cube" (symbols ZH000000..ZH999999).
    '''
    name ="snowball_his_rebalancing"
    allowed_domains=["xueqiu.com"]
    # %s = cube symbol, %d = 1-based page number; 50 records per page.
    format_url = 'https://xueqiu.com/cubes/rebalancing/history.json?cube_symbol=%s&count=50&page=%d'

    # Hook implemented for the base spider; called after a successful login.
    def start_spider(self):
        # Fixed copy-paste log text: this is the rebalancing spider,
        # not the daily-holding one.
        self.logger.debug('his rebalancing login done ')
        # Enumerate every possible cube symbol and request its first page.
        # range(1000000) covers ZH000000..ZH999999 inclusive — the original
        # range(000000, 999999) stopped one short and never crawled ZH999999.
        for num in range(1000000):
            symbol = 'ZH%06d' % num
            yield scrapy.Request(url=self.format_url % (symbol, 1),
                                 headers=self.headers,
                                 meta={'symbol': symbol, 'data': []},
                                 callback=self.rebalancing_spider)

    def rebalancing_spider(self, response):
        '''
        Accumulate one page of rebalancing history for a cube, then either
        follow to the next page (capped at 20 pages per cube) or emit the
        finished item with all collected records.
        '''
        symbol = response.meta['symbol']
        dic = json.loads(response.body.decode())
        # Records gathered from earlier pages travel along in request meta.
        data = list(response.meta.get('data') or [])
        # .get() tolerates responses without a 'list' key (e.g. error
        # payloads for non-existent cubes) instead of raising KeyError.
        page_records = dic.get('list')
        if page_records:
            data.extend(page_records)
            page = dic['page']
            max_page = dic['maxPage']
            # Continue paging while the API reports more data, but never
            # fetch more than 20 pages for a single cube.
            if page < 20 and page < max_page:
                return [scrapy.Request(url=self.format_url % (symbol, page + 1),
                                       headers=self.headers,
                                       meta={'symbol': symbol, 'data': data},
                                       callback=self.rebalancing_spider)]

        item = SnowballRebalancingItem()
        item['symbol'] = symbol
        item['his_rebalancing'] = data
        return item

