# -*- coding: utf-8 -*-
import copy
import scrapy
from scrapy.http import Request
from scrapy.loader import ItemLoader
from scrapy.utils.curl import curl_to_request_kwargs
from bitcoin_crawler.items import BitcoinCrawlerItem


class BitcoinSpider(scrapy.Spider):
    """Crawl the top-100-richest-address ranking pages for one coin and
    emit one item per transaction listed on each address's detail page.

    Spider arguments (passed via ``-a``):
        start_page: first ranking page, exclusive (crawling starts at start_page + 1).
        end_page:   last ranking page, inclusive.
        coin_name:  coin slug used in the ranking URL.
    Settings consumed: ``HOST`` (site base URL) and ``CURL`` (a curl command
    whose headers/cookies are replayed on every request).
    """

    name = 'bitcoin_spider'

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        # Pass the crawler settings into __init__ explicitly so HOST/CURL
        # can be read before the spider is attached to the crawler.
        spider = cls(crawler.settings, *args, **kwargs)
        spider._set_crawler(crawler)
        return spider

    def __init__(self, settings, *args, **kwargs):
        """
        Read the page range and coin name from the spider arguments.

        :param settings: crawler settings providing ``HOST`` and ``CURL``.
        :raises KeyError: if start_page / end_page / coin_name are missing.
        :raises ValueError: if start_page / end_page are not integers.
        """
        super().__init__(*args, **kwargs)
        self.host = settings.get('HOST', '')
        self.curl = settings.get('CURL', '')
        # Derive the allowed domain from the host URL: strip the scheme,
        # then anything after the first path separator.
        self.allowed_domains = [self.host.split('://')[-1].split('/')[0]]

        self.start_page = int(kwargs['start_page'])
        self.end_page = int(kwargs['end_page'])
        self.coin_name = str(kwargs['coin_name'])

    def start_requests(self):
        """
        Yield one request per ranking page in (start_page, end_page].
        """
        # The curl command is constant for the whole run, so parse it once
        # instead of on every iteration. curl_to_request_kwargs only adds
        # a 'cookies' key when the command actually carries cookies, so use
        # .get() to avoid a KeyError on cookie-less curl commands.
        request_kwargs = curl_to_request_kwargs(self.curl)
        headers = request_kwargs.get('headers') or {}
        cookies = request_kwargs.get('cookies') or {}
        for page in range(self.start_page + 1, self.end_page + 1):
            url = '{host}zh/top-100-richest-{coin_name}-addresses-{page}.html'.format(
                host=self.host, coin_name=self.coin_name, page=page)
            # Deep-copy headers/cookies per request so downstream mutation of
            # one request cannot leak into the others; the URL is an immutable
            # string and needs no copy.
            yield Request(url=url,
                          headers=copy.deepcopy(headers),
                          cookies=copy.deepcopy(cookies),
                          dont_filter=True,
                          callback=self.parse)

    def parse(self, response, **kwargs):
        """
        Parse a ranking page and request each listed address's detail page.

        :param response: a ranking-page response from :meth:`start_requests`.
        """
        # Scrapy's HTML parser drops <tbody>, so select table rows directly.
        nodes = response.css('table.table.table-striped.bb tr')
        # Skip the first row: it is the <th> header row.
        for node in nodes[1:]:
            index = node.css('td:nth-child(1)::text').extract_first('')
            address = node.css('td:nth-child(2) > a::text').extract_first('')
            link = node.css('td:nth-child(2) > a::attr(href)').extract_first('')
            total_coin = node.css('td:nth-child(3)::text').extract_first('')
            if not link:
                # A row without a detail link (malformed markup) would make
                # Request() raise ValueError on an empty URL; skip it.
                continue
            # Fresh dict of plain strings per row — no deepcopy needed.
            meta = {
                'index': index,
                'address': address,
                'link': link,
                'total_coin': total_coin,
            }
            yield Request(
                # urljoin handles relative hrefs and leaves absolute ones as-is.
                url=response.urljoin(link),
                meta=meta,
                headers=response.request.headers,
                cookies=response.request.cookies,
                dont_filter=True,
                callback=self.parse_details
            )

    def parse_details(self, response):
        """
        Parse a single address's transaction table into items.

        :param response: the detail-page response; carries the ranking-row
            fields (index/address/link/total_coin) in ``response.meta``.
        """
        index = response.meta.get('index', '')
        address = response.meta.get('address', '')
        link = response.meta.get('link', '')
        total_coin = response.meta.get('total_coin', '')
        # Scrapy's HTML parser drops <tbody>, so select table rows directly.
        nodes = response.css('#table_maina tr')
        # Skip the header row; every remaining row is one transaction.
        for node in nodes[1:]:
            item_loader = ItemLoader(item=BitcoinCrawlerItem(), selector=node)
            # add_value normalizes scalars via arg_to_iter, so the previous
            # single-element list wrapping was redundant.
            item_loader.add_value("index", index)
            item_loader.add_value("coin_name", self.coin_name)
            item_loader.add_value("address", address)
            item_loader.add_value("link", link)
            item_loader.add_value("total_coin", total_coin)
            item_loader.add_css("info_block", "td:nth-child(1) > a::text")
            item_loader.add_css("trading_link", "td:nth-child(1) > a::attr(href)")
            item_loader.add_css("info_time", "td:nth-child(2)::text")
            item_loader.add_css("info_num", "td:nth-child(3)::text")
            item_loader.add_css("info_total", "td:nth-child(4)::text")
            item_loader.add_css("info_total_usd", "td:nth-child(5)::text")
            yield item_loader.load_item()
