# -*- coding: utf-8 -*-

import scrapy
import time
import copy
from scrapy.utils.project import get_project_settings
import logging


class EtherscanDpnSpider(scrapy.Spider):
    """Crawl token transfer lists on etherscan.io.

    ``parse`` pages through the level-0 transfer list of the contract
    configured in the ``SPIDER_TXHASH`` setting and yields one item per
    table row.  For every row it also follows the "From" address into
    ``Depth_parse``, which yields level-1 items (tagged with the parent
    TxHash as ``Father``) and schedules further paginated level-1 pages.
    """

    name = 'etherscan_dpn'
    allowed_domains = ['etherscan.io']
    start_urls = ['https://etherscan.io']

    # Base URL of the paginated "generic-tokentxns2" listing endpoint.
    LIST_URL = 'https://etherscan.io/token/generic-tokentxns2?contractAddress='

    # Cookies captured from a browser session.
    # NOTE(review): these look session-bound (ASP.NET_SessionId, _gat) and
    # will expire — confirm they are refreshed somewhere else.
    cookies = {
        '__cfduid': 'db0a724021436b9a7f45c5f5a143b5b371536462736',
        '_ga': 'GA1.2.1095119893.1536462742',
        '_gid': 'GA1.2.1364866163.1536462742',
        '__cflb': '1305739016',
        'ASP.NET_SessionId': 'qeshwtjaal5fvpxmmxrxm3bt',
        '_gat_gtag_UA_46998878_6': '1',
    }

    @staticmethod
    def _format_age(raw):
        """Convert etherscan's timestamp title (e.g. 'Sep-09-2018 02:32:16 AM')
        into 'YYYY-MM-DD HH:MM:SS'.  Shared by both parse levels."""
        return time.strftime("%Y-%m-%d %H:%M:%S",
                             time.strptime(raw, "%b-%d-%Y %I:%M:%S %p"))

    def parse(self, response):
        """Scrape one page of the level-0 transfer list.

        Yields ``{'lv': 0, 'data': {...}}`` items plus follow-up requests:
        the next listing page (while below ``ETHERSCAN_LV1_PAGE_END``) and
        one ``Depth_parse`` request per row's "From" address.
        """
        settings = get_project_settings()  # hoisted: invariant per call

        # Current page arrives via meta; the very first call has none.
        page = response.meta.get('page') or 1
        page_end = settings.get('ETHERSCAN_LV1_PAGE_END') + 1

        # Transaction list pagination: schedule the next page.
        if page < page_end:
            next_url = '{}{}&mode=&p={}'.format(
                self.LIST_URL, settings.get('SPIDER_TXHASH'), page)
            # page + 1 is an int (immutable) — no deepcopy needed in meta.
            yield scrapy.Request(next_url, callback=self.parse,
                                 meta={'page': page + 1})

        # One <tr> per transfer; row 1 is the table header.
        rows = response.xpath('//*[@id="maindiv"]/table//tr[position()>1]')

        # Assemble items for the pipeline.
        for row in rows:
            tx_hash = row.xpath('./td[1]//a/text()').extract_first()
            age = self._format_age(row.xpath('./td[2]//@title').extract_first())
            from_addr = row.xpath('./td[3]//a/text()').extract_first()
            to_addr = row.xpath('./td[5]//a/text()').extract_first()
            quantity = row.xpath('.//td[6]//text()').extract_first()

            # Fresh dict every iteration, so the former deepcopy on yield
            # was redundant — nothing here is shared across items.
            yield {'lv': 0,
                   'data': dict(
                       TxHash=tx_hash,
                       Age=age,
                       From=from_addr,
                       To=to_addr,
                       Quantity=quantity.replace(',', ''),
                       FromTag=None,
                       ToTag=None,
                   )}

            # --- depth crawling ---
            # The href already carries '?a=<address>'; flip '?' to '&'
            # because we prepend our own query string.
            son_url = (self.LIST_URL +
                       row.xpath('./td[3]//a/@href').extract_first().replace('?', '&'))
            yield scrapy.Request(son_url,
                                 callback=self.Depth_parse,
                                 meta={'Father': tx_hash})

    # SON depth-crawl callback #
    def Depth_parse(self, response):
        """Scrape one level-1 transfer page reached from a level-0 row.

        Yields ``{'lv': 1, 'data': {...}}`` items carrying the parent
        TxHash as ``Father`` and schedules pages 1..ETHERSCAN_LV2_PAGE_END
        for every address link found.
        """
        # Error pages render a bare <b> message in the body; bail out early.
        erro_data = response.xpath('/html/body/div[2]/b/text()').extract_first()
        if erro_data is not None:
            return

        rows = response.xpath('//tr[position()>1]')
        father = response.meta['Father']

        if not rows:
            # BUG FIX: logging.WARNING is the int level constant (30) and is
            # not callable — the original raised TypeError here. Use the
            # module-level warning() function instead.
            logging.warning('检测不到数据页面>>>>{}'.format(father))
            logging.warning(response.body)

        for row in rows:
            tx_hash = row.xpath('./td[1]//a/text()').extract_first()
            age = self._format_age(row.xpath('./td[2]//@title').extract_first())
            from_addr = row.xpath('./td[3]//a/text()').extract_first()
            to_addr = row.xpath('./td[5]//a/text()').extract_first()
            quantity = row.xpath('.//td[6]//text()').extract_first()
            href = row.xpath('.//td[3]//a/@href').extract_first()

            yield {'lv': 1,
                   'data': dict(
                       TxHash=tx_hash,
                       Age=age,
                       From=from_addr,
                       To=to_addr,
                       Quantity=quantity.replace(',', ''),
                       FromTag=None,
                       ToTag=None,
                       Father=father,
                   )}

            # Schedule deeper requests for this address.
            if href:
                # Page range 1..ETHERSCAN_LV2_PAGE_END.  Renamed loop
                # variable: the original reused `i`, shadowing the row
                # selector of the enclosing loop.
                lv2_end = get_project_settings().get('ETHERSCAN_LV2_PAGE_END')
                for page_no in range(1, lv2_end + 1):
                    next_url = '{}{}&p={}'.format(
                        self.LIST_URL, href.replace('?', '&'), page_no)
                    yield scrapy.Request(next_url,
                                         callback=self.Depth_parse,
                                         meta={'Father': tx_hash},
                                         cookies=self.cookies)