import re

import scrapy
from scrapy.spidermiddlewares.httperror import HttpError
from twisted.internet.error import DNSLookupError, TCPTimedOutError, TimeoutError

from RegionScrapy.items.RegionItem import RegionItem


class RegionSpider(scrapy.Spider):
    """Crawl the 2017 administrative-division code tables on stats.gov.cn and
    yield a ``RegionItem(code, pcode, name)`` for every province, city and
    district/county.

    The site is a static tree: the index page links to one page per province,
    each province page links to city pages, and each city page lists its
    districts. ``pcode`` carries the parent region's code through request meta.
    """

    name = "RegionSpider"

    # Per-spider overrides of the project settings.
    custom_settings = {
        # Maximum number of concurrent requests to any single domain.
        "CONCURRENT_REQUESTS_PER_DOMAIN": 16,
        # Seconds the downloader waits before fetching the next page from
        # the same website.
        "DOWNLOAD_DELAY": 0.5,
        # When fetching from the same site, wait a random multiple
        # (0.5x to 1.5x) of DOWNLOAD_DELAY instead of a fixed delay.
        "RANDOMIZE_DOWNLOAD_DELAY": True,
        # Downloader timeout, in seconds.
        "DOWNLOAD_TIMEOUT": 180,
        "ITEM_PIPELINES": {
            'RegionScrapy.pipelines.RegionPipeline.RegionPipeline': 300
        },
        # Minimum log level: one of CRITICAL, ERROR, WARNING, INFO, DEBUG.
        "LOG_LEVEL": "DEBUG",
        # If True, all process stdout (and stderr) is redirected to the log.
        "LOG_STDOUT": False,
        # Whether memory debugging is enabled.
        "MEMDEBUG_ENABLED": False,
        # If enabled, Scrapy honours the target site's robots.txt policy.
        "ROBOTSTXT_OBEY": False,
        # Default User-Agent sent with every request.
        "USER_AGENT": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.162 Safari/537.36",
    }

    def errback_httpbin(self, failure):
        """Errback shared by every request: log the failure, with a more
        specific message for HTTP errors, DNS failures and timeouts."""
        # Log all failures.
        self.logger.error(repr(failure))

        # Some error types carry extra context worth logging, so check the
        # concrete exception class on the failure.
        if failure.check(HttpError):
            # Raised by the HttpError spider middleware; the non-200
            # response is still attached to the exception.
            response = failure.value.response
            self.logger.error('HttpError on %s', response.url)

        elif failure.check(DNSLookupError):
            # The original request is available on the failure.
            request = failure.request
            self.logger.error('DNSLookupError on %s', request.url)

        elif failure.check(TimeoutError, TCPTimedOutError):
            # TimeoutError must be twisted.internet.error.TimeoutError
            # (imported at the top of the file); the builtin TimeoutError
            # would never match Twisted's timeout failures.
            request = failure.request
            self.logger.error('TimeoutError on %s', request.url)

    def start_requests(self):
        """Entry point: request the 2017 index page listing all provinces."""
        mainUrl = "http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2017/index.html"

        request = scrapy.Request(url=mainUrl, callback=self.parseProvincePage, errback=self.errback_httpbin)
        yield request

    def parseProvincePage(self, response):
        """Parse the index page: yield a top-level RegionItem per province
        and follow each province's link to its city listing."""
        baseUrl = "http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2017/{}{}"

        # Each province anchor's href starts with the numeric province code
        # (e.g. href="11.html"), followed by the name and a <br>.
        for province in response.xpath("//td/a").extract():
            matchObj = re.match(r'.+ href="(\d+)(.+)".*>(.+)<br>.+', province, re.M | re.I)

            if matchObj:
                code = matchObj.group(1)
                suffix = matchObj.group(2)
                name = matchObj.group(3)

                # Provinces are top-level regions: no parent code.
                yield RegionItem(code=code, pcode=None, name=name)

                url = baseUrl.format(code, suffix)
                request = response.follow(url=url, callback=self.parseCityPage, errback=self.errback_httpbin)
                request.meta["pcode"] = code
                request.meta["pname"] = name
                yield request

    def parseCityPage(self, response):
        """Parse a province page: yield a RegionItem per city and follow
        each city's link to its district listing."""
        currentUrl = response.url
        pcode = response.meta['pcode']
        # Directory of the current page, e.g. ".../2017/" for ".../2017/11.html";
        # the captured group already includes the trailing slash.
        baseUrl = re.match(r'(.+/)\d+\.html', currentUrl, re.M | re.I).group(1)
        formatUrl = baseUrl + "{}{}"

        # City links use relative hrefs of the form "<province>/<city>.html".
        for city in response.xpath("//tr/td[2]/a").extract():
            matchObj = re.match(r'.+"(\d+)/(\d+)\.html.+>(.+)</.+', city, re.M | re.I)

            if matchObj:
                code1 = matchObj.group(1)   # province directory, e.g. "11"
                code2 = matchObj.group(2)   # city code, e.g. "1101"
                name = matchObj.group(3)
                code = code1 + "/" + code2  # relative path "11/1101"
                yield RegionItem(code=code2, pcode=pcode, name=name)

                url = formatUrl.format(code, ".html")
                request = response.follow(url=url, callback=self.parseDistrictPage, errback=self.errback_httpbin)
                request.meta["pcode"] = code2
                request.meta["pname"] = name
                yield request

    def parseDistrictPage(self, response):
        """Parse a city page: yield a RegionItem per district/county.

        This is the deepest level crawled: district links are parsed for
        their code and name, but the town-level pages they point to are
        intentionally not followed.
        """
        pcode = response.meta['pcode']

        for district in response.xpath("//tr/td[2]/a").extract():
            matchObj = re.match(r'.+"(\d+)/(\d+)\.html.+>(.+)</.+', district, re.M | re.I)

            if matchObj:
                code2 = matchObj.group(2)   # district code
                name = matchObj.group(3)

                yield RegionItem(code=code2, pcode=pcode, name=name)
