# -*- coding: utf-8 -*-
import time
from logging import Logger
from urllib import parse

import scrapy
from lxml import etree
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from spiderScript.items import SpiderscriptItem


class AreaCnSpider(scrapy.Spider):
    """Crawl the NBS (stats.gov.cn) 2018 administrative-division code pages.

    The province index page yields one province-level item plus a follow-up
    request per province; each province page yields one city-level item plus
    a follow-up request per city.  Hierarchy bookkeeping (parent codes, tree
    level, tree names) is threaded through ``Request.meta``.
    """

    name = 'area_cn'
    allowed_domains = ['stats.gov.cn']
    start_urls = ['http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2018/index.html']

    def parse(self, response):
        """Parse the province index page.

        Yields a :class:`scrapy.Request` per province (handled by
        :meth:`city_area`) and finally one item aggregating all provinces.
        """
        province_item = SpiderscriptItem()
        province_item['parent_code'] = '0'
        province_item['parent_codes'] = '0,'
        province_item['tree_leaf'] = '0'
        province_item['tree_level'] = 0
        province_item['area_type'] = '1'

        provinces = []
        for link in response.xpath("//tr[@class='provincetr']/td/a"):
            next_url = link.xpath('./@href').extract_first()
            # Province pages are named "<2-digit-code>.html"; pad to the
            # 7-character code this pipeline uses for provinces.
            area_code = str(next_url)[0:2] + '00000'
            area_name = str(link.xpath('./text()').extract_first())
            provinces.append({"area_name": area_name, "area_code": area_code,
                              "tree_names": area_name + ","})
            yield scrapy.Request(parse.urljoin(response.url, next_url),
                                 meta=dict(parent_code=area_code, tree_level=1,
                                           parent_codes="0," + area_code + ",",
                                           tree_names=area_name + ","),
                                 callback=self.city_area)
        province_item['areas'] = provinces
        # BUGFIX: this method is a generator, so ``return province_item``
        # would set the StopIteration value and the item would never reach
        # Scrapy's pipeline.  It must be yielded.
        yield province_item

    def city_area(self, response):
        """Parse a province page listing its cities.

        Yields a follow-up request per city and one item aggregating all
        cities on the page.

        NOTE(review): deeper (county) pages mark rows with ``countytr``, not
        ``citytr``, so the recursive callback currently matches nothing past
        the city level — confirm whether county crawling is intended.
        """
        city_item = SpiderscriptItem()
        city_item['parent_code'] = response.meta['parent_code']
        # BUGFIX: meta['parent_codes'] already ends with "," and already
        # contains parent_code; the previous code appended
        # "," + parent_code + "," again, producing ",," and a duplicate code.
        city_item['parent_codes'] = response.meta['parent_codes']
        city_item['tree_leaf'] = '0'
        city_item['tree_level'] = response.meta['tree_level']
        city_item['area_type'] = '2'

        cities = []
        for row in response.xpath("//tr[@class='citytr']"):
            next_url = row.xpath("./td[1]/a[1]/@href").extract_first()
            area_code = row.xpath("./td[1]/a/text()").extract_first()
            area_name = row.xpath('./td[2]/a/text()').extract_first()
            # Debug prints replaced with lazy logger calls.
            self.logger.debug("city row %s (%s) on %s",
                              area_code, area_name, response.url)
            # Table codes are 12 digits; keep the leading prefix by dropping
            # the last 6 (matches the original slice).
            cities.append({"area_name": area_name, "area_code": area_code[0:-6],
                           "tree_names": area_name + ","})
            yield scrapy.Request(parse.urljoin(response.url, next_url),
                                 meta=dict(parent_code=area_code,
                                           # derive depth instead of the
                                           # previously hard-coded 2
                                           tree_level=response.meta['tree_level'] + 1,
                                           parent_codes=city_item['parent_codes'] + area_code + ',',
                                           tree_names=response.meta['tree_names'] + area_name + ","),
                                 callback=self.city_area)
        city_item['areas'] = cities
        # BUGFIX: yield, not return — see parse() above.
        yield city_item


if __name__ == '__main__':
    # Run this spider stand-alone with the project's settings.
    process = CrawlerProcess(get_project_settings())
    # Pass the spider class itself rather than its name string: name lookup
    # only works when SPIDER_MODULES discovery is configured, while the
    # class reference always resolves.
    process.crawl(AreaCnSpider)
    process.start()
