# -*- coding: utf-8 -*-
import scrapy
from areas.settings import headers, cookies
from copy import deepcopy

class GetareasSpider(scrapy.Spider):
    """Walk the NBS (stats.gov.cn) 2017 administrative-division code pages.

    The crawl descends province -> city -> county -> town -> village.  A flat
    ``dict`` item is built incrementally: each level fills in its own
    ``*_id`` / ``*_name`` fields and passes a **deep copy** down via
    ``Request.meta`` so sibling branches cannot clobber each other.  Rows that
    have no link to a deeper level are yielded as (possibly partial) items;
    village rows are leaves and are always yielded.
    """

    name = 'getAreas'
    allowed_domains = ['stats.gov.cn']
    start_urls = ['http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2017/index.html']

    # First synthetic province id assigned by parse(); provinces are numbered
    # sequentially from here.  NOTE(review): 66 looks arbitrary -- confirm
    # against whatever consumes province_id downstream.
    PROVINCE_ID_START = 66

    @staticmethod
    def _new_item():
        """Return a fresh result item with every field preset to ""."""
        return {
            "province_name": "",
            "province_id": "",
            "city_id": "",
            "city_name": "",
            "county_id": "",
            "county_name": "",
            "town_id": "",
            "town_name": "",
            "village_id": "",
            "type_id": "",
            "village_name": "",
        }

    def start_requests(self):
        """Issue the start requests with the shared headers/cookies.

        ``dont_filter=True`` lets the start URL through even if the dupe
        filter has already seen it.
        """
        for url in self.start_urls:
            yield scrapy.Request(
                url=url,
                headers=headers,
                cookies=cookies,
                dont_filter=True,
            )

    def parse(self, response):
        """Parse the province index page; schedule one request per province.

        Yields:
            scrapy.Request: one request per province cell that has a link,
            carrying a deep-copied partial item in ``meta["item"]``.
        """
        # Debug dump of the raw page.  utf-8 is forced so the write cannot
        # fail on platforms whose locale encoding cannot represent CJK text.
        with open('./res.html', 'a', encoding='utf-8') as f:
            f.write(response.text)
        # NOTE(review): the position() predicates restrict which province
        # rows are crawled -- this looks like a debugging limit; drop them
        # to crawl the full table.
        province_elms = response.xpath(
            '//table[@class="provincetable"]//tr[position()>3][position()<5]/td'
        )
        num = self.PROVINCE_ID_START
        for elm in province_elms:
            province_name = elm.xpath("./a/text()").extract_first() or ""
            if not province_name:
                continue  # spacer / empty cell

            pro_url = elm.xpath("./a/@href").extract_first()
            pro_url = response.urljoin(pro_url) if pro_url else ""
            if not pro_url:
                continue  # province without a detail page

            self.logger.info("province %s (id=%s)", province_name, num)

            item = self._new_item()
            item["province_name"] = province_name
            item["province_id"] = str(num)
            yield scrapy.Request(
                url=pro_url,
                headers=headers,
                cookies=cookies,
                meta={"item": deepcopy(item)},
                callback=self.parse_city,
            )
            num += 1

    def parse_city(self, response):
        """Parse a province page; fill city fields and descend to counties."""
        item = response.meta.get("item")
        city_rows = response.xpath('//table[@class="citytable"]//tr[position()>1]')
        for row in city_rows:
            city_id = row.xpath('./td[1]//text()').extract_first() or ""
            city_name = row.xpath('./td[2]//text()').extract_first() or ""
            city_url = row.xpath('./td[1]/a/@href').extract_first()
            city_url = response.urljoin(city_url) if city_url else ""
            if not (city_id and city_name and city_url):
                continue  # header row or city without a detail page

            self.logger.info("city %s %s", city_name, city_id)

            item["city_id"] = city_id
            item["city_name"] = city_name
            yield scrapy.Request(
                url=city_url,
                headers=headers,
                cookies=cookies,
                meta={"item": deepcopy(item)},
                callback=self.parse_county,
            )

    def parse_county(self, response):
        """Parse a city page; fill county fields and descend to towns.

        Counties with no link to a town page are yielded as partial items.
        """
        item = response.meta.get("item")
        county_rows = response.xpath('//table[@class="countytable"]//tr[position()>1]')
        for row in county_rows:
            county_id = row.xpath('./td[1]//text()').extract_first() or ""
            county_name = row.xpath('./td[2]//text()').extract_first() or ""
            county_url = row.xpath('./td[1]/a/@href').extract_first()
            county_url = response.urljoin(county_url) if county_url else ""
            item["county_id"] = county_id
            item["county_name"] = county_name

            self.logger.info("county %s %s", county_name, county_id)

            if not (county_id and county_name and county_url):
                # deepcopy: later loop iterations keep mutating `item`, and
                # yielding the shared dict would corrupt already-yielded
                # items if anything holds a reference to them.
                yield deepcopy(item)
                continue
            yield scrapy.Request(
                url=county_url,
                headers=headers,
                cookies=cookies,
                meta={"item": deepcopy(item)},
                callback=self.parse_town,
            )

    def parse_town(self, response):
        """Parse a county page; fill town fields and descend to villages.

        Towns with no link to a village page are yielded as partial items.
        """
        item = response.meta.get("item")
        town_rows = response.xpath('//table[@class="towntable"]//tr[position()>1]')
        for row in town_rows:
            town_id = row.xpath('./td[1]//text()').extract_first() or ""
            town_name = row.xpath('./td[2]//text()').extract_first() or ""
            town_url = row.xpath('./td[1]/a/@href').extract_first()
            town_url = response.urljoin(town_url) if town_url else ""
            item["town_id"] = town_id
            item["town_name"] = town_name

            self.logger.info("town %s %s", town_name, town_id)

            if not (town_id and town_name and town_url):
                # Same rationale as parse_county: never yield the shared dict.
                yield deepcopy(item)
                continue
            yield scrapy.Request(
                url=town_url,
                headers=headers,
                cookies=cookies,
                meta={"item": deepcopy(item)},
                callback=self.parse_village,
            )

    def parse_village(self, response):
        """Parse a town page (deepest level); yield one item per village row.

        Village rows carry (code, urban/rural type, name) in td[1..3] and no
        deeper link, so every row with an id and a name is a finished item.
        """
        item = response.meta.get("item")
        village_rows = response.xpath('//table[@class="villagetable"]//tr[position()>1]')
        for row in village_rows:
            village_id = row.xpath('./td[1]//text()').extract_first() or ""
            type_id = row.xpath('./td[2]//text()').extract_first() or ""
            village_name = row.xpath('./td[3]//text()').extract_first() or ""
            if not (village_id and village_name):
                continue  # header / malformed row

            self.logger.info("village %s %s", village_name, village_id)

            item["village_id"] = village_id
            item["type_id"] = type_id
            item["village_name"] = village_name
            # Leaf level: always yield.  The original yielded only when a
            # field (in practice the never-present td[1] link) was missing,
            # which happened to emit every row but for the wrong reason and
            # would silently drop all rows if links ever appeared.
            yield deepcopy(item)

