# -*- coding: utf-8 -*-
import json
from pathlib import Path
from urllib import parse

import scrapy


class SghwebSpider(scrapy.Spider):
    """Scrape geological-hazard (地灾) statistics from the SGHWeb AJAX endpoint.

    Flow:
      1. ``start_requests`` queries province-level summary counts.
      2. ``parse`` iterates the per-province rows and requests the full
         detail list for each province (page size = that province's total).
      3. ``get_points`` dumps each province's raw JSON response to
         ``data/<省>-地灾点.json``.
    """

    name = 'SGHWeb'
    start_urls = ['http://121.199.72.208:8081/SGHWeb/AjaxHandler.ashx?']

    def start_requests(self):
        """Issue the initial province-level statistics query."""
        params = {
            'class': 'AjaxTJCX',
            'method': 'GetStatisticsResult',
            # Requested columns: total, collapse, ground collapse, debris flow,
            # land subsidence, ground fissure, landslide, unstable slope.
            'checkedName': '灾害总数,崩塌,塌陷,泥石流,地面沉降,地裂缝,滑坡,斜坡',
            'queryLevel': '省',
            'checkCodes': '000000',
            'unit': 'PROVINCE',
        }

        for base_url in self.start_urls:
            yield scrapy.Request(
                url=base_url + parse.urlencode(params),
                callback=self.parse,
            )

    def parse(self, response):
        """Parse the province summary and fan out one detail request per province.

        Expected payload shape: ``{"data": {"rows": [...]}, "guid": ...}``.
        """
        json_response = json.loads(response.body)

        # .get() avoids a KeyError when the endpoint returns an error payload
        # without a "data" key (the original code only guarded against null).
        if json_response.get("data") is None:
            return

        rows = json_response["data"]["rows"]
        guid = json_response["guid"]

        for row in rows:
            # Skip aggregate/blank rows that carry no province name.
            if row["省"] == '':
                continue

            params = {
                'guid': guid,
                'class': 'AjaxTJCX',
                'method': 'FindStatisticsDetailInfo',
                'districtName': '省',
                'districtValue': row["省"],
                'pageIndex': 0,
                # Request every record in one page: page size = province total.
                'pageSize': row["灾害总数"],
            }

            url = self.start_urls[0] + parse.urlencode(params)
            yield scrapy.Request(url=url, callback=self.get_points)

            self.logger.info('%s 共计 %s 个地灾点', row["省"], row["灾害总数"])

    def get_points(self, response):
        """Write one province's raw detail JSON to ``data/<省>-地灾点.json``.

        The province name is recovered from the request URL's
        ``districtValue`` query parameter.
        """
        url = parse.unquote(response.url)
        query_string = url.split('?')[-1]
        # parse_qs maps each key to a *list* of values; the original code
        # used the list itself, producing filenames like
        # "data/['浙江省']-地灾点.json". Take the single value instead.
        district_value = parse.parse_qs(query_string)["districtValue"][0]

        # Ensure the output directory exists so a fresh checkout doesn't
        # crash with FileNotFoundError.
        out_dir = Path('data')
        out_dir.mkdir(parents=True, exist_ok=True)

        filename = out_dir / f'{district_value}-地灾点.json'
        filename.write_bytes(response.body)

        self.logger.info('write file %s', filename)

