import re

import scrapy

from work_data.items import WorkDataItem


class WorkWageSpider(scrapy.Spider):
    """Scrape regional average-wage statistics from data.stats.gov.cn.

    Flow:
      1. ``start_requests`` fetches the region ("reg") dimension list.
      2. ``parse`` extracts region codes/names and fires one QueryData
         request per (indicator, region) pair.
      3. ``parse_next`` parses each JSON-ish payload with regexes and
         yields one ``WorkDataItem`` per industry row.
    """

    name = 'work_wage'
    allowed_domains = ['data.stats.gov.cn']

    # Item fields for the ten yearly values, newest first — the strdata
    # values in each response appear in this order.
    YEAR_FIELDS = (
        'list_2021', 'list_2020', 'list_2019', 'list_2018', 'list_2017',
        'list_2016', 'list_2015', 'list_2014', 'list_2013', 'list_2012',
    )

    def __init__(self, name=None, **kwargs):
        super().__init__(name, **kwargs)
        self.city = []  # region names collected in parse()
        self.k = 0      # NOTE(review): unused here; kept for backward compatibility
        # Indicator suffixes: each is appended to "A040" to build a zb code.
        self.wds = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C']

    def start_requests(self):
        """Request the region dimension list that seeds the crawl."""
        url = 'https://data.stats.gov.cn/easyquery.htm?'
        param = {
            'm': 'getOtherWds',
            'dbcode': 'fsnd',
            'rowcode': 'zb',
            'colcode': 'sj',
            'wds': '[]',
            'k1': '1668842093444'
        }
        yield scrapy.FormRequest(url=url, callback=self.parse, formdata=param, method='get',
                                 dont_filter=False)

    def parse(self, response, **kwargs):
        """Extract region codes/names, then request data per indicator/region.

        The region list arrives as JSON text; entries with ``"sort":"1"``
        carry the region code and display name we need.
        """
        obj = re.compile(r'"code":"(?P<num>.*?)","name":"(?P<city>.*?)","sort":"1"', re.S)
        # Region codes used as GET parameters for the data queries.
        region_codes = []
        for match in obj.finditer(response.text):
            region_codes.append(match.group("num"))
            self.city.append(match.group("city"))

        url = "https://data.stats.gov.cn/easyquery.htm?"
        for wds in self.wds:
            for code in region_codes:
                params = {
                    "m": "QueryData",
                    "dbcode": "fsnd",
                    "rowcode": "zb",
                    "colcode": "sj",
                    "wds": '[{"wdcode": "reg", "valuecode": "' + code + '"}]',
                    "dfwds": '[{"wdcode": "zb", "valuecode": "A040' + wds + '"}]',
                    'k1': '1666962437751',
                    "h": '1'
                }
                yield scrapy.FormRequest(url=url, callback=self.parse_next, formdata=params, method='get',
                                         dont_filter=True)

    def parse_next(self, response):
        """Parse one QueryData response, yielding a WorkDataItem per industry.

        Each industry row consumes the next 10 yearly "strdata" values in
        document order (2021 down to 2012).
        """
        data_re = re.compile(r'"strdata":"(?P<data>.*?)"', re.S)
        industry_re = re.compile(r'"cname":"(?P<name>.*?)".*?"exp":"(?P<exp>.*?)".*?"unit":"(?P<unit>.*?)"', re.S)
        city_re = re.compile(r'"wdname":"指标"},."nodes":.{"cname":"(?P<city>.*?)"', re.S)

        # Materialize all yearly values once so each industry slices its own
        # 10-value window (the old shared-iterator approach broke after the
        # first industry and could raise IndexError on short responses).
        values = [m.group("data") for m in data_re.finditer(response.text)]

        # First plausible city name (long matches are regex over-captures).
        city = ''
        for m in city_re.finditer(response.text):
            if len(m.group("city")) > 20:
                continue
            city = m.group("city")
            break

        offset = 0
        for ind in industry_re.finditer(response.text):
            # An empty unit marks the end of real industry rows.
            if ind.group("unit") == "":
                break
            row = values[offset:offset + 10]
            offset += 10
            if len(row) < 10:
                # Not enough data points left for a full 10-year row; stop
                # instead of raising IndexError as the old code did.
                break
            # Fresh item per yield so downstream pipelines never see a
            # previously-yielded item mutated under them.
            item = WorkDataItem()
            item['city'] = city
            item['industry'] = ind.group("name") + "(" + ind.group("unit") + ")"
            for field, value in zip(self.YEAR_FIELDS, row):
                item[field] = value
            item['exp'] = ind.group("exp")
            yield item
