#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/9/11 15:26
# @Author  : 王凯
# @File    : gs_data_stats_spider.py
# @Project : scrapy_spider
import json
import time
from typing import Any, Iterable

import scrapy
from scrapy.http import Response

from apps.data_stats.data_stats.items import NetDataStatsItem
from components.component.scrapy_redis_custom.utils import bytes_to_str
from components.component.scrapy_redis_task.spiders import RedisTaskSpider
from utils.tools import urlencode, parse_url_params, run_mul


class AllDataStatsSpider(RedisTaskSpider):
    """Spider for national statistics from data.stats.gov.cn (easyquery API).

    Workflow:
      1. ``start_requests`` walks the indicator ("zb") tree for every enabled
         dbcode; ``parse_zb_list`` recurses into parent nodes and pushes one
         task per leaf indicator into a Redis sorted set (``self.redis_key``).
      2. ``spider_idle`` pops tasks from Redis and schedules them.
      3. ``parse_run_one`` -> ``parse_data`` -> ``parse_detail`` query the
         easyquery API and emit :class:`NetDataStatsItem` rows.
    """

    name = "all_data_stats"
    # dbcode -> human-readable dataset type; commented-out entries are
    # deliberately disabled datasets.
    dbcode_mapping = {
        # "hgnd": "年度数据",
        "hgjd": "季度数据",
        "hgyd": "月度数据",
        # "fsnd": "分省年度",
        # "fsjd": "分省季度",
        # "fsyd": "分省月度",
    }

    # Single endpoint used by every request this spider makes.
    BASE_URL = "https://data.stats.gov.cn/easyquery.htm"

    def spider_close(self) -> None:
        """Shutdown hook; no cleanup is required for this spider."""

    def spider_idle(self):
        """Pop one pending task from Redis and schedule it when idle."""
        for raw in self.fetch_data(self.redis_key, 1):
            task = json.loads(bytes_to_str(raw, self.redis_encoding))
            # Hit a plain page first (presumably to acquire session cookies
            # before calling the query API -- TODO confirm), then the real
            # query is issued from parse_run_one.
            request = scrapy.Request(
                url=f"{self.BASE_URL}?cn=A01",
                callback=self.parse_run_one,
                cb_kwargs=task,
                dont_filter=True,
            )
            self.logger.info(f"[获取到任务(剩余 {self.count_size(self.redis_key)} 个)] -> {task} ")
            self.crawler.engine.crawl(request)

    def start_requests(self):
        """Issue one indicator-tree request per enabled dbcode."""
        for dbcode in self.dbcode_mapping:
            yield scrapy.FormRequest(
                url=self.BASE_URL,
                method="POST",
                formdata={"id": "zb", "dbcode": dbcode, "wdcode": "zb", "m": "getTree"},
                callback=self.parse_zb_list,
            )

    def parse(self, response: Response, **kwargs: Any) -> Any:
        """Required Scrapy hook; all parsing happens in dedicated callbacks."""

    def parse_zb_list(self, response, **kwargs):
        """Recursively walk the indicator tree.

        Parent nodes trigger a deeper tree request carrying the accumulated
        category breadcrumb; leaf nodes become crawl tasks stored in the
        Redis sorted set (score 0) for ``spider_idle`` to pick up.
        """
        _, params = parse_url_params(response.request.body.decode())
        for node in response.json():
            # Breadcrumb of category names from the root down to this node.
            breadcrumb = kwargs.get("name", []) + [f'{node.get("name")}']
            if node.get("isParent"):
                self.logger.info(f"找到子类 「{breadcrumb}」")
                yield scrapy.FormRequest(
                    self.BASE_URL,
                    formdata={**params, "id": node.get("id")},
                    method="POST",
                    callback=self.parse_zb_list,
                    cb_kwargs={"name": breadcrumb},
                )
            else:
                self.logger.info(f"准备爬取 「{breadcrumb}」{node}")
                new_task = {
                    "valuecode": node["id"],
                    "dbcode": params["dbcode"],
                    "name": breadcrumb,
                }
                self.server.zadd(self.redis_key, {json.dumps(new_task, ensure_ascii=False): 0})

    def _query_url(self, dbcode: str, dfwds: str, **extra: str) -> str:
        """Build a QueryData URL for *dbcode*; ``extra`` adds optional params."""
        params = {
            "m": "QueryData",
            "dbcode": dbcode,
            "rowcode": "zb",
            "colcode": "sj",
            "wds": "[]",
            "dfwds": dfwds,
            # k1 is a cache-busting millisecond timestamp.
            "k1": f"{int(time.time() * 1000)}",
            **extra,
        }
        return self.BASE_URL + "?" + urlencode(params)

    def parse_run_one(self, response: Response, **kwargs: Any) -> Any:
        """After the warm-up request, query the data for one indicator."""
        dfwds = json.dumps([{"wdcode": "zb", "valuecode": kwargs["valuecode"]}])
        yield scrapy.Request(
            url=self._query_url(kwargs["dbcode"], dfwds, h="1"),
            callback=self.parse_data,
            cb_kwargs=kwargs,
        )

    def parse_data(self, response, **kwargs):
        """Emit items from the first page, then fetch the last 13 periods."""
        yield from self.parse_detail(response, **kwargs)
        dfwds = '[{"wdcode":"sj","valuecode":"LAST13"}]'
        yield scrapy.Request(
            url=self._query_url(kwargs["dbcode"], dfwds),
            callback=self.parse_detail,
            cb_kwargs=kwargs,
        )

    def parse_detail(self, response, **kwargs):
        """Turn an easyquery JSON payload into ``NetDataStatsItem`` objects.

        The API returns ``datanodes`` (measurements keyed by dimension codes)
        and ``wdnodes`` (dimension metadata). Each datanode is flattened,
        joined with the matching indicator metadata, and emitted as one item
        per time period.
        """
        name = kwargs["name"]
        payload = response.json()["returndata"]

        # Flatten each datanode: copy its dimension codes into the data dict.
        rows = []
        for node in payload["datanodes"]:
            row = node["data"]
            for wd in node.get("wds"):
                row[wd["wdcode"]] = wd["valuecode"]
            rows.append(row)

        # code -> metadata lookup for every dimension except the time axis.
        headers = {}
        for wd_node in payload["wdnodes"]:
            if wd_node["wdcode"] == "sj":
                continue
            headers[wd_node["wdcode"]] = {n["code"]: n for n in wd_node["nodes"]}

        # Merge indicator metadata (cname, unit, ...) into every row in place.
        for row in rows:
            for wd_code, mapping in headers.items():
                if wd_code in row:
                    row.update(mapping[row[wd_code]])

        data_type = self.dbcode_mapping.get(kwargs["dbcode"])
        # Guard: an unmapped dbcode yields None, which would crash the
        # substring checks below ('年度' in None -> TypeError).
        period_label = data_type or ""
        for row in rows:
            date_string = row["sj"]
            if "年度" in period_label:
                date_string = date_string + "-12-31"  # yearly: pin to year end
            elif "季度" in period_label:
                # The API encodes quarters as A-D; normalise to Q1-Q4.
                date_string = (
                    date_string.replace("A", "Q1").replace("B", "Q2")
                    .replace("C", "Q3").replace("D", "Q4")
                )
            item_dict = {
                "area": "全国",
                "tag_name": row["cname"],
                "num_string": row["strdata"] if row["hasdata"] else None,
                "unit": row["unit"],
                "num": row["data"] if row["hasdata"] else None,
                "date_string": date_string,
                "data_type": data_type,
                "source": "国家统计局",
            }
            # name is the category breadcrumb collected while walking the tree.
            for depth, category in enumerate(name, start=1):
                item_dict[f"cate_{depth}"] = category
            yield NetDataStatsItem(**item_dict)


def run():
    """Launch the spider through Scrapy's command-line entry point."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "all_data_stats"])


if __name__ == "__main__":
    from argparse import ArgumentParser

    parser = ArgumentParser(description="命令运行爬虫，此命令慎重修改")

    parser.add_argument("--worker", type=int, default=1, help="进程数量")

    run_args = parser.parse_args()

    if run_args.worker:
        run_mul(max_workers=run_args.worker, target=run)
