#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/22 14:05
# @Author  : 王凯
# @File    : hebei_grade.py
# @Project : spider-man

import datetime
import json
import time

import scrapy

from apps.creadit_grade_a.creadit_grade_a.items import NetCreditGradeAItem
from utils.tools import urlencode, parse_url_params


class HebeiGradeSpider(scrapy.Spider):
    """Spider for A-grade taxpayer credit ratings published by Hebei province.

    Crawl flow:
        1. ``start_requests`` -> list of available rating years (``pjnd``).
        2. ``parse``          -> list of administrative areas (``xzqh``).
        3. ``parse_area``     -> one paginated query per (area, recent year).
        4. ``parse_page``     -> emit items for the page, schedule next page.
    """

    name = "hebei_grade"
    province = "河北"
    # Landing page of the public query UI (kept for reference; not requested).
    url = "https://etax.hebei.chinatax.gov.cn/yhs-web/cxzx/index.html#/xydjalnsrcx_new"
    Request = scrapy.Request
    FormRequest = scrapy.FormRequest
    headers = {
        "Accept": "*/*",
        "Accept-Language": "zh-CN,zh;q=0.9,zu;q=0.8,be;q=0.7,en;q=0.6",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "Content-Type": "application/json;charset=UTF-8",
        "Origin": "https://etax.hebei.chinatax.gov.cn",
        "Pragma": "no-cache",
        "Referer": "https://etax.hebei.chinatax.gov.cn/yhs-web/cxzx/index.html?id=254&code=ajnsxyjbcx&_=1672715215676",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "X-Requested-With": "XMLHttpRequest",
        "sec-ch-ua": "\"Chromium\";v=\"124\", \"Google Chrome\";v=\"124\", \"Not-A.Brand\";v=\"99\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Windows\""
    }
    custom_settings = {
        # Crawl strictly one request at a time (site appears load-sensitive).
        "CONCURRENT_REQUESTS": 1
    }
    # Page size used by the site's pagination API (was hard-coded as 40 in
    # several places).
    PAGE_SIZE = 40

    def start_requests(self):
        """Kick off the crawl by fetching the list of rating years."""
        url = "https://etax.hebei.chinatax.gov.cn/yhs-web/api/nsxy/pjnd/list"
        yield self.Request(url, callback=self.parse, headers=self.headers)

    def parse(self, response, **kwargs):
        """Fetch the administrative-area list, carrying the year list along."""
        years_list = response.json().get("value")
        url = "https://etax.hebei.chinatax.gov.cn/yhs-web/api/xydj/nsrcx/query/xzqh"
        yield self.Request(url, callback=self.parse_area, cb_kwargs={"years_list": years_list}, headers=self.headers)

    def parse_area(self, response, **kwargs):
        """Fan out one first-page query per (area, recent year) combination.

        Only the current and the previous rating year are queried.
        """
        url = "https://etax.hebei.chinatax.gov.cn/yhs-web/api/xydj/nsrcx/query/xydjalnsrcx"
        res_value = response.json().get("value")
        if not res_value:
            return
        city_list = res_value.get("result")
        years_list = kwargs.get("years_list")
        # Hoisted out of the loops: the allowed-year set is loop-invariant
        # (previously rebuilt, with two datetime.now() calls, for every
        # (city, year) pair).
        this_year = datetime.datetime.now().year
        wanted_years = {str(this_year - 1), str(this_year)}
        for city_ in city_list:
            sjxzqhDm = city_.get("sjxzqhDm")
            sjxzqhMc = city_.get("sjxzqhMc")
            # Skip areas missing either the code or the name — both are
            # required request parameters.
            if not (sjxzqhDm and sjxzqhMc):
                continue
            for year in years_list:
                if str(year) not in wanted_years:
                    continue
                data = {
                    "pageIndex": 1,
                    "pageSize": self.PAGE_SIZE,
                    "pjnd": year,
                    "sjxzqhDm": sjxzqhDm,
                    "sjxzqhMc": sjxzqhMc,
                    "yzm": '',
                }
                # The API expects every value as a string.
                data = {k: str(v) if isinstance(v, int) else v for k, v in data.items()}
                yield scrapy.FormRequest(
                    url=url, body=json.dumps(data), method="POST", callback=self.parse_page, cb_kwargs=dict(area=sjxzqhMc),
                    headers={**self.headers, "Content-Type": "application/json", }
                )

    def parse_page(self, response, **kwargs):
        """Emit items for the current page and schedule the following page.

        ``total`` is only reliably present on the first page's response, so it
        is threaded through ``cb_kwargs`` to subsequent pages.
        """
        request_data = json.loads(response.request.body.decode())
        yield from self.parse_detail(response, **kwargs)
        response_json = response.json()
        response_json_value = response_json.get("value") or {}
        all_total = response_json_value.get("total") or kwargs.get("total")
        if not all_total:
            return
        # Ceiling division: number of pages that actually contain records.
        # The previous "total // 40 + 1 (or + 2)" computation over-counted by
        # one page and always requested a final empty page per (area, year).
        max_page = (all_total + self.PAGE_SIZE - 1) // self.PAGE_SIZE
        page_index = int(request_data.get("pageIndex"))
        if page_index >= max_page:
            return
        area = kwargs.get("area")
        url = "https://etax.hebei.chinatax.gov.cn/yhs-web/api/xydj/nsrcx/query/list"
        data = {
            "pageIndex": page_index + 1,
            "pageSize": self.PAGE_SIZE,
            "pjnd": request_data.get("pjnd"),
            "sjxzqhDm": request_data.get("sjxzqhDm"),
            "sjxzqhMc": request_data.get("sjxzqhMc"),
            "yzm": '',
        }
        data = {k: str(v) if isinstance(v, int) else v for k, v in data.items()}
        yield scrapy.FormRequest(
            url=url, body=json.dumps(data), method="POST", callback=self.parse_page, cb_kwargs=dict(area=area, total=all_total),
            headers={**self.headers, "Content-Type": "application/json", }
        )

    def parse_detail(self, response, **kwargs):
        """Yield one item per taxpayer record on the current page."""
        response_json = response.json()
        response_json_value = response_json.get("value", {})
        if response_json_value:
            datas = response_json_value.get("result", [])
            for data in datas:
                item = NetCreditGradeAItem()
                item.taxpayer_id = data.get("nsrsbh")
                item.company_name = data.get("nsrmc")
                item.year = data.get("pjnd")
                item.province = self.province
                yield item


def run():
    """Launch this spider through the Scrapy command-line entry point."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "hebei_grade"])


if __name__ == "__main__":
    # Allow launching the spider directly: `python hebei_grade.py`.
    run()
