#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/25 13:41
# @Author  : 王凯
# @File    : jiangsu_grade.py
# @Project : spider-man
import datetime

import scrapy

from apps.creadit_grade_a.creadit_grade_a.items import NetCreditGradeAItem
from utils.tools import urlencode, parse_url_params


class JiangsuGradeSpider(scrapy.Spider):
    """Crawl A-grade taxpayer credit ratings from the Jiangsu provincial
    e-tax portal, yielding one ``NetCreditGradeAItem`` per rated company.

    Flow: warm-up page -> list of tax-office codes -> first result page per
    (office, year) -> remaining pages based on the reported total count.
    """

    name = "jiangsu_grade"
    province = "江苏"
    url = "https://etax.jiangsu.chinatax.gov.cn/portal-web/include?appbh=1034"
    Request = scrapy.Request
    FormRequest = scrapy.FormRequest

    # Single query endpoint shared by every POST, and the portal's fixed
    # page size (the site always returns at most 15 rows per page).
    QUERY_URL = "https://etax.jiangsu.chinatax.gov.cn/portal/queryapi/query.do"
    PAGE_SIZE = 15

    @staticmethod
    def _build_query_data(year, page, swjgdm):
        """Form payload requesting one page of A-grade taxpayers for one
        tax office (``swjgdm``) and grading year."""
        return {
            "req_type": "commonQuery",
            "action": "queryAjNsr",
            "nsrmc": "",
            "nsrsbh": "",
            "year": str(year),
            "page": str(page),
            "swjgdm": swjgdm,
        }

    def start_requests(self):
        """Hit the common query page first, presumably to establish the
        session/cookies the query endpoint expects."""
        url = "https://etax.jiangsu.chinatax.gov.cn/portal/queryapi/commonPage.do?sign=query_ggcx_ajnsr"
        yield self.Request(url)

    def parse(self, response, **kwargs):
        """Request the list of tax-office codes (one per city/district)."""
        data = {"req_type": "commonQuery", "action": "queryAjNsrSwjgdm"}
        yield self.FormRequest(self.QUERY_URL, formdata=data, method="POST", callback=self.parse_area)

    def parse_area(self, response, **kwargs):
        """For each tax office and each of the last two years, request the
        first result page."""
        offices = response.json().get("DATA") or []  # tolerate a missing/None DATA key
        this_year = datetime.datetime.now().year
        for year in range(this_year - 1, this_year + 1):
            for office in offices:
                swjgdm = office.get("swjg_dm")  # tax office code
                area = office.get("swjgmc")     # tax office (area) name
                yield self.FormRequest(
                    self.QUERY_URL,
                    formdata=self._build_query_data(year, 1, swjgdm),
                    method="POST",
                    callback=self.parse_page,
                    cb_kwargs=dict(area=area),
                )

    def parse_page(self, response, **kwargs):
        """Emit the items on page 1, then fan out requests for pages 2..N
        derived from the reported total row count."""
        yield from self.parse_detail(response, **kwargs)
        # Recover the year/office of this request from its own POST body.
        _, request_data = parse_url_params(response.request.body.decode())
        area = kwargs.get("area")
        # Default to 0 instead of crashing on `%` when "total" is absent.
        all_total = response.json().get("total") or 0
        # Ceiling division: number of result pages at PAGE_SIZE rows each.
        num_pages = -(-all_total // self.PAGE_SIZE)
        for page in range(2, num_pages + 1):
            yield self.FormRequest(
                self.QUERY_URL,
                formdata=self._build_query_data(
                    request_data.get("year"), page, request_data.get("swjgdm")
                ),
                method="POST",
                callback=self.parse_detail,
                cb_kwargs=dict(area=area),
            )

    def parse_detail(self, response, **kwargs):
        """Yield one item per row of the JSON result page."""
        for row in response.json().get("rows") or []:
            item = NetCreditGradeAItem()
            # NOTE(review): attribute-style assignment — assumes the item
            # class supports it (a plain scrapy.Item would require
            # item["field"] = ...); confirm against the item definition.
            item.taxpayer_id = row.get("nsrsbh")
            item.company_name = row.get("nsrmc")
            item.year = row.get("pd_nd")  # the grading year as reported by the portal
            item.province = self.province
            yield item


def run():
    """Launch this spider through the Scrapy CLI (blocks until the crawl ends)."""
    from scrapy import cmdline

    # Reuse the spider's declared name so the CLI command can never drift
    # out of sync with the class attribute.
    cmdline.execute(["scrapy", "crawl", JiangsuGradeSpider.name])


if __name__ == "__main__":
    run()
