#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/25 14:17
# @Author  : 王凯
# @File    : guangxi_grade.py
# @Project : spider-man
import datetime

import scrapy

from apps.creadit_grade_a.creadit_grade_a.items import NetCreditGradeAItem
from utils.tools import urlencode


class GuangXiGradeSpider(scrapy.Spider):
    """Spider for A-grade taxpayer credit ratings published by Guangxi province.

    Flow: hit the HTML announcement page once (establishes the session),
    then query the JSON endpoint for each of the last two completed rating
    years, paging through the results 100 records at a time.
    """

    name = "guangxi_grade"
    province = "广西"
    # HTML entry page (port 9723). Kept as ``url`` for backward compatibility.
    url = "https://etax.guangxi.chinatax.gov.cn:9723/web/dzswj/taxclient/ggfw/nsxyAjnsrmdggl.html"
    # JSON query endpoint (port 9724) shared by parse_nd/parse_page; the
    # double slash is preserved from the original working URL.
    data_url = "https://etax.guangxi.chinatax.gov.cn:9724//taxclient/wssq/cxtj/selectNSXY_GGL_CX.do"
    # Records requested per page; also the divisor for the page count.
    page_size = 100
    Request = scrapy.Request
    FormRequest = scrapy.FormRequest

    def start_requests(self):
        # Load the entry page first, then fan out per rating year.
        yield self.Request(self.url, callback=self.parse_nd)

    def parse_nd(self, response, **kwargs):
        """Issue the first results page for each of the last two rating years."""
        current_year = datetime.datetime.now().year
        for nd in range(current_year - 2, current_year):
            data = {
                "PJND": f"{nd}",
                "NSRSBH_XYDJ": "",
                "NSRMC": "",
                "page": "1",
                "limit": f"{self.page_size}",
            }
            yield self.Request(
                self.data_url + "?" + urlencode(data),
                callback=self.parse_page,
                cb_kwargs={"nd": nd},
            )

    def parse_page(self, response, **kwargs):
        """Emit items from page 1 and schedule the remaining pages of the year."""
        yield from self.parse_detail(response, **kwargs)
        total_count = response.json().get("count")
        if total_count:
            # Ceiling division. The original ``count // 100 + 1`` requested
            # one extra, empty page whenever count was an exact multiple of 100.
            total_page = -(-int(total_count) // self.page_size)
            nd = kwargs.get("nd")
            for page in range(2, total_page + 1):
                data = {
                    "PJND": f"{nd}",
                    "NSRSBH_XYDJ": "",
                    "NSRMC": "",
                    "page": f"{page}",
                    "limit": f"{self.page_size}",
                }
                yield self.Request(
                    self.data_url + "?" + urlencode(data),
                    callback=self.parse_detail,
                )

    def parse_detail(self, response, **kwargs):
        """Yield one item per taxpayer record in the JSON payload."""
        # ``or []`` guards against a missing/None "data" key (e.g. an empty
        # page), which previously raised TypeError when iterated.
        for data in response.json().get("data") or []:
            item = NetCreditGradeAItem()
            # NOTE(review): attribute-style assignment assumes
            # NetCreditGradeAItem is not a plain scrapy.Item (those require
            # item["field"] = ...) — confirm against the item definition.
            item.taxpayer_id = data.get("NSRSBH")
            item.company_name = data.get("NSRMC")
            item.year = data.get("PD_ND")
            item.province = self.province
            yield item


def run():
    """Launch the guangxi_grade spider through Scrapy's command line."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "guangxi_grade"])


if __name__ == "__main__":
    run()
