#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/25 14:14
# @Author  : 王凯
# @File    : guangdong_grade.py
# @Project : spider-man
import datetime

import scrapy

from apps.creadit_grade_a.creadit_grade_a.items import NetCreditGradeAItem
from utils.tools import urlencode


class GuangdongGradeSpider(scrapy.Spider):
    """Scrape A-grade taxpayer credit ratings for Guangdong province.

    Flow: landing page -> one listing request per assessment year (the two
    most recent completed years) -> remaining paginated listing requests ->
    one item per taxpayer record on each JSON page.
    """

    name = "guangdong_grade"
    province = "广东"
    # Landing page used only to obtain an initial session/response.
    url = "https://guangdong.chinatax.gov.cn/gdsw/InitCredit/InitCredit.shtml"
    # JSON listing endpoint, queried once per (year, page) pair.
    list_url = "https://guangdong.chinatax.gov.cn/siteapps/webpage/gdtax/nsrxyaj/nsrxy_a.jsp"
    Request = scrapy.Request
    FormRequest = scrapy.FormRequest

    def start_requests(self):
        """Kick off the crawl from the credit-grade landing page."""
        yield self.Request(self.url, callback=self.parse_nd)

    def parse_nd(self, response, **kwargs):
        """Issue the first listing page for each of the last two assessment years."""
        # Capture the year once so both range bounds agree even if the call
        # happens to straddle a year boundary.
        current_year = datetime.datetime.now().year
        for nd in range(current_year - 2, current_year):
            data = {"nsrsbh": "", "nsrmc": "", "year": f"{nd}", "city": "", "pageNo": "1"}
            yield self.Request(
                self.list_url + "?" + urlencode(data),
                callback=self.parse_page,
                cb_kwargs={"nd": nd},
            )

    def parse_page(self, response, **kwargs):
        """Parse page 1 of a year's results and schedule the remaining pages.

        The first page's records are emitted via parse_detail; the "pages"
        field of the JSON response gives the total page count.
        """
        yield from self.parse_detail(response, **kwargs)
        total_count = response.json()[0].get("pages")
        if total_count:
            nd = kwargs.get("nd")  # loop-invariant: the assessment year
            for page in range(2, int(total_count) + 1):
                data = {"nsrsbh": "", "nsrmc": "", "year": f"{nd}", "city": "", "pageNo": f"{page}"}
                yield self.Request(self.list_url + "?" + urlencode(data), callback=self.parse_detail)

    def parse_detail(self, response, **kwargs):
        """Yield one NetCreditGradeAItem per taxpayer record on a listing page.

        Tolerates a missing or null "data" key (previously this raised
        TypeError when iterating None).
        """
        for data in response.json()[0].get("data") or []:
            item = NetCreditGradeAItem()
            # NOTE(review): attribute-style assignment assumes the item type
            # supports it (e.g. a plain/dataclass item). A scrapy.Item would
            # require item["field"] = ... instead — confirm against the item
            # definition.
            item.taxpayer_id = data.get("NSRSBH")
            item.company_name = data.get("NSRMC")
            item.year = data.get("PDND")
            item.province = self.province
            yield item


def run():
    """Launch this spider through Scrapy's command line (blocks until the crawl ends)."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "guangdong_grade"])


# Allow running this module directly as a standalone crawl entry point.
if __name__ == "__main__":
    run()
