#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/21 17:47
# @Author  : 王凯
# @File    : zhejiang_grade.py
# @Project : spider-man
import datetime

import scrapy

from apps.creadit_grade_a.creadit_grade_a.items import NetCreditGradeAItem
from utils.tools import urlencode, parse_url_params


class ZhejiangGradeSpider(scrapy.Spider):
    """Crawl A-grade taxpayer credit ratings from the Zhejiang e-tax site.

    Strategy: request page 1 of the result set for each of the last two
    rating years, read the total row count (``ZJL``) from the first
    response, then fan out requests for the remaining pages.
    """

    name = "zhejiang_grade"
    province = "浙江"
    url = "https://etax.zhejiang.chinatax.gov.cn/zjgfdzswjdjrd/dj/ajnsrcx/dzswj_ajnsrcx.html"
    # JSON query endpoint for the paginated rating list (was duplicated
    # inline in start_requests and parse; hoisted here once).
    query_url = "https://etax.zhejiang.chinatax.gov.cn/zjgfdzswjdjrd/dzzpfw/queryajxsr.do"
    Request = scrapy.Request
    page_size = 100

    def _build_url(self, year, page):
        """Return the query URL for one 1-based *page* of rating year *year*."""
        params = {
            "nsrsbh": "",
            "pjnd": year,
            "dqsy": "",
            # The API pages by absolute row offsets, inclusive on both ends.
            "begin": self.page_size * (page - 1) + 1,
            "end": self.page_size * page,
            "nsrmc": "",
        }
        return self.query_url + "?" + urlencode(params)

    def start_requests(self):
        # Ratings are published per year; cover last year and this year.
        this_year = datetime.datetime.now().year
        for year in range(this_year - 1, this_year + 1):
            yield self.Request(self._build_url(year, 1), callback=self.parse)

    def parse(self, response, **kwargs):
        """Emit items from page 1 and schedule requests for all later pages."""
        yield from self.parse_detail(response, **kwargs)
        datas = response.json().get("data", [])
        if not datas:
            return
        # ZJL carries the total row count for the queried year; guard
        # against a missing/None value, which would otherwise raise.
        total = datas[0].get("ZJL")
        if not total:
            return
        # Ceiling division: number of page_size-sized pages needed.
        last_page = -(-total // self.page_size)
        # Recover the year from the URL we just requested so follow-up
        # pages query the same rating year.
        _, request_data = parse_url_params(response.request.url)
        year = request_data.get("pjnd")
        for page in range(2, last_page + 1):
            yield self.Request(self._build_url(year, page), callback=self.parse_detail)

    def parse_detail(self, response, **kwargs):
        """Yield one NetCreditGradeAItem per taxpayer row in the response."""
        for data in response.json().get("data", []):
            item = NetCreditGradeAItem()
            # NOTE(review): attribute-style assignment assumes
            # NetCreditGradeAItem supports it (a plain scrapy.Item would
            # need item["field"] = ...) — confirm against the item class.
            item.taxpayer_id = data.get("NSRSBH")
            item.company_name = data.get("NSRMC")
            item.year = data.get("PD_ND")
            item.province = self.province
            yield item


def run():
    """Launch the zhejiang_grade spider through Scrapy's CLI entry point."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "zhejiang_grade"])


if __name__ == "__main__":
    # Allow running this file directly as a script instead of via `scrapy crawl`.
    run()
