#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/26 11:05
# @Author  : 王凯
# @File    : dalian_grade.py
# @Project : spider-man
import datetime

import scrapy

from apps.creadit_grade_a.creadit_grade_a.items import NetCreditGradeAItem


class DalianGradeSpider(scrapy.Spider):
    """Spider for A-grade taxpayer credit ratings on the Dalian e-tax site.

    Flow: ``start_requests`` hits the year-list endpoint, ``parse`` issues one
    first-page POST per target year, ``parse_page`` yields page-1 rows and
    schedules the remaining pages, and ``parse_detail`` turns result rows into
    :class:`NetCreditGradeAItem` instances.
    """

    province = "大连"  # province label stamped onto every emitted item
    name = "dalian_grade"
    url = "https://etax.dalian.chinatax.gov.cn/nsr/wzdk/toAnsrxydjcx?0.532704183722605"
    # Kept as class attributes for backward compatibility with existing callers.
    Request = scrapy.Request
    FormRequest = scrapy.FormRequest

    def start_requests(self):
        """Kick off the crawl by requesting the list of available rating years."""
        url = "https://etax.dalian.chinatax.gov.cn/nsr/wzdk/getYxndList?"
        yield self.Request(url)

    def parse(self, response, **kwargs):
        """Issue one first-page query per target year (the two years before now)."""
        current_year = datetime.datetime.now().year  # hoisted: was computed twice
        for year in (str(y) for y in range(current_year - 2, current_year)):
            url = "https://etax.dalian.chinatax.gov.cn/nsr/wzdk/getAxycx"
            data = {"nsrmc": "", "nsrsbh": "", "pdnd": year, "swjgDm": "", "pageSize": "15", "pageNum": "1"}
            yield self.FormRequest(url, formdata=data, method="POST", callback=self.parse_page, cb_kwargs={"year": year})

    def parse_page(self, response, **kwargs):
        """Emit the items on page 1, then schedule requests for pages 2..N."""
        yield from self.parse_detail(response, **kwargs)
        # Guard: a malformed response without "data"/"total" previously raised
        # AttributeError; treat it as an empty result set instead.
        payload = response.json().get("data") or {}
        total = payload.get("total") or 0
        # Ceiling division replaces the original remainder/if arithmetic:
        # last_page == ceil(total / 15), identical results for every total.
        last_page = -(-total // 15)
        year = kwargs.get("year")
        for page in range(2, last_page + 1):
            url = "https://etax.dalian.chinatax.gov.cn/nsr/wzdk/getAxycx"
            data = {
                "nsrmc": "",
                "nsrsbh": "",
                "pdnd": year,
                "swjgDm": "",
                "pageSize": "15",
                "pageNum": str(page),
            }
            yield self.FormRequest(url, formdata=data, method="POST", callback=self.parse_detail)

    def parse_detail(self, response, **kwargs):
        """Convert one result page's rows into NetCreditGradeAItem objects."""
        rows = (response.json().get("data") or {}).get("rows", []) or []
        for row in rows:
            item = NetCreditGradeAItem()
            # NOTE(review): attribute-style assignment assumes NetCreditGradeAItem
            # is a dataclass/attrs-style item; a plain scrapy.Item would require
            # item["field"] = ... — confirm against items.py.
            item.taxpayer_id = row.get("nsrsbh")
            item.company_name = row.get("nsrmc")
            item.year = row.get("pdNd")
            item.province = self.province
            yield item


def run():
    """Launch this spider through Scrapy's command line (blocks until done)."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "dalian_grade"])


# Allow running this module directly as a script.
if __name__ == "__main__":
    run()
