#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/25 13:48
# @Author  : 王凯
# @File    : fujian_grade.py
# @Project : spider-man
import datetime
import json

import scrapy

from apps.creadit_grade_a.creadit_grade_a.items import NetCreditGradeAItem


class FujianGradeSpider(scrapy.Spider):
    """Crawl annual A-grade taxpayer credit lists from the Fujian e-tax site.

    For each (rating year, district code) pair it POSTs a JSON query to the
    credit-list API, follows pagination, and yields one
    ``NetCreditGradeAItem`` per taxpayer row.
    """

    name = "fujian_grade"
    province = "福建"
    url = "https://etax.fujian.chinatax.gov.cn/grbsm-web/tax-credit-a-list?type=3"
    Request = scrapy.Request
    FormRequest = scrapy.FormRequest

    # JSON query endpoint shared by the first-page and follow-up page requests.
    query_url = "https://etax.fujian.chinatax.gov.cn/zrrssgk-grbsm/web/grbsm/nsxymd/cx"
    page_size = 15
    query_headers = {
        "Content-Type": "application/json",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36",
    }

    def _query_request(self, year, xzqhszDm, page, callback, cb_kwargs=None):
        """Build one POST request against the credit-list query API.

        Centralizes the payload/header construction that was previously
        duplicated in ``start_requests`` and ``parse_page``.
        """
        payload = {
            "nsrsbh": "",
            "nsrmc": "",
            "pdnd": f"{year}",
            "xzqhszDm": f"{xzqhszDm}",
            "vcode": "",
            "vcodeUuid": "",
            "pageIndex": page,
            "pageSize": self.page_size,
            "czlx": "2"
        }
        return self.FormRequest(
            self.query_url,
            body=json.dumps(payload),
            method="POST",
            headers=self.query_headers,
            callback=callback,
            cb_kwargs=cb_kwargs or {},
        )

    def start_requests(self):
        # District codes (行政区划数字代码) of Fujian to enumerate.
        xzqhszDm_list = ['350100', '350600', '350500', '350400', '350300', '350700', '350800', '350900', '350128']
        this_year = datetime.datetime.now().year
        # Ratings appear to be published for the two preceding years
        # (current year excluded) — TODO confirm against the site.
        for year in range(this_year - 2, this_year):
            for xzqhszDm in xzqhszDm_list:
                yield self._query_request(
                    year, xzqhszDm, 1, self.parse_page,
                    {"year": year, "xzqhszDm": xzqhszDm},
                )

    def parse_page(self, response, **kwargs):
        """Emit items from page 1 and schedule requests for remaining pages."""
        yield from self.parse_detail(response, **kwargs)
        # "total" is the record count; guard against a null/missing value
        # (the old code crashed on int(None)).
        total = int((response.json().get("data") or {}).get("total") or 0)
        # Ceiling division. The previous (total // 15) + 1 requested one
        # extra empty page whenever total was an exact multiple of 15.
        total_page = -(-total // self.page_size)
        year = kwargs.get("year")
        xzqhszDm = kwargs.get("xzqhszDm")
        for page in range(2, total_page + 1):
            # Forward year/district so parse_detail sees the same kwargs as
            # it does for page 1 (the old code dropped them here).
            yield self._query_request(
                year, xzqhszDm, page, self.parse_detail,
                {"year": year, "xzqhszDm": xzqhszDm},
            )

    def parse_detail(self, response, **kwargs):
        """Yield one ``NetCreditGradeAItem`` per taxpayer row in the response."""
        # Guard against a null "data" payload, consistent with parse_page.
        datas = (response.json().get("data") or {}).get("list") or []
        for data in datas:
            item = NetCreditGradeAItem()
            # NOTE(review): scrapy.Item fields must be set dict-style;
            # attribute assignment (item.taxpayer_id = ...) raises
            # AttributeError on scrapy.Item subclasses. Assumes
            # NetCreditGradeAItem is a scrapy.Item — confirm in items.py.
            item["taxpayer_id"] = data.get("nsrsbh")
            item["company_name"] = data.get("nsrmc")
            item["year"] = data.get("pdnd")
            item["province"] = self.province
            yield item


def run():
    """Launch this spider through the Scrapy CLI from within Python."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "fujian_grade"])


if __name__ == "__main__":
    # Allow running the spider directly as a script instead of via `scrapy crawl`.
    run()
