#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/25 14:11
# @Author  : 王凯
# @File    : hubei_grade.py
# @Project : spider-man
import datetime
import re

import scrapy

from apps.creadit_grade_a.creadit_grade_a.items import NetCreditGradeAItem
from utils.tools import parse_url_params


class HubeiGradeSpider(scrapy.Spider):
    """Spider for A-grade taxpayer credit ratings published by the Hubei tax bureau.

    Flow: open the public query page (session bootstrap), POST a first-page
    query per year, then fan out one POST per remaining page and emit one
    item per taxpayer record.
    """

    name = "hubei_grade"
    province = "湖北"
    url = "http://hubei.chinatax.gov.cn/hbsw/nsxyA.html"
    Request = scrapy.Request
    FormRequest = scrapy.FormRequest

    # POST endpoint for the paginated rating query, and the page size every
    # request uses (both were previously duplicated inline).
    QUERY_URL = "https://etax.hubei.chinatax.gov.cn/webroot/gzcxAction.do?method=nsxyztcx"
    PAGE_SIZE = 10

    def start_requests(self):
        """Open the public query page first to obtain session cookies."""
        url = "https://etax.hubei.chinatax.gov.cn/webroot/nsfw/fwdt/gzcx/nsxyztcxQg.jsp"
        yield self.Request(url)

    def parse(self, response, **kwargs):
        """Issue the first-page query for last year and the current year."""
        current_year = datetime.datetime.now().year
        for year in (current_year - 1, current_year):
            data = {
                "page": "1",
                "limit": str(self.PAGE_SIZE),
                "pjnd": f"{year}",
                "nsrsbh": "",
                "nsrmc": "",
                "xydj": "A",
            }
            yield self.FormRequest(
                self.QUERY_URL, formdata=data, method="POST", callback=self.parse_page
            )

    def parse_page(self, response, **kwargs):
        """Emit items from page 1 and schedule requests for all remaining pages.

        The queried year (``pjnd``) is recovered from this response's own
        request body so each follow-up request stays within the same year.
        """
        _, request_data = parse_url_params(response.request.body.decode())
        yield from self.parse_detail(response, **kwargs)
        # Ceiling division. The previous form, (count // 10) + 1, requested
        # one empty extra page whenever count was an exact multiple of the
        # page size; the `or 0` also guards a missing "count" key.
        total = int(response.json().get("count") or 0)
        total_page = -(-total // self.PAGE_SIZE)
        for page in range(2, total_page + 1):
            data = {
                "page": str(page),
                "limit": str(self.PAGE_SIZE),
                "pjnd": request_data.get("pjnd"),
                "nsrsbh": "",
                "nsrmc": "",
                "xydj": "A",
            }
            yield self.FormRequest(
                self.QUERY_URL, formdata=data, method="POST", callback=self.parse_detail
            )

    def parse_detail(self, response, **kwargs):
        """Yield one item per taxpayer record, stripping all whitespace from fields."""
        # `or []` guards a response whose JSON lacks a "data" key (previously
        # a TypeError when iterating None).
        for record in response.json().get("data") or []:
            item = NetCreditGradeAItem()
            # NOTE(review): attribute-style assignment only works if
            # NetCreditGradeAItem is a plain class; a scrapy.Item would
            # require item["field"] syntax — confirm against the item module.
            item.taxpayer_id = re.sub(r"\s+", "", record.get("NSRSBH"))
            item.company_name = re.sub(r"\s+", "", record.get("NSRMC"))
            item.year = re.sub(r"\s+", "", record.get("PD_ND"))
            item.province = self.province
            yield item


def run():
    """Convenience entry point: launch this spider through the scrapy CLI."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "hubei_grade"])


if __name__ == "__main__":
    run()
