#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/9/10 11:16
# @Author  : 王凯
# @File    : hunan_grade.py
# @Project : scrapy_spider

import datetime
import re

import scrapy

from apps.creadit_grade_a.creadit_grade_a.items import NetCreditGradeAItem
from utils.tools import parse_url_params


class HunanGradeSpider(scrapy.Spider):
    """Spider for the Hunan taxpayer credit-grade search service.

    Pages through the JSON search endpoint for the two most recent
    grading years (most recent first) and yields one
    ``NetCreditGradeAItem`` per taxpayer record.
    """

    name = "hunan_grade"
    province = "湖南"
    url = "http://hunan.chinatax.gov.cn/taxpayercreditsearch/20190413003983"
    # JSON data endpoint POSTed to by start_requests/parse_page.
    data_url = "https://hunan.chinatax.gov.cn/taxpayercreditsearchgetdata"
    Request = scrapy.Request
    FormRequest = scrapy.FormRequest

    def _form_data(self, page, year, limit="10000", site_name=""):
        """Build the POST form payload for one results page.

        All values are strings, as required by FormRequest's formdata.
        """
        return {
            "page": str(page),
            "limit": limit,
            "taxpayer_number": "",
            "taxpayer_name": "",
            "year": str(year),
            "siteName": site_name,
            "_csrf": '',
        }

    def start_requests(self):
        """Issue the first-page request for each of the last two full
        years, most recent year first (matches the original reversed
        ``range(year - 2, year)``)."""
        current_year = datetime.datetime.now().year
        for year in range(current_year - 1, current_year - 3, -1):
            yield self.FormRequest(
                self.data_url,
                formdata=self._form_data(1, year),
                callback=self.parse_page,
                cb_kwargs={"page": 1},
            )

    def parse_page(self, response, **kwargs):
        """Yield the current page's items, then request the next page
        until ``pagecount`` is exhausted.

        cb_kwargs carries ``page`` (1-based page number of *this*
        response).
        """
        yield from self.parse_detail(response, **kwargs)
        result = response.json()
        # "pagecount" may be absent/None on an error response; treat as 0
        # so the comparison below cannot raise TypeError.
        page_count = result.get("pagecount") or 0
        # Recover the form fields of this request so the next page keeps
        # the same year/limit/siteName.
        _, request_data = parse_url_params(response.request.body.decode())
        page = kwargs.get("page")
        # Strict "<": the original "<=" requested one empty page past the
        # last one (page_count + 1) for every year.
        if page < page_count:
            self.logger.info(f"爬取进度 :{page}/{page_count}")
            yield self.FormRequest(
                self.data_url,
                formdata=self._form_data(
                    page + 1,
                    request_data.get("year"),
                    limit=request_data.get("limit"),
                    site_name=request_data.get("siteName"),
                ),
                callback=self.parse_page,
                cb_kwargs={"page": page + 1},
            )

    def parse_detail(self, response, **kwargs):
        """Yield one NetCreditGradeAItem per record on this page.

        All whitespace is stripped from the scraped string fields.
        """
        # "data" may be absent/None on an error response; yield nothing
        # instead of raising while iterating None.
        records = response.json().get("data") or []
        for record in records:
            item = NetCreditGradeAItem()
            # NOTE(review): attribute-style assignment assumes the item
            # class supports it (a plain scrapy.Item would require
            # item["field"] = ...) — confirm against the item definition.
            # ".. or ''" guards against missing/None fields in a record.
            item.taxpayer_id = re.sub(r"\s+", "", record.get("taxpayerNumber") or "")
            item.company_name = re.sub(r"\s+", "", record.get("taxpayerName") or "")
            item.year = re.sub(r"\s+", "", record.get("year") or "")
            item.province = self.province
            yield item


def run():
    """Launch this spider through the Scrapy CLI (blocking call)."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "hunan_grade"])


# Allow running this module directly as a script to start the crawl.
if __name__ == "__main__":
    run()
