#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/25 14:08
# @Author  : 王凯
# @File    : henan_grade.py
# @Project : spider-man
import datetime

import scrapy

from apps.creadit_grade_a.creadit_grade_a.items import NetCreditGradeAItem
from utils.tools import parse_url_params


class HenanGradeSpider(scrapy.Spider):
    """Scrape taxpayer credit-grade listings for Henan province from the
    provincial e-tax portal.

    Flow: fetch the query page to discover the available rating years
    (``parse_nd``), POST page 1 of the listing for each recent year
    (``parse_page``), then fan out POSTs for the remaining pages, all of
    which land in ``parse_detail`` where items are emitted.
    """

    name = "henan_grade"
    Request = scrapy.Request
    FormRequest = scrapy.FormRequest
    province = "河南"
    url = "https://etax.henan.chinatax.gov.cn/web/dzswj/taxclient/main_gzfw.html?PARAM=1005"

    @staticmethod
    def _build_form(nd, page):
        """Form payload for the listing endpoint.

        Empty filter fields (taxpayer id/name, tax-office code) mean
        "all taxpayers"; ``PDND`` is the rating year, ``PAGE`` the
        1-based page number.
        """
        return {
            "PDND": f"{nd}",
            "rcode": "",
            "NSRSBH": "",
            "NSRMC": "",
            "SWJGDM": "",
            "PAGE": f"{page}",
            "CUR_USERID": "-1",
        }

    def start_requests(self):
        """Entry point: fetch the query page that lists rating years."""
        url = "https://etax.henan.chinatax.gov.cn/web/dzswj/taxclient/ggfw/xydjAjnsrcx.html"
        yield self.Request(url, callback=self.parse_nd)

    def parse_nd(self, response, **kwargs):
        """Request page 1 of results for each year of interest.

        Only last year and the current year are queried; older ratings
        in the dropdown are skipped.
        """
        nd_list = response.xpath('//div[@class="layui-input-block"]//option/@value').getall()
        url = "https://etax.henan.chinatax.gov.cn/web/public/gzfw/ggcx/selectNSRXYDJ.do"
        # Hoisted out of the loop: evaluate "now" once, not per option.
        current_year = datetime.datetime.now().year
        wanted_years = {str(current_year - 1), str(current_year)}
        for nd in nd_list:
            if str(nd) in wanted_years:
                yield self.FormRequest(
                    url, formdata=self._build_form(nd, 1),
                    method="POST", callback=self.parse_page,
                )

    def parse_page(self, response, **kwargs):
        """Handle page 1: emit its items, then schedule the remaining pages.

        The submitted form is recovered from the request body so the
        year (``PDND``) can be reused for the follow-up page requests.
        """
        root_url, request_data = parse_url_params(response.request.body.decode())
        yield from self.parse_detail(response, **kwargs)
        # Each row of the response carries the query's TOTAL count; the
        # page may legitimately be empty, so guard before indexing.
        rows = response.json().get("data") or []
        total_count = rows[0].get("TOTAL") if rows else 0
        self.logger.info(f"""{request_data.get("PDND"), total_count}""")
        if total_count:
            # Ceiling division at 15 records/page. The previous
            # ``// 15 + 1`` over-requested one empty page whenever the
            # count was an exact multiple of 15.
            total_page = -(-int(total_count) // 15)
            nd = request_data.get("PDND")
            for page in range(2, total_page + 1):
                yield self.FormRequest(
                    response.request.url,
                    formdata=self._build_form(nd, page),
                    method="POST",
                    callback=self.parse_detail,
                )

    def parse_detail(self, response, **kwargs):
        """Turn each JSON record of a result page into an item."""
        # ``data`` may be null/absent on an empty page — iterate safely.
        for data in response.json().get("data") or []:
            item = NetCreditGradeAItem()
            item.taxpayer_id = data.get("NSRSBH")
            item.company_name = data.get("NSRMC")
            item.year = data.get("PDND")
            item.province = self.province
            yield item


def run():
    """Launch this spider through the Scrapy command-line runner."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "henan_grade"])


# Allow running the spider directly (`python henan_grade.py`) instead of
# only via `scrapy crawl`.
if __name__ == "__main__":
    run()
