#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/25 15:16
# @Author  : 王凯
# @File    : shaanxi_grade.py
# @Project : spider-man
import datetime
import re

import scrapy

from apps.creadit_grade_a.creadit_grade_a.items import NetCreditGradeAItem
from utils.tools import urlencode, parse_url_params


class ShaanxiGradeSpider(scrapy.Spider):
    """Crawl taxpayer credit-grade listings from the Shaanxi tax bureau.

    Flow: fetch the query page, read the list of rating years (pjnd),
    request page 1 of the JSON results for the previous and current year,
    then fan out requests for the remaining pages, yielding one
    ``NetCreditGradeAItem`` per taxpayer row.
    """

    name = "shaanxi_grade"
    province = "陕西"
    # Entry page listing the available rating years.
    url = "https://shaanxi.chinatax.gov.cn/sxswjajcx/ajnsr/query.do"
    Request = scrapy.Request

    def start_requests(self):
        # Reuse the class-level URL instead of repeating the literal.
        yield self.Request(self.url, callback=self.parse_nd)

    def parse_nd(self, response, **kwargs):
        """Extract the rating-year list and request page 1 for recent years."""
        nd_list = response.xpath('//ul[@class="pjnd-list"]/li/text()').getall()
        url = "https://shaanxi.chinatax.gov.cn/sxswjajcx/ajnsr/queryResult.do"
        # Only the previous and the current year are crawled. Compute the
        # window once instead of calling datetime.now() twice per iteration.
        current_year = datetime.datetime.now().year
        wanted_years = {str(y) for y in (current_year - 1, current_year)}
        for nd in nd_list:
            if nd[:4] in wanted_years:
                data = {"pjnd": nd[:4], "pageNo": "1"}
                yield self.Request(url + "?" + urlencode(data), callback=self.parse_page)

    def parse_page(self, response, **kwargs):
        """Parse page 1 inline, then schedule requests for pages 2..pageSum."""
        root_url, request_params = parse_url_params(response.request.url)
        # Page 1 is parsed directly; later pages go through the callback.
        yield from self.parse_detail(response, **kwargs)
        total_page = response.json().get("params").get("pageSum")
        if total_page:
            for page in range(2, int(total_page) + 1):
                data = {"pjnd": request_params.get("pjnd"), "pageNo": page}
                yield self.Request(root_url + "?" + urlencode(data), callback=self.parse_detail)

    def parse_detail(self, response, **kwargs):
        """Yield one item per taxpayer row in a JSON result page."""
        _, request_params = parse_url_params(response.request.url)
        datas = response.json().get("params")
        if datas:
            # Fixed: the previous statement logged a malformed tuple repr
            # (including the literal text "currpage") instead of the values.
            self.logger.info(
                "currpage pjnd=%s pageNo=%s",
                request_params.get("pjnd"),
                request_params.get("pageNo", "1"),
            )
            for data in datas.get("dataList"):
                item = NetCreditGradeAItem()
                # NOTE(review): attribute-style assignment assumes the item
                # class supports it (plain scrapy.Item requires item["field"])
                # — confirm against the NetCreditGradeAItem definition.
                item.taxpayer_id = re.sub(r"\s+", "", data.get("nsrsbh"))
                item.company_name = re.sub(r"\s+", "", data.get("nsrmc"))
                item.year = re.sub(r"\s+", "", data.get("pd_ND"))
                item.province = self.province
                yield item


def run():
    """Start the shaanxi_grade spider through Scrapy's CLI helper."""
    from scrapy import cmdline

    command = "scrapy crawl shaanxi_grade"
    cmdline.execute(command.split())


if __name__ == "__main__":
    # Allow executing this module directly to start the crawl.
    run()
