#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/25 15:04
# @Author  : 王凯
# @File    : yunan_grade.py
# @Project : spider-man
import datetime

import scrapy

from apps.creadit_grade_a.creadit_grade_a.items import NetCreditGradeAItem
from utils.tools import urlencode, parse_url_params


class YuNanGradeSpider(scrapy.Spider):
    """Spider collecting taxpayer credit-grade records for Yunnan province.

    Flow: fetch the query page to discover the available evaluation years,
    then page through the JSON result endpoint for the current and previous
    year, yielding one item per taxpayer row.
    """

    name = "yunan_grade"
    province = "云南"
    # NOTE(review): this URL points at the Tianjin tax site and is never used
    # by any method below — looks like copy-paste residue from a sibling
    # spider; kept unchanged in case something external reads the attribute.
    url = "https://tianjin.chinatax.gov.cn/wzcx/nsrxydjACx_init.action"
    Request = scrapy.Request

    def start_requests(self):
        """Entry point: request the Yunnan query page that lists eval years."""
        url = "https://yunnan.chinatax.gov.cn/ynswjajcx/ajnsr/query.do"
        yield self.Request(url, callback=self.parse_nd)

    def parse_nd(self, response, **kwargs):
        """Extract evaluation years from the query page and request page 1 of
        the results for the current and previous year only.

        :param response: HTML response of the query page.
        """
        nd_list = response.xpath('//select[@id="evalyear"]/option/@value').getall()
        url = "https://yunnan.chinatax.gov.cn/ynswjajcx/ajnsr/queryResult"
        # Fix: hoist the loop-invariant year computation — the original called
        # datetime.now() twice and rebuilt the year list on every iteration.
        current_year = datetime.datetime.now().year
        wanted_years = {str(y) for y in range(current_year - 1, current_year + 1)}
        for nd in nd_list:
            if str(nd) in wanted_years:
                self.logger.info(nd)
                data = {"nsrmc": "", "pjnd": f"{nd}", "nsrsbh": "", "pageNo": "1"}
                yield self.Request(url + "?" + urlencode(data), callback=self.parse_page)

    def parse_page(self, response, **kwargs):
        """Parse the first result page, then fan out requests for the
        remaining pages (2..pageSum) of the same evaluation year.

        :param response: JSON response of page 1 for one year.
        """
        # Page 1 already contains data rows — emit its items first.
        yield from self.parse_detail(response, **kwargs)
        total_page = response.json().get("params").get("pageSum")
        root_url, params = parse_url_params(response.request.url)
        if total_page:
            for page in range(2, int(total_page) + 1):
                data = {"nsrmc": "", "pjnd": params.get("pjnd"), "nsrsbh": "", "pageNo": f"{page}"}
                # dont_filter: page URLs differ only in pageNo and must not be
                # deduplicated away by scrapy's request fingerprinting.
                yield self.Request(
                    root_url + "?" + urlencode(data), callback=self.parse_detail, dont_filter=True,
                )

    def parse_detail(self, response, **kwargs):
        """Yield one NetCreditGradeAItem per taxpayer row in the JSON payload.

        :param response: JSON response containing params.data rows.
        """
        datas = response.json().get("params").get("data")
        root_url, params = parse_url_params(response.request.url)
        # Lazy %-style args: string is only built if INFO logging is enabled.
        self.logger.info("currpage %s %s", params.get("pjnd"), params.get("pageNo", "1"))
        for data in datas:
            # NOTE(review): attribute-style assignment raises AttributeError
            # on plain scrapy.Item subclasses (they require item["field"]);
            # assuming NetCreditGradeAItem supports attribute access — confirm.
            item = NetCreditGradeAItem()
            item.taxpayer_id = data.get("nsrsbh")
            item.company_name = data.get("nsrmc")
            # NOTE(review): response key "pd_ND" differs from the query param
            # "pjnd" — verify against the actual API payload.
            item.year = data.get("pd_ND")
            item.province = self.province
            yield item


def run():
    """Launch the yunan_grade spider through scrapy's cmdline helper."""
    from scrapy import cmdline

    command = "scrapy crawl yunan_grade"
    cmdline.execute(command.split())


# Allow executing this module directly as a script.
if __name__ == "__main__":
    run()
