#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/26 11:14
# @Author  : 王凯
# @File    : shenzhen_grade.py
# @Project : spider-man

import datetime
import random
import re

import scrapy

from apps.creadit_grade_a.creadit_grade_a.items import NetCreditGradeAItem
from utils.tools import parse_url_params


class ShenZhenGradeSpider(scrapy.Spider):
    """Spider for Shenzhen A-grade taxpayer-credit listings.

    Crawls the Shenzhen tax bureau's taxpayer-credit search page, POSTs a
    search for each recent evaluation year, walks every result page, and
    yields one ``NetCreditGradeAItem`` per listed company.
    """

    name = "shenzhen_grade"
    province = "深圳"
    # Base search endpoint; a cache-busting ``time`` query param is appended per request.
    url = "https://shenzhen.chinatax.gov.cn/sztaxapp/NsxySearch/index"
    Request = scrapy.Request
    FormRequest = scrapy.FormRequest

    def start_requests(self):
        """Kick off the crawl by fetching the search landing page."""
        yield self.Request(self.url, callback=self.parse)

    def parse(self, response, **kwargs):
        """Read the ``pjyear`` dropdown and POST a search for each recent year.

        Only the previous and current calendar years are requested; older
        years offered by the page are skipped.
        """
        pjyear = response.xpath('//*[@id="pjyear"]/option/@value').getall()
        # Loop-invariant: compute the wanted-year set once instead of
        # rebuilding the list (and calling now() twice) per iteration.
        this_year = datetime.datetime.now().year
        wanted_years = {str(y) for y in range(this_year - 1, this_year + 1)}
        url = f"{self.url}?time={random.random()}"
        for year in pjyear:
            if str(year) in wanted_years:
                data = {"currentPage": "1", "zgswjg": "", "nsrsbh": "", "nsrmc": "", "pjyear": str(year), "token": ""}
                yield self.FormRequest(url, formdata=data, method="POST", callback=self.parser_next_page)

    def parser_next_page(self, response, **kwargs):
        """Emit page 1's items and schedule POSTs for the remaining result pages."""
        root_url, request_data = parse_url_params(response.request.body.decode())
        contentList = response.xpath('//*[@id="contentList"]/tr')
        if contentList:
            # Page 1 already contains rows; yield its items directly.
            yield from self.parse_detail(response, **kwargs)
            # "共 N 页" gives the total page count. Use \d+ (not \d*) so an
            # empty capture can never reach int() and raise ValueError.
            max_page_list = re.findall(r"共\D*(\d+)\D*页", response.text, flags=re.S)
            if max_page_list:
                int_max_page = int(max_page_list[0])
                for page in range(2, int_max_page + 1):
                    data = {
                        # FormRequest expects string values; stringify up front.
                        "currentPage": str(page),
                        "zgswjg": "",
                        "nsrsbh": "",
                        "nsrmc": "",
                        "pjyear": request_data.get("pjyear"),
                        "token": "",
                    }
                    url = f"{self.url}?time={random.random()}"
                    yield self.FormRequest(url, formdata=data, method="POST", callback=self.parse_detail)

    def parse_detail(self, response, **kwargs):
        """Yield one ``NetCreditGradeAItem`` per row of the results table.

        Rows missing the taxpayer id or year are skipped. ``.get()`` may
        return None for a malformed row, so cells are coerced to "" before
        re.sub to avoid a TypeError.
        """
        for tag_tr in response.xpath('//*[@id="contentList"]/tr'):
            company_name = re.sub(r"\s", "", tag_tr.xpath("./td[2]/text()").get() or "")
            taxpayer_id = re.sub(r"\s", "", tag_tr.xpath("./td[1]/text()").get() or "")
            year = re.sub(r"\s", "", tag_tr.xpath("./td[3]/text()").get() or "")
            if all([taxpayer_id, year]):
                item = NetCreditGradeAItem()
                item.taxpayer_id = taxpayer_id
                item.company_name = company_name
                item.year = year
                item.province = self.province
                yield item


def run():
    """Launch this spider through Scrapy's command-line entry point."""
    from scrapy import cmdline

    argv = ["scrapy", "crawl", "shenzhen_grade"]
    cmdline.execute(argv)


# Allow running this module directly as a script (equivalent to `scrapy crawl shenzhen_grade`).
if __name__ == "__main__":
    run()
