#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/21 19:16
# @Author  : 王凯
# @File    : shanghai_grade.py
# @Project : spider-man
import datetime
import re
from typing import Iterable

import scrapy

from apps.creadit_grade_a.creadit_grade_a.items import NetCreditGradeAItem
from utils.tools import parse_url_params


class ShanghaiGradeSpider(scrapy.Spider):
    """Crawl A-level taxpayer credit-grade listings from the Shanghai tax bureau.

    Flow: fetch the query landing page, extract the district (tax office)
    codes from the ``gofjlink`` onclick handlers, then POST one query per
    (year, district) pair for the last three years and paginate through the
    15-row result tables, yielding one ``NetCreditGradeAItem`` per data row.
    """

    name = "shanghai_grade"
    province = "上海"  # province label stamped onto every yielded item
    url = "https://shanghai.chinatax.gov.cn/newxbwz/tycx/TYCXqynsxydjcxCtrl-gotoNsrxydjcxNew.pfv?action=2&curPage=1"
    # Exposed as class attributes so the method return annotations below
    # resolve inside the class body.
    Request = scrapy.Request
    FormRequest = scrapy.FormRequest
    # The site renders 15 result rows per page.
    PAGE_SIZE = 15

    def start_requests(self) -> Iterable[Request]:
        """Open the query landing page that lists the district links."""
        url = "https://shanghai.chinatax.gov.cn/newxbwz/tycx/TYCXqynsxydjcxCtrl-gotoNsrxydjcxNew.pfv"
        yield self.Request(url, callback=self.parse)

    def parse(self, response, **kwargs) -> Iterable[Request]:
        """Extract district codes and issue one query per (year, district).

        Queries cover the current year and the two preceding years.
        """
        area_nodes = response.xpath('//div[contains(@onclick, "gofjlink")]')
        # gofjlink('<code>') carries the tax-office code for each district.
        area_codes = [node.re_first(r"gofjlink\(\'(.*?)\'\)") for node in area_nodes]
        # Drop nodes whose onclick did not match the expected pattern:
        # a None code would break FormRequest, which requires string values.
        area_codes = [code for code in area_codes if code]
        url = "https://shanghai.chinatax.gov.cn/newxbwz/tycx/TYCXqynsxydjcxCtrl-gotoNsrxydjcxNew.pfv?action=2&curPage=1"
        this_year = datetime.datetime.now().year
        for nd in range(this_year - 2, this_year + 1):
            for area in area_codes:
                data = {
                    "txtNsrdjh": "",
                    "txtNsrmc": "",
                    "nd": f"{nd}",
                    "login_wz_yzm": "请输入验证码",
                    "txtChangeNum": "",
                    "swjgdm": area,
                    "yzmmsg": "",
                }
                yield self.FormRequest(url, formdata=data, callback=self.parse_list)

    def parse_list(self, response, **kwargs):
        """Parse one result page: yield its items, then request the next page."""
        yield from self.parse_detail(response, **kwargs)
        # curPage travels in the URL query string; the original form fields
        # travel in the POST body and must be resent to fetch the next page.
        _, request_params = parse_url_params(response.request.url)
        _, request_body_data = parse_url_params(response.request.body.decode())
        total = response.xpath(".").re_first(r"共\s*(\d+)\s*条")
        if total:
            # Ceiling division: the previous `total // 15 + 1` over-counted by
            # one page whenever total was an exact multiple of the page size,
            # triggering a request for an empty page.
            all_page = -(-int(total) // self.PAGE_SIZE)
            cur_page = int(request_params.get("curPage", "1"))
            if cur_page < all_page:
                url = (
                    "https://shanghai.chinatax.gov.cn/newxbwz/tycx/"
                    f"TYCXqynsxydjcxCtrl-gotoNsrxydjcxNew.pfv?action=2&curPage={cur_page + 1}"
                )
                yield self.FormRequest(url, formdata=request_body_data, callback=self.parse_list)

    def parse_detail(self, response, **kwargs):
        """Yield one NetCreditGradeAItem per data row of the result table."""
        rows = response.xpath('//form[@name="form1"]//table[contains(@class, "table-hover")]//tr[not(@class)]')
        for row in rows:
            # Header/placeholder rows lack a second cell; skip them.
            if row.xpath("./td[2]"):
                item = NetCreditGradeAItem()
                # Cells contain layout whitespace/newlines; strip all of it.
                item.taxpayer_id = re.sub(r"\s+", "", row.xpath("string(./td[1])").get())
                item.company_name = re.sub(r"\s+", "", row.xpath("string(./td[2])").get())
                item.year = re.sub(r"\s+", "", row.xpath("string(./td[3])").get())
                item.province = self.province
                yield item


def run():
    """Launch this spider through the Scrapy command line (blocks until done)."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "shanghai_grade"])


# Allow running this spider directly as a script (instead of `scrapy crawl`).
if __name__ == "__main__":
    run()
