#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/22 13:48
# @Author  : 王凯
# @File    : tianjing_grade.py
# @Project : spider-man
import datetime
import re

import scrapy

from apps.creadit_grade_a.creadit_grade_a.items import NetCreditGradeAItem


class TianjinGradeSpider(scrapy.Spider):
    """Spider for Tianjin A-grade taxpayer credit listings.

    Flow: fetch the district-selection page, submit the search form once
    per district (``fjdm``) per year (previous + current year), then page
    through each result set and yield one item per taxpayer row.
    """

    name = "tianjing_grade"
    province = "天津"
    url = "https://tianjin.chinatax.gov.cn/wzcx/nsrxydjACx_init.action"
    # Request classes exposed as attributes so they can be swapped out
    # (e.g. for a custom subclass) without touching the parse methods.
    Request = scrapy.Request
    FormRequest = scrapy.FormRequest

    def start_requests(self):
        """Start crawling from the district-selection page."""
        yield self.Request(self.url, callback=self.parse_area)

    def parse_area(self, response, **kwargs):
        """Yield a first-page search request per district per year.

        Districts are identified by the ``fjdm`` query parameter on the
        page's area links; years covered are last year and this year.
        """
        area_list = response.xpath('//a[@class="LJ"]/@href')
        total_url = "https://tianjin.chinatax.gov.cn/wzcx/nsrxydjACx_xxcx.action"
        current_year = datetime.datetime.now().year
        for url in area_list:
            fjdm = url.re_first("fjdm=(.*)")
            if fjdm is None:
                # Link without a district code — nothing to query.
                continue
            for year in range(current_year - 1, current_year + 1):
                data = {
                    "pageCount": "1",
                    "pageNum": "1",
                    "nd": str(year),
                    "nsrsbh": "",
                    "nsrmc": "",
                    "fjdm": fjdm,
                }
                yield self.FormRequest(total_url, formdata=data, callback=self.parse_area_page)

    def parse_area_page(self, response, **kwargs):
        """Parse the rows of page 1, then request the remaining pages.

        The page embeds ``pageCount``/``nd``/``fjdm`` as JS variables in
        its last <script> tag; those are re-submitted for pages 2..N.
        """
        yield from self.parse_detail(response, **kwargs)
        text = response.xpath('//script[@language="javascript"]')[-1]
        pageCount = text.re_first(r"var pageCount = '(\d+)'")
        nd = text.re_first(r"var nd = '(\d+)'")
        fjdm = text.re_first(r"var fjdm = '(.*?)'")
        if pageCount is None:
            # Pagination script absent (e.g. empty result set) —
            # int(None) would raise TypeError, so stop after page 1.
            return
        url = "https://tianjin.chinatax.gov.cn/wzcx/nsrxydjACx_xxcx.action"
        for page in range(2, int(pageCount) + 1):
            data = {
                "pageCount": str(pageCount),
                "pageNum": f"{page}",
                "nd": str(nd),
                "nsrsbh": "",
                "nsrmc": "",
                "fjdm": str(fjdm),
            }
            yield self.FormRequest(url, formdata=data, callback=self.parse_detail)

    def parse_detail(self, response, **kwargs):
        """Yield one NetCreditGradeAItem per taxpayer row in the table.

        Header rows carry class="t05c" and are excluded; whitespace is
        stripped from every extracted cell.
        """
        datas = response.xpath('//table[@cellspacing="1"]//tr[not(@class="t05c")]')
        for data in datas:
            item = NetCreditGradeAItem()
            # string(...) XPath always yields a str ("" when empty),
            # so re.sub never receives None here.
            item.taxpayer_id = re.sub(r"\s+", "", data.xpath("string(./td[2])").get())
            item.company_name = re.sub(r"\s+", "", data.xpath("string(./td[3])").get())
            item.year = re.sub(r"\s+", "", data.xpath("string(./td[4])").get())
            item.province = self.province
            yield item


def run():
    """Launch this spider through the Scrapy command line."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "tianjing_grade"])


# Allow launching the spider directly: `python tianjing_grade.py`.
if __name__ == "__main__":
    run()
