#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/25 14:19
# @Author  : 王凯
# @File    : hainan_grade.py
import datetime
import re

import scrapy

from apps.creadit_grade_a.creadit_grade_a.items import NetCreditGradeAItem
from utils.tools import parse_url_params


class HainanGradeSpider(scrapy.Spider):
    """Scrape A-grade taxpayer credit ratings published by the Hainan
    provincial tax bureau (hainan.chinatax.gov.cn).

    Flow: fetch the landing page -> enumerate (area, year) combinations ->
    POST the paginated JSON endpoint -> yield one item per table row.
    """

    name = "hainan_grade"
    province = "海南"
    # NOTE(review): this URL points at the *Guangxi* tax site and is never
    # referenced by this spider -- it looks like a leftover from a copied
    # template. Kept unchanged in case external code reads the attribute.
    url = "https://etax.guangxi.chinatax.gov.cn:9723/web/dzswj/taxclient/ggfw/nsxyAjnsrmdggl.html"
    Request = scrapy.Request
    FormRequest = scrapy.FormRequest

    def start_requests(self):
        """Kick off the crawl by fetching the public listing landing page."""
        url = "https://hainan.chinatax.gov.cn/bsfw_5_1/"
        yield self.Request(url)

    def parse(self, response, **kwargs):
        """Extract the area names from the landing page and issue one POST
        per (area, year) pair for the two most recent completed years."""
        url = "https://hainan.chinatax.gov.cn/bsfw_5_1.json"
        current_year = datetime.datetime.now().year
        year_list = list(range(current_year - 2, current_year))  # two previous years
        area_list = [
            a.xpath("string(.)").get()
            for a in response.xpath("//li/a[contains(@onclick, 'bsfw_5_1')]")
        ]
        for area in area_list:
            for year in year_list:
                data = {"name": "", "id": "", "pageNo": "1", "year": f"{year}", "areaName": area}
                yield self.FormRequest(url, formdata=data, callback=self.parse_page)

    def parse_page(self, response, **kwargs):
        """Parse the first result page, then request every remaining page of
        the same (year, area) query based on the pagination summary."""
        # Recover the original form fields from the request body so the
        # follow-up page requests repeat the same (year, area) query.
        _, request_data = parse_url_params(response.request.body.decode())
        url = "https://hainan.chinatax.gov.cn/bsfw_5_1.json"
        # Rows on the first page are yielded directly.
        yield from self.parse_detail(response, **kwargs)
        # Pagination summary looks like "... 条 N 页"; fall back to "0" when
        # absent so int() below never receives None.
        total_page = response.xpath("string(//span)").re_first(r"条\s*(.*?)页\s*") or "0"
        for page in range(2, int(total_page) + 1):
            data = {
                "name": "",
                "id": "",
                "pageNo": f"{page}",
                "year": request_data.get("year"),
                "areaName": request_data.get("areaName"),
            }
            yield self.FormRequest(url, formdata=data, callback=self.parse_detail)

    def parse_detail(self, response, **kwargs):
        """Yield one item per table row: taxpayer id, company name, year."""
        # NOTE(review): attribute-style assignment would raise AttributeError
        # if NetCreditGradeAItem subclasses scrapy.Item (which requires
        # item["field"] = ...). It presumably is a dataclass/attrs item --
        # verify against the items module before changing this.
        for row in response.xpath("//tr"):
            item = NetCreditGradeAItem()
            # string(./td[N]) always yields a str (possibly ""), so re.sub is safe.
            item.taxpayer_id = re.sub(r"\s+", "", row.xpath("string(./td[1])").get())
            item.company_name = re.sub(r"\s+", "", row.xpath("string(./td[2])").get())
            item.year = re.sub(r"\s+", "", row.xpath("string(./td[3])").get())
            item.province = self.province
            yield item


def run():
    """Launch this spider through Scrapy's command-line entry point."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "hainan_grade"])


# Allow launching the spider directly: `python hainan_grade.py`.
if __name__ == "__main__":
    run()
