# -*- coding: utf-8 -*-
# @Time    : 2023/9/25 16:27
# @Author  : 王凯
# @File    : gansu_grade.py
# @Project : spider-man

import re

import scrapy

from apps.creadit_grade_a.creadit_grade_a.items import NetCreditGradeAItem
from utils.tools import urlencode


class GansuGradeSpider(scrapy.Spider):
    """Collect A-grade tax-credit ratings published by the Gansu provincial
    tax bureau (国家税务总局甘肃省税务局).

    Flow:
      1. ``start_requests`` fetches the first listing page.
      2. ``parse_page`` emits the rows already on that page, reads the total
         record count from inline JS, and schedules paginated POSTs against
         the ``dataproxy.jsp`` endpoint.
      3. ``parse_detail`` turns each ``<tr>`` with at least three ``<td>``
         cells into a ``NetCreditGradeAItem``.
    """

    name = "gansu_grade"
    province = "甘肃"  # province label stamped onto every yielded item
    url = "http://gansu.chinatax.gov.cn/col/col42/index.html"
    # Exposed as class attributes so tests can substitute stub request classes.
    Request = scrapy.Request
    FormRequest = scrapy.FormRequest

    # Size of each startrecord/endrecord window requested from dataproxy.jsp.
    PAGE_SIZE = 120

    def start_requests(self):
        """Kick off the crawl with the first listing page."""
        url = "http://gansu.chinatax.gov.cn/col/col9558/index.html"
        yield self.Request(url, callback=self.parse_page)

    def parse_page(self, response, **kwargs):
        """Parse the first listing page and schedule the remaining pages.

        The page embeds ``totalRecord:<n>,`` in inline JavaScript; that count
        drives how many paginated dataproxy requests are issued.
        """
        # The first page already contains a results table — emit its rows.
        yield from self.parse_detail(response, **kwargs)

        # Use search() instead of findall()[0]: if the marker is missing
        # (layout change, anti-bot page) we log and stop rather than raise
        # IndexError and kill the callback.
        match = re.search(r"totalRecord:(\d+),", response.text)
        if match is None:
            self.logger.warning("totalRecord marker not found on %s", response.url)
            return
        total_records = int(match.group(1))

        data = {
            "col": "1",
            "webid": "1",
            "path": "http://gansu.chinatax.gov.cn/",
            "columnid": "9558",
            "sourceContentType": "1",
            "unitid": "43459",
            "webname": "国家税务总局甘肃省税务局",
            "permissiontype": "0",
        }
        url = "http://gansu.chinatax.gov.cn/module/web/jpage/dataproxy.jsp"
        # Ceiling division: the previous floor division (`total // 120`)
        # dropped the final partial page — up to PAGE_SIZE - 1 records lost.
        total_page = -(-total_records // self.PAGE_SIZE)
        for page in range(total_page):
            params = {
                "startrecord": f"{1 + page * self.PAGE_SIZE}",
                "endrecord": f"{(page + 1) * self.PAGE_SIZE}",
                "perpage": "40",
            }
            yield self.FormRequest(
                url + "?" + urlencode(params), formdata=data, callback=self.parse_detail,
            )

    def parse_detail(self, response, **kwargs):
        """Yield one item per table row with at least three ``<td>`` cells."""
        row_pt = re.compile("<tr>(.*?)</tr>", re.S)
        cell_pt = re.compile("<td>(.*?)</td>", re.S)
        for row in row_pt.findall(response.text):
            cells = cell_pt.findall(row)
            if len(cells) < 3:
                # Header rows use <th> (no <td> cells) and would previously
                # raise IndexError — skip anything without three data cells.
                continue
            item = NetCreditGradeAItem()
            item.taxpayer_id = re.sub(r"\s+", "", cells[0])
            item.company_name = re.sub(r"\s+", "", cells[1])
            item.year = re.sub(r"\s+", "", cells[2])
            item.province = self.province
            yield item


def run():
    """Launch this spider through the Scrapy CLI (blocks until the crawl ends)."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "gansu_grade"])


# Allow running this spider directly: `python gansu_grade.py`.
if __name__ == "__main__":
    run()
