#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/25 14:40
# @Author  : 王凯
# @File    : sichuan_grade.py
# @Project : spider-man
import datetime
import re

import scrapy

from apps.creadit_grade_a.creadit_grade_a.items import NetCreditGradeAItem
from utils.tools import urlencode, parse_url_params


class SichuanGradeSpider(scrapy.Spider):
    """Crawl A-grade taxpayer credit listings published by the Sichuan tax bureau.

    Flow: start page -> parse_nd (pick recent years from the year <select>)
    -> parse_page (page 1 + fan-out over remaining pages) -> parse_detail
    (one NetCreditGradeAItem per table row).
    """

    name = "sichuan_grade"
    province = "四川"
    url = "https://sichuan.chinatax.gov.cn/col/col19682/index.html"
    # Alias kept so subclasses/tests can swap the request class if needed.
    Request = scrapy.Request

    def start_requests(self):
        # Reuse the class-level URL instead of duplicating the literal here.
        yield self.Request(self.url, callback=self.parse_nd)

    def parse_nd(self, response, **kwargs):
        """Request the first result page for last year and the current year.

        Reads the hidden `field` token and the year options from the landing
        page, then builds one search request per wanted year.
        """
        field = response.xpath("//input[@name='field']/@value").get()
        nd_list = response.xpath('//select[@id="choice"]/option/@value').getall()
        url = "https://sichuan.chinatax.gov.cn/module/search/index.jsp"
        # Hoist the year computation out of the loop; only the previous and
        # the current year are crawled.
        current_year = datetime.datetime.now().year
        wanted_years = {str(current_year - 1), str(current_year)}
        for nd in nd_list:
            if str(nd) in wanted_years:
                self.logger.info(nd)
                data = {"field_1130": f"{nd}", "i_columnid": "19682", "field": field}
                yield self.Request(url + "?" + urlencode(data), callback=self.parse_page)

    def parse_page(self, response, **kwargs):
        """Parse page 1 in place, then schedule requests for pages 2..N.

        The total page count is read from the pager text; when the pager is
        missing there is only one page and nothing more is scheduled.
        """
        root_url, request_data = parse_url_params(response.request.url)
        # Page 1 carries data rows itself — extract them before paginating.
        yield from self.parse_detail(response, **kwargs)

        page_text = response.xpath('//div[@class="digg"]/span[2]/text()').get()
        pt = re.compile(r"\d+")
        if page_text:
            total_page = pt.findall(str(page_text))[0]
            for page in range(2, int(total_page) + 1):
                data = {
                    "field_1130": f"{request_data.get('field_1130')}",
                    "i_columnid": "19682",
                    "field": request_data.get("field"),
                    "currpage": page,
                }
                yield self.Request(root_url + "?" + urlencode(data), callback=self.parse_detail)

    def parse_detail(self, response, **kwargs):
        """Yield one credit-grade item per data row of the result table.

        Header rows (no <td> children) are skipped. All cell text is
        whitespace-stripped before being stored.
        """
        root_url, request_data = parse_url_params(response.request.url)
        # Lazy %-style logging; page 1 requests carry no `currpage` param.
        # (The old f-string logged a throwaway tuple and crashed via
        # int(None) when `field_1130` was missing.)
        self.logger.info(
            "year=%s currpage=%s",
            request_data.get("field_1130"),
            request_data.get("currpage", "1"),
        )

        for row in response.xpath('//table[@class="dzcxtable"]/tr'):
            tds = row.xpath("./td/text()").getall()
            if not tds:
                continue
            item = NetCreditGradeAItem()
            # scrapy.Item subclasses forbid attribute assignment (it raises
            # AttributeError: "Use item[...] = ..."), so fields are set
            # dict-style. NOTE(review): assumes NetCreditGradeAItem is a
            # scrapy.Item, the convention for a project items module — confirm.
            item["taxpayer_id"] = re.sub(r"\s+", "", tds[0])
            item["company_name"] = re.sub(r"\s+", "", tds[1])
            item["year"] = re.sub(r"\s+", "", tds[2])
            item["province"] = self.province
            yield item


def run():
    """Launch this spider through the Scrapy command line."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "sichuan_grade"])


# Allow running the spider directly: `python sichuan_grade.py`.
if __name__ == "__main__":
    run()
