#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/25 14:42
# @Author  : 王凯
# @File    : guizhou_grade.py
# @Project : spider-man
import datetime
import json
import re

import scrapy

from apps.creadit_grade_a.creadit_grade_a.items import NetCreditGradeAItem
from utils.tools import urlencode, parse_url_params


class GuizhouGradeSpider(scrapy.Spider):
    """Crawl A-grade taxpayer credit ratings for the two years before the current one.

    NOTE(review): the spider is named "guizhou" and ``province`` is 贵州, yet every
    URL targets ``sichuan.chinatax.gov.cn`` — this looks copy-pasted from a Sichuan
    spider; confirm which provincial tax bureau it should actually query.
    """

    name = "guizhou_grade"
    province = "贵州"
    url = "https://sichuan.chinatax.gov.cn/col/col19682/index.html"
    # Exposed as class attributes so subclasses/tests can swap the request classes.
    Request = scrapy.Request
    FormRequest = scrapy.FormRequest

    # Search endpoint used for both the first page and all paginated follow-ups.
    search_url = "https://sichuan.chinatax.gov.cn/module/search/index.jsp"

    @staticmethod
    def _build_params(year, page):
        """Return the query-string params for one result page of one year.

        Extracted because the identical dict was previously duplicated in
        ``start_requests`` and ``parse_page``.
        """
        return {
            "field": "vc_name:1:0,field_1126:1:1,field_1130:12:1,field_1131:12:0",
            "i_columnid": "19682",
            "vc_name": "",
            "field_1126": "",
            "field_1130": f"{year}",
            "field_1131": "",
            "currpage": f"{page}",
        }

    def start_requests(self):
        """Issue the page-1 search request for each target year."""
        current_year = datetime.datetime.now().year
        for year in range(current_year - 2, current_year):
            url = self.search_url + "?" + urlencode(self._build_params(year, 1))
            yield self.Request(url, callback=self.parse_page, cb_kwargs={"year": year})

    def parse_page(self, response, **kwargs):
        """Parse page 1 of a year's results, then request the remaining pages."""
        year = kwargs.get("year")
        yield from self.parse_detail(response, **kwargs)
        total_page = response.xpath('.').re_first(r"共\s*(\d+)\s*页")
        # Bug fix: the original called int(None) and crashed whenever the
        # "共 N 页" page-count marker was missing (e.g. an error or empty
        # result page); treat that case as a single page.
        if not total_page:
            return
        for page in range(2, int(total_page) + 1):
            url = self.search_url + "?" + urlencode(self._build_params(year, page))
            yield self.Request(url, callback=self.parse_detail)

    def parse_detail(self, response, **kwargs):
        """Yield one item per data row of the result table.

        The header row is recognised by its first-column caption 纳税人名称
        and filtered out.
        """
        for row in response.xpath('//table[@class="dzcxtable"]//tr'):
            item = NetCreditGradeAItem()
            # NOTE(review): attribute-style assignment only works if
            # NetCreditGradeAItem is not a plain scrapy.Item (those require
            # item["field"] = ...) — confirm against the item definition.
            item.taxpayer_id = row.xpath('.//td[1]/text()').get()
            item.company_name = row.xpath('.//td[2]/text()').get()
            item.year = row.xpath('.//td[3]/text()').get()
            item.province = self.province
            if item.company_name != '纳税人名称':
                yield item


def run():
    """Launch this spider through Scrapy's command-line runner."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "guizhou_grade"])


if __name__ == "__main__":
    run()
