#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/22 16:54
# @Author  : 王凯
# @File    : heilongjiang_grade.py
# @Project : spider-man
import datetime
import re

import scrapy

from apps.creadit_grade_a.creadit_grade_a.items import NetCreditGradeAItem
from utils.tools import urlencode, parse_url_params


class HeilongjiangGradeSpider(scrapy.Spider):
    """Crawl A-grade taxpayer credit ratings from the Heilongjiang e-tax site.

    Flow: fetch the landing page to read the available evaluation years,
    then issue one listing POST per (city code, recent year) pair, follow
    pagination, and emit one ``NetCreditGradeAItem`` per table row.
    """

    name = "heilongjiang_grade"
    Request = scrapy.Request
    FormRequest = scrapy.FormRequest
    province = "黑龙江"
    # Landing page exposing the evaluation-year <select id="pj_nd">.
    url = "https://etax.heilongjiang.chinatax.gov.cn/nologin/xxcx/xydjAlnsrcx.jsp"

    # Prefecture-level city name -> tax bureau code (swjg_dm query value).
    HeiLongJiang_City = {
        "哈尔滨": "2301",
        "齐齐哈尔": "2302",
        "鸡西": "2303",
        "鹤岗": "2304",
        "双鸭山": "2305",
        "大庆": "2306",
        "伊春": "2307",
        "佳木斯": "2308",
        "七台河": "2309",
        "牡丹江": "2310",
        "黑河": "2311",
        "绥化": "2312",
        "大兴安岭": "2327",
    }

    def start_requests(self):
        """Fetch the landing page so ``parse`` can read the year options."""
        # Reuse the class-level URL instead of duplicating the literal.
        yield self.Request(self.url)

    def parse(self, response, **kwargs):
        """Build one listing request per city for each recent evaluation year.

        Only last year and the current year are queried.  The valid-year set
        is computed once, outside the loops, instead of per iteration.
        """
        evalyear = response.xpath('//*[@id="pj_nd"]/option/@value').getall()
        current_year = datetime.datetime.now().year
        recent_years = {str(y) for y in range(current_year - 1, current_year + 1)}
        list_url = "https://etax.heilongjiang.chinatax.gov.cn/nologin/xxcx/Ajnsrmd_list.jsp"
        for city_code in self.HeiLongJiang_City.values():
            for year_select in evalyear:
                if str(year_select) not in recent_years:
                    continue
                params = {"nsrsbh": "", "nsrmc": "", "swjg_dm": city_code, "pj_nd": year_select}
                # cc = current page number, pc = page size.
                data = {"cc": "1", "pc": "1000"}
                yield self.FormRequest(
                    list_url + "?" + urlencode(params),
                    formdata=data,
                    callback=self.parse_page,
                    method="POST",
                )

    def parse_page(self, response, **kwargs):
        """Emit items from the first result page, then request the remaining pages."""
        yield from self.parse_detail(response, **kwargs)
        # The page body contains the total page count as "共 N 页".
        max_page_list = re.findall(r"共\D*(\d*)\D*页", response.text, flags=re.S)
        if max_page_list:
            int_max_page = int(max_page_list[0])
            for page in range(2, int_max_page + 1):
                data = {"cc": str(page), "pc": "1000"}
                yield self.FormRequest(
                    response.request.url,
                    formdata=data,
                    callback=self.parse_detail,
                    method="POST",
                )

    def parse_detail(self, response, **kwargs):
        """Yield a ``NetCreditGradeAItem`` per table row that has a taxpayer id and year.

        Columns (1-based): td[2] = taxpayer id, td[3] = company name,
        td[4] = evaluation year.  All whitespace is stripped from cell text.
        """
        for tag_tr in response.xpath('//table[@class="layui-table"]/tr'):
            company_name = re.sub(r"\s", "", tag_tr.xpath("./td[3]/text()").get() or "")
            taxpayer_id = re.sub(r"\s", "", tag_tr.xpath("./td[2]/text()").get() or "")
            year = re.sub(r"\s", "", tag_tr.xpath("./td[4]/text()").get() or "")
            # Header rows / malformed rows lack these cells; skip them.
            if taxpayer_id and year:
                item = NetCreditGradeAItem()
                item.taxpayer_id = taxpayer_id
                item.company_name = company_name
                item.year = year
                item.province = self.province
                yield item


def run():
    """Launch this spider through the Scrapy command line runner."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "heilongjiang_grade"])


# Allow running this module directly as a script.
if __name__ == "__main__":
    run()
