#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/22 14:17
# @Author  : 王凯
# @File    : shanxi_grade.py
# @Project : spider-man
import datetime
import json

import scrapy

from apps.creadit_grade_a.creadit_grade_a.items import NetCreditGradeAItem
from utils.tools import urlencode, parse_url_params


class ShanxiGradeSpider(scrapy.Spider):
    """Spider for Shanxi province A-grade taxpayer credit ratings.

    Flow:
      1. ``start_requests`` fetches the landing page listing rating years.
      2. ``parse`` issues a search POST for each of the two most recent years.
      3. ``parse_page`` yields items for the current page, then paginates.
      4. ``parse_detail`` converts each JSON record into an item.
    """

    name = "shanxi_grade"
    province = "山西"
    # Landing page that renders the list of available rating years.
    url = "http://shanxi.chinatax.gov.cn/nsfw/ajnsr/sx-11400"
    # JSON search endpoint used for both the first page and pagination.
    search_url = "http://shanxi.chinatax.gov.cn/nsfw/ajnsrSearch"
    # Rows per result page; used to compute the zero-based "start" offset.
    PAGE_SIZE = 15
    Request = scrapy.Request
    FormRequest = scrapy.FormRequest
    headers = {
        # BUGFIX: Origin previously pointed at shanghai.chinatax.gov.cn
        # (copy-paste from a sibling spider); it must match the target host.
        "Origin": "http://shanxi.chinatax.gov.cn",
        "Referer": "http://shanxi.chinatax.gov.cn/nsfw/ajnsr/sx-11400",
    }
    custom_settings = {
        # The site answers throttled/blocked requests with 403/307; retry hard.
        "RETRY_HTTP_CODES": [403, 307],
        "RETRY_TIMES": 30,
    }

    def start_requests(self):
        """Fetch the landing page that lists the available rating years."""
        yield self.Request(self.url, headers=self.headers)

    def parse(self, response, **kwargs):
        """Extract the rating-year options and start a search for each of
        the two most recent years (last year and the current year).

        :param response: HTML response of the landing page.
        """
        year_values = response.xpath(
            '//ul[@class="select_embellish_list"]/li/@data-value'
        ).getall()
        # Hoisted out of the loop: compute the accepted years exactly once.
        current_year = datetime.datetime.now().year
        wanted_years = {str(current_year - 1), str(current_year)}
        for year in year_values:
            if str(year) not in wanted_years:
                continue
            params = {"cx_nsrsbh": "", "cx_nsrmc": "", "cx_pdnd": year, "cx_dq": "", "page": "1"}
            data = {"start": "0"}
            yield self.FormRequest(
                self.search_url + "?" + urlencode(params),
                method="POST",
                body=json.dumps(data),
                callback=self.parse_page,
                headers={**self.headers, "Content-Type": "application/json"},
            )

    def parse_page(self, response, **kwargs):
        """Yield items for the current result page, then request the next
        page until ``totalPage`` is reached.

        :param response: JSON response from the search endpoint.
        """
        root_url, request_params = parse_url_params(response.request.url)
        yield from self.parse_detail(response, **kwargs)
        res_value = response.json().get("message")
        if res_value:
            max_page = res_value.get("totalPage")
            page_number = res_value.get("pageNumber")
            if page_number < max_page:
                params = {
                    "cx_nsrsbh": "",
                    "cx_nsrmc": "",
                    # Carry the year of the request being paginated forward.
                    "cx_pdnd": request_params.get("cx_pdnd"),
                    "cx_dq": "",
                    "page": page_number + 1,
                }
                # "start" is a zero-based row offset: page_number full pages
                # have already been consumed.
                data = {"start": str(page_number * self.PAGE_SIZE)}
                yield self.FormRequest(
                    url=self.search_url + "?" + urlencode(params),
                    body=json.dumps(data),
                    method="POST",
                    callback=self.parse_page,
                    headers={**self.headers, "Content-Type": "application/json"},
                )

    def parse_detail(self, response, **kwargs):
        """Yield one item per taxpayer record in the JSON response.

        :param response: JSON response whose ``message.list`` holds records.
        """
        res_value = response.json().get("message")
        if res_value:
            # ``or []`` guards against a null "list" field in the payload.
            for data in res_value.get("list") or []:
                item = NetCreditGradeAItem()
                # NOTE(review): attribute-style assignment only works if
                # NetCreditGradeAItem is not a plain scrapy.Item subclass
                # (those require item["field"] access) — confirm in items.py.
                item.taxpayer_id = data.get("NSRSBH")
                item.company_name = data.get("NSRMC")
                item.year = data.get("PD_ND")
                item.province = self.province
                yield item


def run():
    """Launch this spider through the Scrapy CLI (blocking call)."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "shanxi_grade"])


# Allow running this spider directly, e.g. ``python shanxi_grade.py``.
if __name__ == "__main__":
    run()
