#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/26 11:09
# @Author  : 王凯
# @File    : xiamen_grade.py
# @Project : spider-man
import datetime

import scrapy

from apps.creadit_grade_a.creadit_grade_a.items import NetCreditGradeAItem
from utils.tools import urlencode, parse_url_params


class XiaMenGradeSpider(scrapy.Spider):
    """Spider scraping taxpayer credit-grade ratings from the Xiamen tax bureau.

    Flow: ``start_requests`` -> ``parse_nd`` (collect evaluation years) ->
    ``parse_page`` (page 0 of each year + fan out remaining pages) ->
    ``parse_detail`` (one ``NetCreditGradeAItem`` per taxpayer row).
    """

    name = "xiamen_grade"
    province = "厦门"  # stamped onto every yielded item
    # NOTE(review): this attribute is never used by the spider and points at
    # the *Dalian* tax site, not Xiamen. Kept only so the class interface is
    # unchanged — confirm whether it can be deleted.
    url = "https://etax.dalian.chinatax.gov.cn/nsr/wzdk/toAnsrxydjcx?0.532704183722605"
    # Aliases so callbacks build requests via ``self.Request`` / ``self.FormRequest``.
    Request = scrapy.Request
    FormRequest = scrapy.FormRequest

    def start_requests(self):
        """Kick off the crawl at the credit-query landing page."""
        url = "http://xiamen.chinatax.gov.cn/xmswcms/nszx/InitCredit.html"
        yield self.Request(url, callback=self.parse_nd)

    def parse_nd(self, response, **kwargs):
        """Extract the available evaluation years and request page 0 of each.

        Only the previous and the current calendar year are crawled.
        """
        url = "http://xiamen.chinatax.gov.cn/dwfw/findCredit.do"
        nd_list = response.xpath('//select[@id="evalyear"]//option/@value').getall()

        # Hoisted out of the loop: the original rebuilt the datetime.now()
        # year list for every <option> value.
        current_year = datetime.datetime.now().year
        wanted_years = {str(current_year - 1), str(current_year)}

        for nd in nd_list:
            if str(nd) in wanted_years:
                data = {"page": "0", "location": "13502000000", "code": "", "name": "", "evalyear": f"{nd}"}
                yield self.FormRequest(url, formdata=data, method="POST", callback=self.parse_page)

    def parse_page(self, response, **kwargs):
        """Parse page 0 of a year's results and schedule the remaining pages.

        The site uses 0-based page numbers: this response *is* page 0, so the
        fan-out starts at page 1 and runs to ``pageSum - 1``.
        """
        yield from self.parse_detail(response, **kwargs)
        # Guard: a missing/None "pageSum" would make int() raise TypeError.
        total_page = int(response.json().get("pageSum") or 0)
        url, request_data = parse_url_params(response.request.body.decode())
        if total_page:
            for page in range(1, total_page):
                data = {
                    "page": f"{page}",
                    "location": "13502000000",
                    "code": "",
                    "name": "",
                    "evalyear": request_data.get("evalyear"),
                }
                yield self.FormRequest(response.request.url, formdata=data, method="POST", callback=self.parse_detail)
        else:
            # Lazy %-style args instead of the original f-string of a tuple,
            # which logged a tuple repr like "('2022', '13502000000')".
            self.logger.info(
                "no result pages for evalyear=%s location=%s",
                request_data.get("evalyear"),
                request_data.get("location"),
            )

    def parse_detail(self, response, **kwargs):
        """Turn one JSON result page into ``NetCreditGradeAItem`` objects."""
        url, request_data = parse_url_params(response.request.body.decode())
        # Original logged the repr of a tuple and int()-converted evalyear,
        # which raises on None; plain lazy logging is both safe and readable.
        self.logger.info(
            "currpage evalyear=%s page=%s",
            request_data.get("evalyear"),
            request_data.get("page", "1"),
        )
        datas = response.json().get("creditList")
        if datas:
            for data in datas:
                # NOTE(review): attribute assignment only works if
                # NetCreditGradeAItem supports it — a plain scrapy.Item would
                # require item["field"] = ...; confirm the item class.
                item = NetCreditGradeAItem()
                item.taxpayer_id = data.get("nsrsbh")
                item.company_name = data.get("nsrmc")
                item.year = data.get("pd_nd")
                item.province = self.province
                yield item


def run():
    """Launch this spider through Scrapy's command-line runner."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "xiamen_grade"])


# Run the spider when this module is executed directly as a script.
if __name__ == "__main__":
    run()
