#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/25 16:42
# @Author  : 王凯
# @File    : qinghai_grade.py
# @Project : spider-man
import datetime
import re

import pandas as pd
import scrapy

from apps.creadit_grade_a.creadit_grade_a.items import NetCreditGradeAItem
from utils.tools import urlencode, parse_url_params


class QinghaiGradeSpider(scrapy.Spider):
    """Spider for A-grade taxpayer credit ratings published by the Qinghai
    provincial tax bureau.

    Records come from two sources:
      * a paginated JSON API (``findCredit.do``) queried per city and per
        evaluation year, and
      * a static ``.xlsx`` attachment that holds the 2022 list.
    """

    name = "qinghai_grade"
    # Province label stamped onto every emitted item.
    province = "青海"
    url = "http://qinghai.chinatax.gov.cn/web/ajnsr/nszx/InitCredit.html"
    # Indirection point so subclasses/tests can substitute the request class.
    Request = scrapy.Request

    def start_requests(self):
        # Fetch the page that carries the year/city filter widgets
        # (reuse the class-level URL instead of duplicating the literal).
        yield self.Request(self.url, callback=self.parse_nd)

    def parse_nd(self, response, **kwargs):
        """Read the year and city option lists and fan out API queries."""
        # The 2022 data set is only published as an Excel attachment.
        yield from self.parse_deal_xls()
        api_url = "http://qinghai.chinatax.gov.cn/service/api/findCredit.do"
        year_options = response.xpath('//select[@id="evalyear"]//option/@value').getall()
        cities = response.xpath('//*[@id="location_table"]//a/text()').getall()
        # Only last year and the current year are wanted; build the allowed
        # set once instead of rebuilding it for every city/year pair.
        this_year = datetime.datetime.now().year
        wanted_years = {str(this_year - 1), str(this_year)}
        for city in cities:
            for year in year_options:
                if str(year) not in wanted_years:
                    continue
                params = {
                    "page": "1",
                    "pagesize": "50",
                    "code": "",
                    "name": "",
                    "evalyear": f"{year}",
                    "location": city,
                }
                yield self.Request(api_url + "?" + urlencode(params), callback=self.parse_page)

    def parse_page(self, response, **kwargs):
        """Emit page 1's records, then schedule requests for pages 2..N."""
        root_url, request_data = parse_url_params(response.request.url)
        # Page 1 already contains records; emit them before paginating.
        yield from self.parse_detail(response, **kwargs)
        count = int(response.json().get("count"))
        # Ceiling division: the old `count // 50` dropped the final partial
        # page (e.g. count == 51 is 2 pages, not 1, so page 2 was never fetched).
        total_page = -(-count // 50)
        if total_page > 1:
            # Lazy %-style args so formatting only happens if the record is emitted.
            self.logger.info(
                "evalyear=%s location=%s pages=%s",
                request_data.get("evalyear"),
                request_data.get("location"),
                total_page,
            )
            for page in range(2, total_page + 1):
                params = {
                    "code": "",
                    "name": "",
                    "page": f"{page}",
                    "pagesize": "50",
                    "evalyear": request_data.get("evalyear"),
                    "location": request_data.get("location"),
                }
                yield self.Request(root_url + "?" + urlencode(params), callback=self.parse_detail)
        else:
            self.logger.info(
                "evalyear=%s location=%s pages=%s",
                request_data.get("evalyear"),
                request_data.get("location"),
                "None",
            )

    def parse_detail(self, response, **kwargs):
        """Turn one JSON API page into items; yields nothing for empty pages."""
        rows = response.json().get("data")
        if not rows:
            return
        for row in rows:
            item = NetCreditGradeAItem()
            # Strip every whitespace run (internal and edge) from the API fields.
            item.taxpayer_id = re.sub(r"\s+", "", row.get("code"))
            item.company_name = re.sub(r"\s+", "", row.get("name"))
            item.year = re.sub(r"\s+", "", row.get("evalyear"))
            item.province = self.province
            yield item

    def parse_deal_xls(self):
        """Download and parse the 2022 A-grade list published as an .xlsx file."""
        url = "http://qinghai.chinatax.gov.cn/web/nsxy/202304/40342594269746819c29997d32cd45a2/files/2022%E5%B9%B4%E5%BA%A6%E5%9B%BD%E5%AE%B6%E7%A8%8E%E5%8A%A1%E6%80%BB%E5%B1%80%E9%9D%92%E6%B5%B7%E7%9C%81%E7%A8%8E%E5%8A%A1%E5%B1%80A%E7%BA%A7%E7%BA%B3%E7%A8%8E%E4%BA%BA%E5%90%8D%E5%8D%95.xlsx"
        headers = {
            "Accept": "image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.9,zu;q=0.8,be;q=0.7,en;q=0.6",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Pragma": "no-cache",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36",
        }
        import io

        import requests

        # NOTE(review): verify=False is a no-op for a plain-http URL; left in
        # place in case the URL scheme ever changes — confirm intent.
        response = requests.get(url, headers=headers, verify=False)
        # Wrap the payload in BytesIO: passing raw bytes to read_excel is
        # deprecated in recent pandas versions. sheet_name=None loads every sheet.
        sheets = pd.read_excel(io.BytesIO(response.content), sheet_name=None)

        # Column-name mapping hoisted out of the per-sheet loop; two spellings
        # of the taxpayer-id header occur in the workbook.
        rename = {
            "纳税人识别号（统一社会信用码）": "taxpayer_id",
            "纳税人识别号（统一社会信用代码）": "taxpayer_id",
            "纳税人名称": "company_name",
            "评价年度": "year",
        }
        for df in sheets.values():
            df = df.rename(columns=rename)
            if df.empty:
                continue
            for record in df[["taxpayer_id", "company_name", "year"]].to_dict("records"):
                item = NetCreditGradeAItem()
                # str() guards against NaN cells, which pandas loads as float
                # and which would otherwise make re.sub raise TypeError.
                item.taxpayer_id = re.sub(r"\s+", "", str(record.get("taxpayer_id")))
                item.company_name = re.sub(r"\s+", "", str(record.get("company_name")))
                item.year = record.get("year")
                item.province = self.province
                yield item


def run():
    """Launch the qinghai_grade spider through Scrapy's command line."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "qinghai_grade"])


# Allow launching the spider by running this module directly as a script.
if __name__ == "__main__":
    run()
