import datetime
import json
from typing import Any, Union

import scrapy
from scrapy.http import Response

from apps.creadit_grade_a.creadit_grade_a.clean import PgsqlDB
from apps.creadit_grade_a.creadit_grade_a.items import NetCreditGradeAItem
from components.component.scrapy_redis_task.spiders import RedisTaskSpider
from utils.tonghuashun_tools import TONGHUASHUN_CONFIG
from utils.tools import parse_url_params


class RosterBeiJingSpider(RedisTaskSpider):
    """Feeds Beijing company lookup tasks from PostgreSQL into Redis and
    turns each task into tax-credit-grade POST queries against the Beijing
    tax bureau endpoint (one request per year over the last five years).
    """

    save_mysql_log = True
    uk_id = None
    to_db = None
    wfq_source_db = None
    custom_settings = {
        "REDIS_START_URLS_KEY": "scrapy:task:ths:%(name)s:start_urls",
        "LOG_LEVEL": "INFO",
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Source database holding enterprise_basic_info rows used to build tasks.
        self.pgsql_db = PgsqlDB(
            ip=TONGHUASHUN_CONFIG["ip"],
            port=TONGHUASHUN_CONFIG["port"],
            db=TONGHUASHUN_CONFIG["db"],
            user_name=TONGHUASHUN_CONFIG["user_name"],
        )

    def add_task(self):
        """Load the next batch of companies from PostgreSQL and push them as
        JSON tasks into the Redis sorted set.

        A cursor (last processed ``seq``) is kept under ``<redis_key>:id`` so
        successive calls walk the table in batches of 1000. When the table is
        exhausted and the Redis queue has drained, the cursor is reset to 0
        so the next pass starts over from the beginning.
        """
        record_task_table = self.redis_key + ':id'
        last_task_id = self.server.get(record_task_table)
        batch = 1000
        last_task_id = int(last_task_id) if last_task_id else 0
        if last_task_id:
            sql = (
                f"SELECT corp_name as query_key, used_name, unified_social_credit_code as taxpayer_id from "
                f"enterprise_basic_info where isvalid=1 "
                f" and is_latest=1 "
                f" and province = '北京市' "
                f" and unified_social_credit_code is not null "
                f"and seq > {last_task_id} and seq <  {last_task_id + batch}"
            )
        else:
            sql = (
                f"SELECT corp_name as query_key, used_name, unified_social_credit_code as taxpayer_id"
                " from enterprise_basic_info where isvalid=1 "
                f" and province = '北京市' "
                f" and unified_social_credit_code is not null "
                f" and is_latest=1 and seq <  {batch}"
            )
        datas = self.pgsql_db.find(sql, to_json=True)
        if datas:
            self.logger.info(
                "数据加载完成,{}, id从{}到{} 数量 {}".format(
                    self.redis_key, last_task_id, last_task_id + batch, len(datas)
                )
            )
            for data in datas:
                query_key = data.get("query_key")
                taxpayer_id = data.get("taxpayer_id")
                new_task_list = [{"query_key": query_key, "taxpayer_id": taxpayer_id}]

                # Former company names are stored comma-separated; each one
                # becomes its own lookup task carrying the same taxpayer id.
                used_name = data.get("used_name")
                if used_name:
                    for name in used_name.split(","):
                        new_task_list.append({"query_key": name, "taxpayer_id": taxpayer_id})

                for new_task in new_task_list:
                    task_name = new_task.get("query_key")
                    # Guard against NULL corp_name (len(None) would raise);
                    # names of 3 chars or fewer are too ambiguous to query.
                    if task_name and len(task_name) > 3:
                        self.server.zadd(self.redis_key, {json.dumps(new_task, ensure_ascii=False): 0})

            self.server.set(record_task_table, last_task_id + batch)
        else:
            self.logger.info(
                f"数据加载完成,没有数据,记录最后一条任务id {self.redis_key} {last_task_id}"
            )
            self.server.set(record_task_table, last_task_id)
            if self.count_size(self.redis_key):
                return
            self.logger.info("数据加载完成,没有数据,准备下一轮")
            self.server.set(record_task_table, "0")
            # Restart from the top only if this pass had actually advanced past
            # seq 0; otherwise the table is empty and recursing again with a
            # zero cursor would never terminate.
            if last_task_id:
                self.add_task()

    def make_request_from_data(self, formatted_data: Union[str, dict]):
        """Build the credit-grade queries for one dequeued task.

        Yields one POST per year over the last five full years, querying by
        taxpayer id when present, otherwise by company name.

        NOTE(review): the original annotation ``str or dict`` evaluated to
        plain ``str``; the body indexes by key, so a mapping is required.
        """
        url = "http://beijing.chinatax.gov.cn/bjsat/office/jsp/Ajqyquery"
        years = datetime.datetime.now().year
        company_name = formatted_data["query_key"]
        taxpayer_id = formatted_data["taxpayer_id"]
        for year in range(years - 5, years):
            if taxpayer_id:
                data = {"nsrsbh": taxpayer_id, "dwmc": "", "ssny": str(year), "qjmc": "", "nowPageNum": "1", "pageSise": "15"}
            else:
                data = {"nsrsbh": "", "dwmc": company_name, "ssny": str(year), "qjmc": "", "nowPageNum": "1", "pageSise": "15"}
            # ``verify=False`` was removed: scrapy.Request does not accept a
            # ``verify`` keyword, so passing it raised TypeError at runtime.
            yield scrapy.FormRequest(url, formdata=data, method="POST", callback=self.parse)


class BeiJingGradeSpider(scrapy.Spider):
    """Crawls the Beijing tax bureau's credit-grade listing, walking every
    result page for each of the last two tax years and emitting one item
    per listed company.
    """

    name = "beijing_grade"
    province = "北京"
    url = "http://beijing.chinatax.gov.cn/bjsat/office/jsp/ajqy/query.jsp"
    Request = scrapy.Request
    FormRequest = scrapy.FormRequest
    custom_settings = {
        "RETRY_TIMES": 100,
    }

    def start_requests(self):
        """Issue the first-page query for each of the last two years."""
        query_url = "http://beijing.chinatax.gov.cn/bjsat/office/jsp/Ajqyquery"
        this_year = datetime.datetime.now().year
        for year in range(this_year - 2, this_year):
            form = {
                "nsrsbh": "",
                "dwmc": "",
                "ssny": str(year),
                "qjmc": "",
                "nowPageNum": "1",
                "pageSise": "15",
            }
            yield scrapy.FormRequest(
                query_url,
                formdata=form,
                method="POST",
                callback=self.parse,
                cb_kwargs={"nowPageNum": 1},
            )

    def parse(self, response: Response, **kwargs: Any) -> Any:
        """Emit this page's items, then request the next page until the
        response flags itself as the last one."""
        yield from self.parse_detail(response, **kwargs)
        is_last_page = response.json()["last"]
        current_page = kwargs.get("nowPageNum")
        query_url = "http://beijing.chinatax.gov.cn/bjsat/office/jsp/Ajqyquery"
        # Recover the form we originally sent so it can be replayed with only
        # the page number bumped.
        _, sent_form = parse_url_params(response.request.body.decode())
        if is_last_page:
            return
        next_page = current_page + 1
        yield self.FormRequest(
            query_url,
            formdata={**sent_form, "nowPageNum": f"{next_page}"},
            method="POST",
            callback=self.parse,
            cb_kwargs={"nowPageNum": next_page},
        )

    def parse_detail(self, response, **kwargs):
        """Yield one NetCreditGradeAItem per record in the page's resultList."""
        for record in response.json().get("resultList") or []:
            item = NetCreditGradeAItem()
            item.taxpayer_id = record["nsrsbhInfo"]
            item.year = record["ssny"]
            item.company_name = record["dwmc"]
            item.province = self.province
            yield item


def run():
    """Launch the beijing_grade spider through the Scrapy command line."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "beijing_grade"])


# Script entry point: running this module directly starts the beijing_grade spider.
if __name__ == "__main__":
    run()
