# -*- coding: utf-8 -*-
import math
import redis
import requests
from requests.exceptions import RequestException
import re

from scrapy.settings import BaseSettings

from apps.listed_company.listed_company.items import DishonestPersonSubjectToEnforcement
from datetime import datetime
from twisted.internet import reactor, threads
from scrapy.crawler import CrawlerProcess
import time
from urllib.parse import urlencode
import json
import scrapy


def convert_date_format(date_str):
    """Convert a date from "YYYY年MM月DD日" form to ISO "YYYY-MM-DD" form.

    Args:
        date_str (str): date string such as "2023年01月05日".

    Returns:
        str: the reformatted date, or a Chinese error message
        ("日期格式错误: ...") when *date_str* does not match the pattern.
    """
    try:
        parsed = datetime.strptime(date_str, "%Y年%m月%d日")
    except ValueError as exc:
        # Mirror the original contract: report the problem as a string
        # instead of propagating the exception to the caller.
        return f"日期格式错误: {exc}"
    return parsed.strftime("%Y-%m-%d")


def get_name_list():
    """Yield company names popped from the Redis db0 list ``comp_name``.

    Each LPOP atomically removes the element it returns, so concurrent
    consumers never receive the same name twice. The generator stops as
    soon as the list is empty.
    """
    # Connect to Redis (adjust connection parameters as needed).
    client = redis.Redis(
        host='localhost',
        port=6379,
        db=0,
        # password='your_password',  # add if the server requires auth
        decode_responses=True,  # return str instead of raw bytes
    )

    # LPOP returns None once the list has been drained.
    while (name := client.lpop('comp_name')) is not None:
        yield name


def get_pagination_indices(total_count, page_size=500):
    """Return the start offset of every page needed for *total_count* records.

    Args:
        total_count (int): total number of records.
        page_size (int): records per page; must be positive (default 500).

    Returns:
        list: ``[0, page_size, 2*page_size, ...]`` — one entry per page,
        empty when ``total_count <= 0``.
    """
    # range() already walks 0, page_size, 2*page_size, ... and stops before
    # total_count — exactly the ceil-division loop the original computed
    # by hand.
    return list(range(0, total_count, page_size))


class Spider(scrapy.Spider):
    """Scrape Baidu's dishonest-person (失信被执行人) open-data JSONP API.

    Flow: ``start_requests`` pulls company names from Redis and fires one
    probe request per name; ``parse_list`` reads the total hit count and
    fans out one 500-row page request per page; ``parse_detail`` yields a
    ``DishonestPersonSubjectToEnforcement`` item per record.
    """
    name = 'low_credit'
    # Base endpoint of Baidu's open-data API.
    API_URL = "https://sp1.baidu.com/8aQDcjqpAAV3otqbppnN2DJv/api.php"
    # JSONP callback name: sent as the `cb` query param and stripped back
    # off each response. Must stay a single constant so both sides agree.
    JSONP_CALLBACK = "jQuery110203945014504715405_1743124949823"
    # Browser-captured headers/cookies replayed verbatim so the API treats
    # us as the original Baidu search session. Do not edit these values.
    headers = {
        "Accept": "*/*",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        "Referer": "https://www.baidu.com/s?wd=%E5%85%A8%E5%9B%BD%E5%A4%B1%E4%BF%A1%E8%A2%AB%E6%89%A7%E8%A1%8C%E4%BA%BA%E5%90%8D%E5%8D%95%20&rsv_spt=1&rsv_iqid=0xa139add9002129a5&issp=1&f=8&rsv_bp=1&rsv_idx=2&ie=utf-8&rqlang=cn&tn=baiduhome_pg&rsv_dl=tb&rsv_enter=0&oq=%25E5%25A4%25B1%25E4%25BF%25A1&rsv_btype=t&inputT=1297&rsv_t=3dd7RQr4AlMzBIPfHQndwVcTFc3KhzykogehLrOu0MeyTMCLFN6rUoOZVKsVOY%2FY4i8d&rsv_pq=ef9aad8d00297d42&rsv_n=2&rsv_sug3=15&rsv_sug1=13&rsv_sug7=100&rsv_sug2=0&rsv_sug4=1298",
        "Sec-Fetch-Dest": "script",
        "Sec-Fetch-Mode": "no-cors",
        "Sec-Fetch-Site": "same-site",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
        "sec-ch-ua": "\"Chromium\";v=\"134\", \"Not:A-Brand\";v=\"24\", \"Google Chrome\";v=\"134\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Windows\""
    }
    cookies = {
        "BAIDUID": "60E6CA632E15091F497051EA9F3EBDE2:FG=1",
        "PSTM": "1737373935",
        "BIDUPSID": "8573A59342C381E8E46AE847EEE7ED71",
        "BDORZ": "B490B5EBF6F3CD402E515D22BCDA1598",
        "H_PS_PSSID": "61027_62232_62325_62338_62345_62373_62427_62475_62484_62499_62457_62455_62452_62451_62644_62674_62676_62702_62618",
        "H_WISE_SIDS": "61027_62232_62325_62338_62345_62373_62427_62475_62484_62499_62457_62455_62452_62451_62644_62674_62676_62702_62618",
        "BAIDUID_BFESS": "60E6CA632E15091F497051EA9F3EBDE2:FG=1",
        "ZFY": "You:BEagdqmMQAJ:A7XIPbeiqo8Lub77YlPrVYJn0vaLg:C",
        "BDRCVFR[feWj1Vr5u3D]": "mk3SLVN4HKm",
        "delPer": "0",
        "PSINO": "5",
        "BDRCVFR[C0p6oIjvx-c]": "I67x6TjHwwYf0",
        "arialoadData": "false",
        "BA_HECTOR": "8405a02k80840k8g008lak2h0m2c3f1juaeii22"
    }
    # Kept for interface compatibility; not written to in this file.
    total_list = []
    err_list = []
    custom_settings = {
        "CONCURRENT_REQUESTS": 32
    }

    def _decode_jsonp(self, response):
        """Strip the JSONP wrapper ``/**/cb(...)`` and parse the JSON body."""
        # [:-2] drops the trailing characters that close the JSONP call,
        # matching the original hand-rolled strip exactly.
        payload = response.text.replace(f'/**/{self.JSONP_CALLBACK}(', '')[:-2]
        return json.loads(payload)

    def _api_request(self, iname, pn, rn, callback, underscore_offset, meta=None):
        """Build one scrapy.Request against the Baidu open-data API.

        Args:
            iname: company name to query.
            pn: result offset (page start index).
            rn: page size to request.
            callback: Scrapy callback for the response.
            underscore_offset: ms subtracted from the timestamp for the
                `_` cache-buster param (mirrors the captured browser calls).
            meta: optional request meta dict.
        """
        now_ms = time.time_ns() // 1_000_000  # current time in milliseconds
        params = {
            "resource_id": "6899",
            "query": "失信被执行人名单",
            "cardNum": "",
            "iname": f"{iname}",
            "areaName": "",
            "pn": f"{pn}",
            "rn": f"{rn}",
            "from_mid": "1",
            "ie": "utf-8",
            "oe": "utf-8",
            "format": "json",
            "t": f"{now_ms}",
            "cb": self.JSONP_CALLBACK,
            "_": f"{now_ms - underscore_offset}"
        }
        return scrapy.Request(
            url=f"{self.API_URL}?{urlencode(params)}",
            headers=self.headers,
            cookies=self.cookies,
            callback=callback,
            meta=meta
        )

    def start_requests(self):
        """Fire one probe request (pn=0, rn=10) per name pulled from Redis.

        Names are consumed from the Redis list until it is empty; the
        probe response only needs the total hit count, hence rn=10.
        """
        for name in get_name_list():
            yield self._api_request(
                name, pn=0, rn=10,
                callback=self.parse_list,
                underscore_offset=2020,
                meta={'name': name}
            )

    def parse_list(self, response, **kwargs):
        """Read the total hit count and fan out one request per 500-row page."""
        name = response.meta['name']
        # dispNum may arrive as a numeric string from the API — normalise
        # to int before doing pagination arithmetic (the original passed
        # it through raw, which would TypeError on a string).
        total_num = int(self._decode_jsonp(response)['data'][0]['dispNum'])
        offsets = get_pagination_indices(total_num)
        print("name_step_list", name, total_num, offsets)
        for offset in offsets:
            yield self._api_request(
                name, pn=offset, rn=500,
                callback=self.parse_detail,
                underscore_offset=2910
            )

    def parse_detail(self, response, **kwargs):
        """Yield one DishonestPersonSubjectToEnforcement item per record."""
        item_list = self._decode_jsonp(response)['data'][0]['disp_data']
        for record in item_list:
            try:
                yield DishonestPersonSubjectToEnforcement(**{
                    'name': record['iname'],
                    'cardnum': record['cardNum'],
                    'court_of_execution': record['courtName'],
                    'province': record['areaName'],
                    'reference': record['caseCode'],
                    'obligations_determined_by_effective_legal_documents': {"text": record['duty']},
                    'performance_of_the_executed_party': record['performance'],
                    'specific_situation': record['disruptTypeName'],
                    'release_time': convert_date_format(record['publishDate']),
                })
            except Exception as err:
                # Preserve the original failure signal but chain the real
                # cause (the bare `except: raise Exception(...)` destroyed
                # it), and stop swallowing KeyboardInterrupt/SystemExit.
                raise Exception("异常异常") from err


if __name__ == "__main__":
    from scrapy import cmdline

    # Equivalent to running `scrapy crawl low_credit` from a shell.
    cmdline.execute(["scrapy", "crawl", "low_credit"])
