import pandas as pd
import requests
import math
from typing import Dict
from tqdm import tqdm
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
import time

def get_tqdm(enable: bool = True):
    """Return a tqdm progress-bar callable suited to the current environment.

    Args:
        enable (bool): When False, progress bars are suppressed and a
            pass-through wrapper is returned instead. Defaults to True.

    Returns:
        A tqdm-compatible callable.
    """
    if not enable:
        # Progress bars disabled: hand back a no-op wrapper that just
        # returns the iterable untouched.
        return lambda iterable, *args, **kwargs: iterable

    try:
        # Detect a Jupyter notebook kernel: the notebook widget renders
        # more cleanly and tears down properly on exit.
        # noinspection PyUnresolvedReferences
        shell_name = get_ipython().__class__.__name__
    except NameError:
        # Not running under IPython at all.
        shell_name = ""

    if shell_name == "ZMQInteractiveShell":
        try:
            from tqdm.notebook import tqdm
            return tqdm
        except ImportError:
            pass  # notebook extras missing — fall back to the console bar

    # Plain terminal (or fallback): standard tqdm.
    from tqdm import tqdm
    return tqdm


def fetch_paginated_data(url: str, base_params: Dict, headers: Dict, timeout: int = 15) -> pd.DataFrame:
    """Fetch every page of a paginated JSON API and merge the results.

    The endpoint is expected to answer with ``{"data": {"diff": [...],
    "total": N}}``; the page number is requested via the ``pn`` query
    parameter. GET requests are retried with exponential backoff.

    Args:
        url: Endpoint URL (https URLs get the retry-enabled adapter).
        base_params: Query parameters for the first page; copied, never mutated.
        headers: HTTP headers sent with every request.
        timeout: Per-request timeout in seconds. Defaults to 15.

    Returns:
        A DataFrame with all pages concatenated, numbered from 1 and — when a
        numeric column ``f3`` is present — sorted by it descending. An empty
        DataFrame is returned on any error.
    """
    params = base_params.copy()
    all_data = []
    session = None  # pre-bind so the finally clause is safe if Session() fails

    try:
        # Session with an exponential-backoff retry strategy for GETs.
        session = requests.Session()
        retry_strategy = Retry(
            total=5,
            backoff_factor=2,  # exponential backoff: 2s, 4s, 8s, ...
            status_forcelist=[429, 500, 502, 503, 504, 403],  # 403: anti-scraping blocks
            allowed_methods=["GET"],
        )
        session.mount("https://", HTTPAdapter(max_retries=retry_strategy))

        # Fixed politeness delay before the first request.
        time.sleep(1)
        # SECURITY: verify=False disables TLS certificate validation
        # (debugging aid only) — re-enable verification for production use.
        response = session.get(
            url,
            params=params,
            headers=headers,
            timeout=timeout,
            verify=False,
        )
        response.raise_for_status()
        data_json = response.json()

        # Derive pagination info defensively: missing keys mean a single page.
        first_page = data_json.get("data", {}).get("diff", [])
        per_page_num = len(first_page)
        total_records = data_json.get("data", {}).get("total", 0)
        total_pages = math.ceil(total_records / per_page_num) if per_page_num > 0 else 1

        # Store the first page (skip if the server returned no rows).
        if per_page_num > 0:
            all_data.append(pd.DataFrame(first_page))

        # Fetch the remaining pages with a fixed delay between requests.
        for page in tqdm(range(2, total_pages + 1), desc="Fetching pages", leave=False):
            params["pn"] = page
            time.sleep(2)  # fixed politeness delay between pages
            response = session.get(
                url,
                params=params,
                headers=headers,
                timeout=timeout,
                verify=False,
            )
            response.raise_for_status()
            data_json = response.json()
            all_data.append(pd.DataFrame(data_json["data"]["diff"]))

        # Merge the collected pages (only if anything was fetched).
        if all_data:
            result_df = pd.concat(all_data, ignore_index=True)
            if "f3" in result_df.columns:
                result_df["f3"] = pd.to_numeric(result_df["f3"], errors="coerce")
                result_df.sort_values(by=["f3"], ascending=False, inplace=True, ignore_index=True)
            result_df.index = result_df.index + 1  # 1-based row numbering
            return result_df
        return pd.DataFrame()

    except requests.exceptions.ProxyError:
        print("错误：请检查网络代理设置")
    except requests.exceptions.SSLError:
        print("错误：SSL证书验证失败，已禁用验证（仅调试用）")
    except requests.exceptions.ConnectionError as e:
        print(f"连接错误: {e}\n提示：可能是反爬机制拦截，建议更换网络或使用代理")
    except requests.exceptions.Timeout:
        print("请求超时：请检查网络稳定性或增加timeout参数")
    except (KeyError, ValueError) as e:
        print(f"数据解析异常: {e}\n返回数据: {data_json if 'data_json' in locals() else '无'}")
    except Exception as e:
        print(f"未知错误: {e}")
    finally:
        # Guarded: session stays None if construction itself raised.
        if session is not None:
            session.close()
    return pd.DataFrame()