#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Scrape paginated margin-trading ("rzrq") totals from EastMoney's data center.

Example endpoint:
https://datacenter-web.eastmoney.com/api/data/v1/get
?callback=xxx
&reportName=RPTA_RZRQ_LSHJ
&columns=ALL
&sortColumns=dim_date
&sortTypes=-1
&pageNumber={page}
&pageSize=50
"""

import csv
import json
import time
import re
import requests
import pandas as pd
from pathlib import Path
import logging
from typing import List, Dict, Any, Optional, Union
from concurrent.futures import ThreadPoolExecutor, as_completed
from functools import wraps
import os

# Logging: mirror every message to a log file and to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('../result/rzrq_fetch.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

# Request constants: endpoint, base query parameters, and browser-like headers.
BASE_URL = "https://datacenter-web.eastmoney.com/api/data/v1/get"
PARAMS = {
    "reportName": "RPTA_RZRQ_LSHJ",
    "columns": "ALL",
    "source": "WEB",
    "sortColumns": "dim_date",
    "sortTypes": "-1",  # -1 = newest first
    "pageSize": 50,
    "filter": "",
}

# Referer/User-Agent mimic a normal browser visit so the API responds.
HEADERS = {
    "Accept": "*/*",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Referer": "https://data.eastmoney.com/rzrq/total.html",
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
        "AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/119.0 Safari/537.36"
    ),
}

# Default output path for the merged CSV.
CSV_FILE = "../result/rzrq_total.csv"

# Map raw API column codes to human-readable (Chinese) column names.
# Values are written to the CSV header, so they must stay as-is.
COLUMN_RENAME = {
    "DIM_DATE": "交易日期",
    "NEW": "沪深300收盘",
    "ZDF": "沪深300涨幅",
    "ZDF3D": "沪深300涨幅3d",
    "ZDF5D": "沪深300涨幅5d",
    "ZDF10D": "沪深300涨幅10d",
    "RZYE": "融资余额",
    "RZYEZB": "融资余额余额占流通市值比",
    "RZMRE": "融资买入额",
    "RZMRE3D": "融资买入额3d",
    "RZMRE5D": "融资买入额5d",
    "RZMRE10D": "融资买入额10d",
    "RZCHE": "融资偿还额",
    "RZCHE3D": "融资偿还额3d",
    "RZCHE5D": "融资偿还额5d",
    "RZCHE10D": "融资偿还额10d",
    "RQYE": "融券余额",
    "RQYL": "融券余量（股）",
    "RQCHL": "融券偿还量（股）",
    "RQCHL3D": "融券偿还量3d（股）",
    "RQCHL5D": "融券偿还量5d（股）",
    "RQCHL10D": "融券偿还量10d（股）",
    "RQMCL": "融券卖出量",
    "RQMCL3D": "融券卖出量3D",
    "RQMCL5D": "融券卖出量5D",
    "RQMCL10D": "融券卖出量10D",
    "RQJMG": "融券净卖出股",
    "RQJMG3D": "融券净卖出股3D",
    "RQJMG5D": "融券净卖出股5D",
    "RQJMG10D": "融券净卖出股10D",
    "RZRQYE": "融资融券余额",
    "RZRQYECZ": "融资融券差值",
}


def retry_on_failure(max_retries: int = 3, delay: float = 1.0):
    """Decorator that retries the wrapped function on any exception.

    Args:
        max_retries: Total number of attempts before giving up.
        delay: Base sleep in seconds; the wait between attempts grows
            linearly as ``delay * attempt_number``.

    Raises:
        Re-raises the last exception once all attempts are exhausted.
    """

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(max_retries):
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    if attempt == max_retries - 1:
                        # Last attempt failed: surface the error to the caller.
                        logger.error(f"函数 {func.__name__} 最终失败: {e}")
                        raise
                    logger.warning(f"函数 {func.__name__} 第 {attempt + 1} 次失败: {e}, 重试中...")
                    # Linear backoff: delay, 2*delay, 3*delay, ...
                    # (original comment claimed exponential; the formula is linear)
                    time.sleep(delay * (attempt + 1))
            # Unreachable: the loop always returns or re-raises.

        return wrapper

    return decorator


class RzrqDataFetcher:
    """Fetches EastMoney margin-trading ("rzrq") summary data page by page,
    merges it with previously saved results, and persists it to CSV."""

    def __init__(self, session: Optional[requests.Session] = None):
        """Create a fetcher; reuses *session* if given, otherwise builds one.

        The shared browser-like HEADERS are installed on the session so every
        request looks like a normal page visit.
        """
        self.session = session or requests.Session()
        self.session.headers.update(HEADERS)

    @retry_on_failure(max_retries=3, delay=2.0)
    def fetch_one_page(self, page: int) -> pd.DataFrame:
        """Fetch a single page and return it as a DataFrame with renamed columns.

        Raises:
            ValueError: if the API response lacks ``result.data``.
            requests.HTTPError: on a non-2xx HTTP status.
        """
        params = {
            **PARAMS,
            "pageNumber": page,
            "pageNo": page,  # some endpoint variants read pageNo instead of pageNumber
            "callback": "",  # empty callback -> plain JSON instead of JSONP
        }

        logger.info(f"正在获取第 {page} 页数据...")
        resp = self.session.get(BASE_URL, params=params, timeout=15)
        resp.raise_for_status()

        data = resp.json()
        if not data.get("result") or not data["result"].get("data"):
            raise ValueError(f"第 {page} 页数据格式异常")

        df = pd.DataFrame(data["result"]["data"]).rename(columns=COLUMN_RENAME)

        logger.info(f"第 {page} 页获取成功，共 {len(df)} 条记录")
        return df

    def get_total_pages(self) -> int:
        """Return the total page count reported by the API for this report.

        Raises:
            ValueError: if the response does not contain ``result.pages``.
        """
        try:
            resp = self.session.get(BASE_URL, params={**PARAMS, "pageNumber": 1}, timeout=10)
            resp.raise_for_status()
            data = resp.json()
            result = data.get("result") or {}
            if "pages" not in result:
                # Explicit error instead of an opaque KeyError/TypeError.
                raise ValueError("响应中缺少 result.pages 字段")
            total_pages = result["pages"]
            logger.info(f"总页数: {total_pages}")
            return total_pages
        except Exception as e:
            logger.error(f"获取总页数失败: {e}")
            raise

    def fetch_all_pages(self, max_workers: int = 5) -> pd.DataFrame:
        """Fetch every page concurrently and return one concatenated DataFrame.

        Failed pages are logged and skipped; raises ValueError only if no
        page at all could be fetched.
        """
        total_pages = self.get_total_pages()
        logger.info(f"开始获取 {total_pages} 页数据，使用 {max_workers} 个线程")

        all_dataframes = []

        # Fetch the first page synchronously so an early failure aborts fast.
        first_page_df = self.fetch_one_page(1)
        all_dataframes.append(first_page_df)

        # Fetch the remaining pages concurrently.
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            future_to_page = {
                executor.submit(self.fetch_one_page, page): page
                for page in range(2, total_pages + 1)
            }

            for future in as_completed(future_to_page):
                page = future_to_page[future]
                try:
                    df = future.result()
                    all_dataframes.append(df)
                    logger.info(f"第 {page} 页获取完成")
                except Exception as e:
                    # Best-effort: a single bad page does not abort the run.
                    logger.error(f"第 {page} 页获取失败: {e}")

        # Merge all page frames into one.
        if all_dataframes:
            result_df = pd.concat(all_dataframes, ignore_index=True)
            logger.info(f"数据获取完成，总计 {len(result_df)} 条记录")
            return result_df
        else:
            raise ValueError("没有获取到任何数据")

    def save_data(self, df: pd.DataFrame, filename: str = CSV_FILE) -> None:
        """Save *df* to *filename* as UTF-8-BOM CSV (Excel-friendly).

        Empty frames are skipped with a warning instead of writing a header-only file.
        """
        if df.empty:
            logger.warning("数据为空，跳过保存")
            return

        # Ensure the target directory exists. Guard against a bare filename
        # with no directory part: os.makedirs("") raises FileNotFoundError.
        directory = os.path.dirname(filename)
        if directory:
            os.makedirs(directory, exist_ok=True)

        df.to_csv(filename, index=False, encoding='utf-8-sig')
        # Fixed: the message previously logged a literal placeholder instead
        # of the actual target filename.
        logger.info(f"已保存 {len(df)} 条记录到 {filename}")

    def load_existing_data(self, filename: str = CSV_FILE) -> Optional[pd.DataFrame]:
        """Load a previously saved CSV, or return None if missing/unreadable."""
        try:
            if os.path.exists(filename):
                df = pd.read_csv(filename)
                logger.info(f"加载现有数据 {len(df)} 条记录")
                return df
        except Exception as e:
            # Best-effort load: a corrupt file degrades to a full refresh.
            logger.warning(f"加载现有数据失败: {e}")
        return None

    def merge_and_update(self, new_df: pd.DataFrame, existing_df: Optional[pd.DataFrame] = None) -> pd.DataFrame:
        """Merge *new_df* with existing data, de-duplicating on trade date.

        On duplicate dates the newly fetched row wins (keep='last' after
        appending new rows behind existing ones).
        """
        if existing_df is None:
            existing_df = self.load_existing_data()

        if existing_df is None or existing_df.empty:
            return new_df

        # Concatenate then keep the freshest row per trade date.
        combined_df = pd.concat([existing_df, new_df], ignore_index=True)
        combined_df = combined_df.sort_values('交易日期').drop_duplicates(subset=['交易日期'], keep='last')

        logger.info(f"数据合并完成，原有 {len(existing_df)} 条，新增 {len(new_df)} 条，合并后 {len(combined_df)} 条")
        return combined_df


def fetch_one_page(page: int) -> pd.DataFrame:
    """Backward-compatible module-level wrapper around
    ``RzrqDataFetcher.fetch_one_page``."""
    return RzrqDataFetcher().fetch_one_page(page)


def fetch_all_pages(max_retry: int = 3) -> List[Dict[str, Any]]:
    """Backward-compatible module-level wrapper that returns all pages as
    a list of record dicts.

    Note: ``max_retry`` is accepted only for interface compatibility and is
    not used; retries are handled by the ``retry_on_failure`` decorator on
    ``RzrqDataFetcher.fetch_one_page``.
    """
    frames = RzrqDataFetcher().fetch_all_pages()
    return frames.to_dict(orient="records")


def save_csv(rows: Union[List[Dict[str, Any]], pd.DataFrame], filename: str = CSV_FILE) -> None:
    """Backward-compatible wrapper: save *rows* (records or a DataFrame) to CSV."""
    df = rows if isinstance(rows, pd.DataFrame) else pd.DataFrame(rows)
    RzrqDataFetcher().save_data(df, filename)


def rename_column_name(
    src: Union[str, Path] = Path("../result/rzrq_total.csv"),
    dest: Union[str, Path] = Path("../result/rzrq_total_renamed.csv"),
) -> None:
    """Rewrite a raw CSV with columns renamed per COLUMN_RENAME.

    Generalized from hard-coded paths: *src*/*dest* default to the original
    locations, so existing no-argument calls behave identically.

    Args:
        src: Path of the raw CSV produced by the fetch step.
        dest: Path to write the renamed copy to.

    Raises:
        FileNotFoundError: if *src* does not exist.
    """
    src = Path(src)
    dest = Path(dest)

    if not src.exists():
        raise FileNotFoundError(f"{src} 不存在，请先运行抓取脚本生成原始文件")

    df = pd.read_csv(src)
    df.rename(columns=COLUMN_RENAME, inplace=True)
    df.to_csv(dest, index=False, encoding="utf-8-sig")
    logger.info(f"重命名完成！已保存为 {dest.resolve()}")


def main():
    """Entry point: fetch all pages, merge with any existing CSV, save result."""
    try:
        fetcher = RzrqDataFetcher()

        # Pull the full dataset from the API.
        fresh = fetcher.fetch_all_pages()

        # Fold it into whatever was saved previously (de-duped by trade date).
        combined = fetcher.merge_and_update(fresh)

        # Persist the merged result.
        fetcher.save_data(combined)

        logger.info("数据获取和保存完成！")
    except Exception as e:
        logger.error(f"程序执行失败: {e}")
        raise


# Run the full fetch -> merge -> save pipeline when executed as a script.
if __name__ == "__main__":
    main()
