# https://pypi.org/project/KunQuant/
import logging
import os
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
import pandas as pd
from scipy.stats import spearmanr, pearsonr, zscore
import matplotlib.pyplot as plt
import lightgbm as lgb
import joblib
from sklearn.model_selection import TimeSeriesSplit

from KunQuant.jit import cfake
from KunQuant.Driver import KunCompilerConfig
from KunQuant.Op import Builder, Input, Output
from KunQuant.Stage import Function
from KunQuant.predefined import Alpha101
from KunQuant.runner import KunRunner as kr
from KunQuant.predefined.Alpha101 import AllData, all_alpha

def dataframe_kundata(dfs: List[pd.DataFrame], cols: List[str]) -> Tuple[int, dict]:
  """Pack per-stock OHLCV DataFrames into KunQuant's "TS" input layout.

  Parameters:
  - dfs: one DataFrame per stock; all must have the same number of rows (time).
  - cols: the column names to extract (e.g. open/high/low/close/volume/amount).

  Returns:
  - num_time: number of time steps (rows of dfs[0]).
  - input_dict: {column name: float32 matrix of shape [time, stocks]}.
  """
  col2idx = dict(zip(cols, range(len(cols))))
  num_time = len(dfs[0])

  # Gather into [features, stocks, time].
  collected = np.empty((len(col2idx), len(dfs), len(dfs[0])), dtype="float32")
  for stockidx, data in enumerate(dfs):
    for colname, colidx in col2idx.items():
      collected[colidx, stockidx, :] = data[colname].to_numpy()

  # [features, stocks, time] => [features, time, stocks]
  transposed = np.ascontiguousarray(collected.transpose((0, 2, 1)))

  # BUGFIX: a hard-coded {'open': 0, ..., 'amount': 5} mapping was used here
  # before, which silently mislabeled features (or raised IndexError)
  # whenever `cols` was in a different order or a subset.  Reuse the exact
  # mapping that was used to fill `collected` above.
  input_dict = {colname: transposed[colidx] for colname, colidx in col2idx.items()}
  return num_time, input_dict


def cal_alpha101(num_time: int, input_dict: dict) -> Tuple[int, dict]:
  """Compile the predefined Alpha101 factor graph with KunQuant and run it.

  Parameters:
  - num_time: number of time steps in each input matrix.
  - input_dict: {"open"/"high"/"low"/"close"/"volume"/"amount":
    float32 [time, stocks] matrices}, as produced by dataframe_kundata.

  Returns:
  - (num_time, out): `out` maps each alpha's function name (e.g. "alpha001")
    to its computed matrix.
    NOTE(review): the original annotation said `-> dict` but the function
    returns a tuple; the annotation above is fixed to match the code.
  """
  # Run the compiled graph on 4 worker threads.
  executor = kr.createMultiThreadExecutor(4)
  builder = Builder()
  with builder:
      vclose = Input("close")
      low = Input("low")
      high = Input("high")
      vopen = Input("open")
      amount = Input("amount")
      vol = Input("volume")
      all_data = Alpha101.AllData(low=low,high=high,close=vclose,open=vopen, amount=amount, volume=vol)
      # Register every predefined alpha as a graph output, named after its function.
      for f in all_alpha:
          out = f(all_data)
          Output(out, f.__name__)

  f = Function(builder.ops)
  # "TS" layout: [time, stocks] for both inputs and outputs.
  lib = cfake.compileit([("alpha101", f, KunCompilerConfig(input_layout="TS", output_layout="TS"))], "out_first_lib", cfake.CppCompilerConfig())
  modu = lib.getModule("alpha101")
  out = kr.runGraph(executor, modu, input_dict, 0, num_time)

  return num_time,out

def print_out(out: dict) -> None:
  """Print every matrix in `out` as a pandas DataFrame, preceded by its key."""
  for key, matrix in out.items():
    print(key)
    print(pd.DataFrame(matrix))

#用nan填充停牌的数据
# Pad suspended-trading days with NaN by reindexing onto the full
# (date, code) cartesian product.
def fill_missing_days(df, date_col='date', code_col='fullCode'):
    """Return `df` reindexed onto every (date, code) combination.

    Rows missing from the input (e.g. trading halts) appear as all-NaN rows.
    The result is indexed by a (date, code) MultiIndex.
    """
    dates = sorted(df[date_col].unique())
    codes = sorted(df[code_col].unique())

    # Full grid of every date paired with every code.
    full_index = pd.MultiIndex.from_product([dates, codes],
                                            names=[date_col, code_col])

    return df.set_index([date_col, code_col]).reindex(full_index)

#将所有股票都在一个大dataframe转换为多个单独的打他frame，并用nan填充停牌的数据,value_col=['open', 'high', 'low', 'close', 'volume', 'amount']
# Split one long-format DataFrame (all stocks) into one DataFrame per stock,
# padding suspended days with NaN rows.
def convert_big_dataframe_dataframes(df, date_col='date', code_col='fullCode'):
    """Split a long-format DataFrame into per-stock frames on a full date grid.

    Returns (all_dates, all_codes, dfs): dfs[i] is the data of the i-th code
    (in sorted order), reindexed onto every trading date — days the stock did
    not trade become NaN rows.
    """
    all_dates = sorted(df[date_col].unique())
    all_codes = sorted(df[code_col].unique())

    # Reindex onto the complete (date, code) grid so every stock covers
    # every date.
    full_index = pd.MultiIndex.from_product([all_dates, all_codes],
                                            names=[date_col, code_col])
    complete = df.set_index([date_col, code_col]).reindex(full_index)

    dfs = []
    for _, group in complete.groupby(code_col):
        # Sort by date within the stock (defensive; usually already sorted),
        # then flatten the MultiIndex back into ordinary columns.
        dfs.append(group.sort_index(level=date_col).reset_index())
    return all_dates, all_codes, dfs


def preprocess_factor(factor_matrix, method='zscore', min_std=1e-8):
    """
    Robust factor preprocessing (guards against NaN and zero std).

    Parameters:
    - factor_matrix: (N, D) stocks x time
    - method: 'zscore' or 'rank'
    - min_std: minimum std threshold used to avoid division by zero

    NOTE(review): after the transpose below, `df` has shape (time, stocks),
    so the per-column loops normalize each COLUMN of `df`.  If the input is
    really (stocks, time) as documented, each column is one stock's time
    series — i.e. this is time-series normalization, not the cross-sectional
    ("per day") normalization the original comments claimed — and the return
    value has the transposed shape (time, stocks).  Confirm the intended
    axis convention against callers.
    """
    df = pd.DataFrame(factor_matrix.T).copy()
    
    # 1. Replace inf with NaN, then fill NaN with 0.
    df.replace([np.inf, -np.inf], np.nan, inplace=True)
    df.fillna(0, inplace=True)  # alternatively forward/backward fill

    # NOTE(review): here N = rows (time) and D = columns of `df`, which is
    # the opposite of the (N, D) naming in the docstring.
    N, D = df.shape
    
    # 2. Winsorize each column at its 2% / 98% quantiles.
    for t in range(D):
        if df.iloc[:, t].nunique() == 1:  # constant column: no signal
            df.iloc[:, t] = 0
            continue
            
        low = df.iloc[:, t].quantile(0.02)
        high = df.iloc[:, t].quantile(0.98)
        df.iloc[:, t] = df.iloc[:, t].clip(lower=low, upper=high)
        
        # Guard against NaN possibly introduced by the quantile/clip step.
        if df.iloc[:, t].isnull().all():
            df.iloc[:, t] = 0

    # 3. Standardize each column.
    out = np.zeros_like(df.values, dtype=np.float32)
    
    for t in range(D):
        x = df.iloc[:, t].values
        
        if method == 'zscore':
            std = x.std()
            if std < min_std:  # std too small: treat the column as constant
                x = np.zeros_like(x)
            else:
                x = (x - x.mean()) / std
                
        elif method == 'rank':
            # Rank transform, then standardize the ranks.
            x = pd.Series(x).rank(method='average').fillna(0).values
            x = (x - x.mean()) / (x.std() + min_std)
            
        out[:, t] = x
        
    return out

def preprocess_factor2(factor_matrix, method='zscore', min_std=1e-8):
    """
    高性能、向量化的因子预处理（防 NaN、inf、标准差为0）

    Parameters:
    - factor_matrix: (N, D) 股票 × 时间
    - method: 'zscore' or 'rank'
    - min_std: 防止除以0的最小标准差阈值

    Returns:
    - processed: (N, D) np.float32 数组
    """
    # 转置并转为 DataFrame，时间作为列（N 股票 × D 时间）
    df = pd.DataFrame(factor_matrix.T)  # 自动继承索引，无需 copy()

    # 1. 替换 inf 为 NaN，并填充为 0
    df.replace([np.inf, -np.inf], np.nan, inplace=True)
    df.fillna(0, inplace=True)

    # 2. 横截面去极值：对每一天（每列）进行 winsorize（向量化）
    low = df.quantile(0.02, axis=0)   # 每天的 2% 分位数 (D,)
    high = df.quantile(0.98, axis=0)  # 每天的 98% 分位数 (D,)
    
    # 使用 clip 进行向量化裁剪
    df_clipped = df.clip(lower=low, upper=high, axis=1)

    # 再次处理全为 NaN 的列（虽然前面 fillna 了，但以防万一）
    df_clipped = df_clipped.where(~(df_clipped.isna().all()), 0)

    # 转为 numpy (N, D)
    X = df_clipped.values  # (N, D)

    # 3. 横截面标准化：对每一天（每列）进行 z-score 或 rank
    if method == 'zscore':
        means = X.mean(axis=0, keepdims=True)           # (1, D)
        stds = X.std(axis=0, keepdims=True)             # (1, D)
        
        # 防止标准差过小
        stds = np.where(stds < min_std, 1.0, stds)      # 避免除0；原为常数则归0
        result = (X - means) / stds
        # 将原本标准差太小的列设为 0
        result = np.where(stds < min_std, 0.0, result)

    elif method == 'rank':
        # 向量化横截面排序：沿 axis=0 排名
        ranks = pd.DataFrame(X).rank(method='average', axis=0).values  # (N, D)
        means = ranks.mean(axis=0, keepdims=True)
        stds = ranks.std(axis=0, keepdims=True)
        result = (ranks - means) / (stds + min_std)
    else:
        raise ValueError(f"Unknown method: {method}")

    # 强制转换为 float32 并返回
    return np.float32(result)


# 每天选 top 10% 股票
# Each day, hold the N_select highest-alpha stocks at equal weight.
def topN_portfolio(alpha, N_select=100):
    """Build an (N, D) weight matrix: 1/N_select on the top-ranked stocks.

    Days with fewer than N_select non-NaN alpha values are left all-zero.
    """
    num_days = alpha.shape[1]
    weights = np.zeros_like(alpha)
    for day in range(num_days):
        scores = alpha[:, day]
        # Skip days without enough valid scores to fill the basket.
        if np.count_nonzero(~np.isnan(scores)) < N_select:
            continue
        order = pd.Series(scores).rank(ascending=False, method='first')
        weights[:, day] = (order <= N_select).astype(float)
    return weights / N_select  # equal-weight holdings

def backtest(weights, dclose, trading_days=None):
    """
    Simple vectorized backtest.

    Parameters:
    - weights: (N, D) daily portfolio weights
    - dclose: (N, D) close prices
    - trading_days: unused; kept for interface compatibility

    Returns:
    - equity: (D,) cumulative net value, starting at 1.0
    - daily_ret: (D-1,) daily portfolio returns
    """
    # Daily log returns between consecutive closes: (N, D-1).
    ret = np.diff(np.log(dclose), axis=1)

    # BUGFIX: `weights` has D columns but `ret` has D-1, so the original
    # `weights[:, :] * ret` raised a broadcasting error.  Per the original
    # comment ("day-t weights earn the t+1 return"), drop the last column.
    daily_ret = np.sum(weights[:, :-1] * ret, axis=0)  # (D-1,)

    # Equity curve, normalized to start at 1.0.
    equity = np.cumprod(1 + daily_ret)
    equity = np.insert(equity, 0, 1.0)

    return equity, daily_ret


def backtest_with_orders(
    weights: np.ndarray,
    dclose: np.ndarray,
    trading_dates=None,
    stock_codes: Optional[List[str]] = None,
    initial_cash: float = 1e6,
    log_file: str = "backtest.log",
    fee_rate: float = 0.0005,   # commission rate: 0.05%
    slippage: float = 0.001     # slippage: 0.1%
) -> Tuple[np.ndarray, np.ndarray, List[Dict[str, Any]]]:
    """
    Backtest with rebalance orders, logging, commission and slippage.

    Rebalances only when the *set* of selected stocks changes between days
    (or on day 0); otherwise yesterday's holdings are carried forward.

    Parameters:
    - weights: (N, D) target weight matrix
    - dclose: (N, D) close price matrix
    - trading_dates: list of D dates (defaults to "Day_t" labels)
    - stock_codes: list of N stock codes (defaults to "STKxxx" labels)
    - initial_cash: starting cash, must be > 0
    - log_file: log file path (overwritten on every run)
    - fee_rate: commission rate applied to both buys and sells
    - slippage: price slippage ratio (buys filled higher, sells lower)

    Returns:
    - equity: (D,) daily total asset value
    - daily_ret: (D-1,) daily log returns of equity
    - all_orders: order records.  NOTE(review): nothing is ever appended to
      this list in this version (the recording lines are commented out), so
      the final average-turnover log line takes np.mean of an empty list and
      logs nan (with a RuntimeWarning) — confirm whether order recording
      should be restored.
    """
    N, D = weights.shape
    assert dclose.shape == (N, D), "dclose 形状必须为 (N, D)"
    assert initial_cash > 0, "初始资金必须大于 0"

    # Default date labels when none are supplied.
    if trading_dates is None:
        trading_dates = [f"Day_{t}" for t in range(D)]
    else:
        trading_dates = np.array(trading_dates)
    assert len(trading_dates) == D, "trading_dates 长度必须等于 D"

    # Default stock-code labels when none are supplied.
    if stock_codes is None:
        stock_codes = [f"STK{i:03d}" for i in range(N)]
    else:
        assert len(stock_codes) == N, "stock_codes 长度必须等于 N"

    # Logger writes to both file and stdout; old handlers are cleared so
    # repeated calls do not duplicate output.
    logger = logging.getLogger('Backtest')
    logger.setLevel(logging.INFO)
    if logger.handlers:
        logger.handlers.clear()

    fh = logging.FileHandler(log_file, mode='w', encoding='utf-8')
    ch = logging.StreamHandler()
    formatter = logging.Formatter('%(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    logger.addHandler(fh)
    logger.addHandler(ch)

    def log(msg):
        # Tiny helper so the body reads as plain log(...) calls.
        logger.info(msg)

    # === Backtest state ===
    shares = np.zeros((N, D), dtype=int)      # share holdings per day
    cash = initial_cash                       # currently available cash
    equity = np.full(D, np.nan, dtype=float)  # total assets per day
    all_orders = []                           # order records (never filled, see docstring)

    log(f"回测开始 | 初始资金: {initial_cash:,.2f} | 交易日数: {D} | 股票数: {N}")
    log(f"手续费率: {fee_rate:.2%} | 滑点: {slippage:.2%}")
    log("=" * 90)

    # === Main loop ===
    prev_selected_set = set()  # yesterday's selected-stock set

    for t in range(D):
      current_date = trading_dates[t]
      price = dclose[:, t].copy()
      # Replace non-positive / NaN prices with a tiny epsilon to avoid
      # division by zero in the share-sizing step below.
      invalid_price = (price <= 0) | np.isnan(price)
      if np.any(invalid_price):
        price[invalid_price] = 1e-8

      # --- Read target weights and determine today's selection ---
      target_weight = weights[:, t].copy()
      weight_sum = np.sum(target_weight)

      if weight_sum <= 0 or np.isnan(weight_sum) or np.isinf(weight_sum):
        # Degenerate weights: keep yesterday's position, trade nothing.
        current_selected_set = set()
        target_shares = np.zeros(N, dtype=int)  # NOTE(review): assigned but unused in this branch
        # Inherit yesterday's holdings (no trading).
        if t == 0:
          shares[:, t] = np.zeros(N, dtype=int)
        else:
          shares[:, t] = shares[:, t-1].copy()
        # Cash unchanged.
        cash = cash if t > 0 else initial_cash
      else:
        # Normalize weights to sum to 1.
        target_weight = target_weight / weight_sum
        # Today's selection: stocks with numerically positive weight.
        current_selected = np.where(target_weight > 1e-10)[0]
        current_selected_set = set(current_selected.tolist())

        # Rebalance only on day 0 or when the selection set changed.
        need_rebalance = (t == 0) or (current_selected_set != prev_selected_set)

        if not need_rebalance:
          # Same names as yesterday: carry holdings forward, no trades.
          shares[:, t] = shares[:, t-1].copy()
          # Cash unchanged (carried over from yesterday).
        else:
          # --- Rebalance: size integer target shares from target value ---
          prev_shares = shares[:, t-1] if t > 0 else np.zeros(N, dtype=int)
          total_value = cash + np.sum(prev_shares * price)
          target_value = total_value * target_weight
          target_shares = np.array([
            int(tv / p) if tv > 0 and p > 0 else 0 
            for tv, p in zip(target_value, price)
          ], dtype=int)

          # Start today's holdings at yesterday's; trade the difference.
          shares[:, t] = prev_shares.copy()
          trade_shares = target_shares - prev_shares

          # Sell first (frees cash for the buys below).
          for i in range(N):
            if trade_shares[i] < 0:
              sell_shares = int(abs(trade_shares[i]))
              if sell_shares > 0 and prev_shares[i] >= sell_shares:
                sell_price = price[i] * (1 - slippage)
                revenue = sell_shares * sell_price
                fee = revenue * fee_rate
                net_revenue = revenue - fee
                cash += net_revenue
                shares[i, t] -= sell_shares
                # Order/log recording intentionally omitted here (see docstring).

          # Then buy, only if cash covers cost + fee (otherwise skip).
          for i in range(N):
            if trade_shares[i] > 0:
              buy_shares = int(trade_shares[i])
              buy_price = price[i] * (1 + slippage)
              cost = buy_shares * buy_price
              fee = cost * fee_rate
              total_cost = cost + fee
              if cash >= total_cost and buy_shares > 0:
                cash -= total_cost
                shares[i, t] += buy_shares
                # Order/log recording intentionally omitted here (see docstring).

        # Remember today's selection for tomorrow's comparison.
        prev_selected_set = current_selected_set

      # --- Mark-to-market total assets (whether or not we traded) ---
      current_portfolio_value = np.sum(shares[:, t] * price)
      equity[t] = cash + current_portfolio_value

      # Sanity check: day-0 equity should remain close to the initial cash.
      if t == 0:
          assert abs(equity[t] - initial_cash) < initial_cash * 0.1, \
              f"第一天总资产异常: {equity[t]:,.2f}，预期接近 {initial_cash:,.2f}"

      log(f"[{current_date}] 总资产: {equity[t]:,.2f}, 现金: {cash:,.2f}, "
          f"持仓市值: {current_portfolio_value:,.2f}")
      log("-" * 90)

    # === Wrap-up: summary statistics ===
    final_equity = equity[-1]
    total_return = (final_equity / initial_cash) - 1
    annual_return = ((1 + total_return) ** (252 / D)) - 1 if D > 0 else 0

    log(f"回测完成！")
    log(f"最终总资产: {final_equity:,.2f}")
    log(f"总收益率: {total_return:.2%}")
    log(f"年化收益率 (252天): {annual_return:.2%}")
    log(f"日均交易额: {np.mean([o['value'] for o in all_orders if o['value']>0]):,.2f}")

    # Daily log returns of the equity curve.
    daily_ret = np.diff(np.log(equity))

    return equity, daily_ret, all_orders


def performance(equity, daily_ret, risk_free_rate=0.03):
    """Print annualized return/volatility, Sharpe ratio and max drawdown.

    Parameters:
    - equity: equity curve (net value series)
    - daily_ret: daily return series
    - risk_free_rate: annual risk-free rate used in the Sharpe ratio
    """
    equity = np.asarray(equity, dtype=float)
    daily_ret = np.asarray(daily_ret, dtype=float)

    # BUGFIX: normalize by the starting equity instead of assuming the curve
    # starts at 1.0.
    annual_ret = (equity[-1] / equity[0]) ** (252 / len(daily_ret)) - 1
    annual_vol = np.std(daily_ret) * np.sqrt(252)
    sharpe = (annual_ret - risk_free_rate) / annual_vol

    # BUGFIX: `np.max(equity - running_max)` is always <= 0, so the old code
    # reported ~0 drawdown.  Use the peak-relative definition instead.
    running_max = np.maximum.accumulate(equity)
    max_drawdown = np.max((running_max - equity) / running_max)

    print(f"年化收益: {annual_ret:.1%}")
    print(f"年化波动: {annual_vol:.1%}")
    print(f"夏普比率: {sharpe:.2f}")
    print(f"最大回撤: {max_drawdown:.1%}")


def combine_ohlcv_to_dclose(stock_dfs_dict, date_col='date', code_col='fullCode', close_col='close'):
    """
    Merge per-stock OHLCV DataFrames into a dclose matrix.

    Parameters:
    - stock_dfs_dict: dict {code: DataFrame} (as documented) or a plain
      iterable of DataFrames
    - date_col: date column name, values in '%Y%m%d' format
    - code_col: stock-code column name
    - close_col: close-price column name

    Returns:
    - dclose: (N, D) float32 numpy array (stocks x dates)
    - stock_list: stock codes in pivot column order
    - date_list: sorted trading dates
    """
    # BUGFIX: iterating a dict directly yields its KEYS (strings), which then
    # crashed on `df[[...]]`.  Accept both the documented dict form and a
    # plain iterable of DataFrames.
    frames = stock_dfs_dict.values() if isinstance(stock_dfs_dict, dict) else stock_dfs_dict

    pieces = []
    for df in frames:
        # Work on a copy so callers' frames are untouched.
        df_clean = df[[date_col, code_col, close_col]].copy()
        df_clean[date_col] = pd.to_datetime(df_clean[date_col], format='%Y%m%d')

        # Normalize the close column name for the pivot below.
        df_clean.rename(columns={close_col: 'close'}, inplace=True)

        pieces.append(df_clean)

    combined = pd.concat(pieces, ignore_index=True)

    # Pivot: rows = dates, columns = stocks, values = close.
    close_wide = combined.pivot(index=date_col, columns=code_col, values='close')
    close_wide.sort_index(inplace=True)

    # BUGFIX: forward-fill suspensions ALONG TIME for each stock.  The old
    # `close_wide.T.ffill().T` filled along the stock axis (ffill's default
    # axis=0 on the transposed frame), leaking one stock's price into the
    # next.  With dates on the index, a plain ffill() fills down in time.
    close_wide = close_wide.ffill()

    close_wide = close_wide.astype(np.float32)

    # (dates, stocks) -> (stocks, dates)
    dclose = close_wide.values.T

    return dclose, close_wide.columns.tolist(), close_wide.index.tolist()

def get_factors_eq(processed_factors, N, D):
  """Equal-weight composite: average all factor matrices into one (N, D) alpha."""
  total = np.zeros((N, D))
  for factor in processed_factors.values():
      total = total + factor
  # Simple average across factors.
  return total / len(processed_factors)

  # 方法 2：IC 加权（推荐）
# 计算每期 IC

def get_factors_ic(processed_factors,N,D,dclose):
  """IC-weighted factor combination (method 2: per-day information coefficients).

  NOTE(review): two apparent issues to confirm with the author — documented
  as-is, behavior unchanged:
  1. `np.corrcoef(...)[0, -1]` picks only the correlation of the FIRST
     factor with the returns, not an aggregate over all factors.
  2. `zip(processed_factors.values(), rolling_ic)` pairs factor k with the
     rolling IC of DAY k and truncates at the shorter length, mixing the
     factor axis with the time axis.
  """
  # Next-period log returns (N, D-1).
  future_ret = np.diff(np.log(dclose), axis=1)  # 下一期对数收益 (N, D-1)

  ic_weights = []
  for t in range(D-1):
      ic_t = np.corrcoef(
          [f[:, t] for f in processed_factors.values()], 
          future_ret[:, t]
      )[0, -1]  # corrcoef's last row/col holds factor-vs-return correlations
      ic_weights.append(ic_t)

  # Smooth the per-day ICs with a 20-day rolling mean (at least 5 samples).
  ic_weights = np.array(ic_weights)
  rolling_ic = pd.Series(ic_weights).rolling(20, min_periods=5).mean().fillna(0).values

  # Combine factors into alpha using the smoothed ICs as weights.
  alpha = np.zeros((N, D-1))
  for arr, ic in zip(processed_factors.values(), rolling_ic):
      alpha += arr[:, :-1] * ic

  return alpha


def get_factors_xgb(processed_factors,N,D,dclose):
  """Combine factors into an alpha with an XGBoost regressor.

  Trains on (factor values at day t) -> (log return from t to t+1), then
  predicts day t+1's alpha from day t's factors.  Columns 0..1 of the
  returned (N, D) matrix stay zero (no prediction available there).
  """
  import xgboost as xgb

  factor_names = list(processed_factors)

  # Assemble (features, next-period log return) training pairs, skipping
  # invalid prices.
  X, y = [], []
  for t in range(1, D - 1):
      for i in range(N):
          price = dclose[i, t]
          if not np.isnan(price) and price > 0:
              X.append([processed_factors[name][i, t] for name in factor_names])
              y.append(np.log(dclose[i, t + 1] / price))

  model = xgb.XGBRegressor(n_estimators=100, max_depth=3)
  model.fit(np.array(X), np.array(y))

  # Rolling prediction: day-t features predict day t+1's alpha.
  alpha = np.zeros((N, D))
  for t in range(1, D - 1):
      feats = np.array([[processed_factors[name][i, t] for name in factor_names]
                        for i in range(N)])
      alpha[:, t + 1] = model.predict(feats)
  return alpha


def get_factors_lgb(
    processed_factors: Dict[str, np.ndarray],
    N: int, D: int, dclose: np.ndarray,
    val_size: int = 20,
    n_estimators: int = 100,
    max_depth: int = 3,
    model_save_dir: Optional[str] = None,
    save_final_model: bool = False,
    reuse_final_model: bool = False
) -> np.ndarray:
    """
    Combine multiple factors into an alpha with LightGBM; supports saving and
    reloading the trained model for later inference.

    Parameters:
    - processed_factors: dict of (N, D) arrays
    - N, D: number of stocks / days
    - dclose: (N, D) price matrix
    - val_size: validation-set size (reserved interface, currently unused)
    - n_estimators: number of boosting rounds
    - max_depth: tree depth
    - model_save_dir: directory for saved models; None disables saving
    - save_final_model: also save one "final" model for future inference
    - reuse_final_model: load the saved final model and predict, skipping training

    Returns:
    - alpha: (N, D-1) predicted alpha values

    NOTE(review): training rows are ordered stock-major (stock 0's full
    history, then stock 1's, ...), so TimeSeriesSplit's "past vs future"
    split follows row order rather than strict calendar time — confirm this
    is intended.  Rows never covered by any validation fold keep their
    initial 0 in `fold_preds`.
    """
    if model_save_dir is not None:
        os.makedirs(model_save_dir, exist_ok=True)
        avg_model_path = os.path.join(model_save_dir, 'lgb_model_final.txt')
    else:
        avg_model_path = None

    # ========================
    # Option 1: reuse a previously trained model for prediction
    # ========================
    if reuse_final_model and avg_model_path and os.path.exists(avg_model_path):
        print(f"🔁 复用已有模型进行预测: {avg_model_path}")
        model = lgb.Booster(model_file=avg_model_path)

        # Load metadata to recover the factor-name order used at training time.
        meta_path = os.path.join(model_save_dir, 'metadata.pkl')
        if not os.path.exists(meta_path):
            raise FileNotFoundError(f"缺少元数据文件: {meta_path}")
        meta_data = joblib.load(meta_path)
        factor_names = meta_data['factor_names']

        # Build the prediction feature frame (same layout as training).
        df = _construct_feature_dataframe(processed_factors, factor_names, N, D-1)
        X = df[factor_names]

        # Predict with the reloaded model.
        predictions = model.predict(X)
        alpha = predictions.reshape(N, D-1)
        return alpha

    # ========================
    # Option 2: normal training flow
    # ========================
    # 1. Future (T+1) log returns.
    ret = np.diff(np.log(dclose), axis=1)  # (N, D-1)

    # 2. Feature DataFrame: one row per (stock, day).
    factor_names = list(processed_factors.keys())
    df = _construct_feature_dataframe(processed_factors, factor_names, N, D-1)
    X = df[factor_names]
    y = pd.Series(ret.flatten(), name='return')  # flattened (N*(D-1),), matches df row order

    # 3. Time-series cross-validation.
    tscv = TimeSeriesSplit(n_splits=5)
    fold_preds = np.zeros(len(y))
    trained_models: List[lgb.Booster] = []

    print("🚀 开始时间序列交叉验证训练...")
    for fold_idx, (train_idx, val_idx) in enumerate(tscv.split(X)):
        X_train, X_val = X.iloc[train_idx], X.iloc[val_idx]
        y_train, y_val = y.iloc[train_idx], y.iloc[val_idx]

        train_data = lgb.Dataset(X_train, label=y_train)
        val_data = lgb.Dataset(X_val, label=y_val, reference=train_data)

        params = {
            'objective': 'regression',
            'metric': 'l2',
            'boosting_type': 'gbdt',
            'max_depth': max_depth,
            'num_leaves': min(2 ** max_depth - 1, 31),  # keep num_leaves consistent with depth
            'learning_rate': 0.05,
            'feature_fraction': 0.8,
            'bagging_fraction': 0.8,
            'bagging_freq': 5,
            'verbose': -1,
            'seed': 42 + fold_idx
        }

        model = lgb.train(
            params,
            train_data,
            num_boost_round=n_estimators,
            valid_sets=[val_data],
            callbacks=[lgb.early_stopping(10), lgb.log_evaluation(-1)]
        )

        val_pred = model.predict(X_val)
        fold_preds[val_idx] += val_pred
        trained_models.append(model)
        print(f"  ✅ 第 {fold_idx+1} 折训练完成")

    # 4. Reshape out-of-fold predictions into the alpha matrix.
    alpha = fold_preds.reshape(N, D-1)
    alpha = np.nan_to_num(alpha, nan=0.0)

    # 5. Persist models and metadata.
    if model_save_dir is not None:
        # Save each fold's model (optional).
        for idx, model in enumerate(trained_models):
            model_path = os.path.join(model_save_dir, f'lgb_model_fold_{idx}.txt')
            model.save_model(model_path)
            print(f"✅ 保存交叉验证模型: {model_path}")

        # Save the metadata needed to reproduce the feature layout at inference.
        meta_data = {
            'factor_names': factor_names,
            'N': N,
            'D': D,
            'n_estimators': n_estimators,
            'max_depth': max_depth,
            'trained_on_dates': D-1,
            'folds': len(trained_models)
        }
        meta_path = os.path.join(model_save_dir, 'metadata.pkl')
        joblib.dump(meta_data, meta_path)
        print(f"✅ 保存元数据: {meta_path}")

        # Save the final inference model (here: the first fold's model).
        if save_final_model and trained_models:
            final_model = trained_models[0]  # could be replaced by an ensemble
            final_model.save_model(avg_model_path)
            print(f"✅ 保存最终推理模型: {avg_model_path}")

    return alpha

def _construct_feature_dataframe(
    processed_factors: Dict[str, np.ndarray],
    factor_names: List[str],
    N: int,
    T: int
) -> pd.DataFrame:
    """
    构建用于训练/预测的 DataFrame
    T 通常是 D-1（因收益是 diff 得到）
    """
    df_data = []
    for i in range(N):
        for t in range(T):
            row = {'stock_id': i, 'date': t}
            for name in factor_names:
                row[name] = processed_factors[name][i, t]
            df_data.append(row)
    return pd.DataFrame(df_data)


def factor_analysis(factor_dict: dict, returns: pd.DataFrame, forward_window=5):
    """
    Compute per-factor evaluation statistics.

    Parameters:
    - factor_dict: {name: np.array (T, N)} or {name: pd.DataFrame(T, N)}
      NOTE(review): ndarrays are transposed below ("ST -> TS"), implying
      they arrive as (N, T) — the docstring and the `.T` disagree; confirm
      which orientation callers actually pass.
    - returns: pd.DataFrame(T, N), next-period returns
    - forward_window: horizon (days) of the forward return window

    Returns:
    - summary: DataFrame indexed by factor name with IC / Rank_IC / IR /
      Turnover / Mean / Std / AutoCorr_1D, sorted by |Rank_IC| descending.
    """
    results = {}
    
    for name, factor in factor_dict.items():
        if isinstance(factor, np.ndarray):
            factor = pd.DataFrame(factor.T)  # ST -> TS
        
        # Align time index
        common_idx = factor.index.intersection(returns.index)
        fac = factor.loc[common_idx]
        ret = returns.loc[common_idx]

        # Lag the factor by one period so it predicts the future return
        fac_lagged = fac.shift(1).dropna()
        ret_forward = ret.rolling(forward_window).sum().shift(-forward_window).loc[fac_lagged.index]

        # Mask NaN cells on both sides; stacking drops the masked entries
        valid_mask = ~(fac_lagged.isna() | ret_forward.isna())
        fac_clean = fac_lagged[valid_mask].stack()
        ret_clean = ret_forward[valid_mask].stack()

        # IC (Pearson) and Rank IC (Spearman)
        ic = pearsonr(fac_clean, ret_clean)[0]
        rank_ic = spearmanr(fac_clean, ret_clean)[0]
        # NOTE(review): Series.corr returns a scalar, and `.std()` of a numpy
        # scalar is 0.0, so this division yields ±inf.  IR is conventionally
        # mean(IC)/std(IC) over time — flagged here, behavior not changed.
        ir = rank_ic / fac_clean.corr(ret_clean, method='spearman').std() if len(fac_clean) > 1 else np.nan

        # Mean absolute day-over-day factor change as a turnover proxy
        turnover = (fac_lagged.diff(1).abs().mean(axis=1)).mean()

        results[name] = {
            'IC': ic,
            'Rank_IC': rank_ic,
            'IR': ir,
            'Turnover': turnover,
            'Mean': fac_clean.mean(),
            'Std': fac_clean.std(),
            'AutoCorr_1D': fac_lagged.corrwith(fac_lagged.shift(1), axis=1).mean()
        }

    summary = pd.DataFrame(results).T
    summary.sort_values("Rank_IC", key=abs, ascending=False, inplace=True)
    return summary

def combine_factors_ic_weighted(factor_dict, returns, lookback=20):
    """Rolling IC-weighted factor combination.

    NOTE(review): an identical function with the same name is defined again
    later in this module; at import time that later definition shadows this
    one.  Keep only one copy.
    """
    from sklearn.preprocessing import RobustScaler
    
    signals = {}
    for t in range(lookback, len(returns)):
        # Re-estimate factor ICs over the trailing lookback window.
        window_factors = {k: v[t-lookback:t] for k, v in factor_dict.items()}
        analysis = factor_analysis(window_factors, returns[t-lookback:t])
        
        # Clip extreme IC weights, then normalize to unit absolute sum.
        weights = analysis['Rank_IC'].fillna(0).clip(-0.3, 0.3)
        weights = weights / weights.abs().sum()
        
        current_factors = pd.DataFrame({k: v[t] for k, v in factor_dict.items()})
        signal = (current_factors * weights).sum(axis=1)
        signals[t] = RobustScaler().fit_transform(signal.values.reshape(-1, 1)).flatten()
    
    return pd.DataFrame(signals).T

# def backtest(signal_df: pd.DataFrame, price_df: pd.DataFrame, 
#              rebalance_freq='5D', cost_rate=0.001):
#     """
#     signal_df: (T, N) 综合信号
#     price_df: (T, N) 收盘价
#     """
#     # 对齐时间
#     common_idx = signal_df.index.intersection(price_df.index)
#     signal = signal_df.loc[common_idx].fillna(0)
#     price = price_df.loc[common_idx]

#     # 生成持仓（每 5 天调仓）
#     position = signal.rolling(2).mean().shift(1)  # 平滑 & lag
#     position = position.resample(rebalance_freq).ffill()

#     # 收益计算
#     ret = price.pct_change().reindex(position.index)
#     portfolio_ret = (position * ret).mean(axis=1) - abs(position.diff()).mean(axis=1) * cost_rate

#     # 累计净值
#     nav = (1 + portfolio_ret).cumprod()
    
#     # 绩效指标
#     ann_ret = nav.pct_change(periods=252).mean()
#     ann_vol = portfolio_ret.std() * np.sqrt(252)
#     sharpe = ann_ret / ann_vol
#     mdd = (nav / nav.cummax() - 1).min()

#     # print(f"Sharpe: {sharpe:.2f}, Ann Ret: {ann_ret*100:.2f}%, MDD: {mdd*100:.2f}%")
    
#     # nav.plot(title="Portfolio NAV")
#     # plt.show()

#     return portfolio_ret, nav


def combine_factors_ic_weighted(factor_dict, returns, lookback=20):
    """Rolling IC-weighted factor combination.

    For each day t, factor weights are the Rank_IC of each factor measured
    over the preceding `lookback` days (clipped to +/-0.3 and normalized to
    unit absolute sum); the combined daily signal is then robust-scaled.
    """
    from sklearn.preprocessing import RobustScaler

    signals = {}
    for day in range(lookback, len(returns)):
        # Re-estimate factor ICs on the trailing lookback window.
        history = {name: series[day - lookback:day]
                   for name, series in factor_dict.items()}
        stats = factor_analysis(history, returns[day - lookback:day])

        # Cap extreme ICs, then normalize to unit absolute weight.
        ic_weights = stats['Rank_IC'].fillna(0).clip(-0.3, 0.3)
        ic_weights = ic_weights / ic_weights.abs().sum()

        today = pd.DataFrame({name: series[day]
                              for name, series in factor_dict.items()})
        combined = (today * ic_weights).sum(axis=1)
        signals[day] = RobustScaler().fit_transform(
            combined.values.reshape(-1, 1)).flatten()

    return pd.DataFrame(signals).T

# def backtest(signal_df: pd.DataFrame, price_df: pd.DataFrame, 
#              rebalance_freq='5D', cost_rate=0.001):
#     """
#     signal_df: (T, N) 综合信号
#     price_df: (T, N) 收盘价
#     """
#     # 对齐时间
#     common_idx = signal_df.index.intersection(price_df.index)
#     signal = signal_df.loc[common_idx].fillna(0)
#     price = price_df.loc[common_idx]

#     # 生成持仓（每 5 天调仓）
#     position = signal.rolling(2).mean().shift(1)  # 平滑 & lag
#     position = position.resample(rebalance_freq).ffill()

#     # 收益计算
#     ret = price.pct_change().reindex(position.index)
#     portfolio_ret = (position * ret).mean(axis=1) - abs(position.diff()).mean(axis=1) * cost_rate

#     # 累计净值
#     nav = (1 + portfolio_ret).cumprod()
    
#     # 绩效指标
#     ann_ret = nav.pct_change(periods=252).mean()
#     ann_vol = portfolio_ret.std() * np.sqrt(252)
#     sharpe = ann_ret / ann_vol
#     mdd = (nav / nav.cummax() - 1).min()

#     print(f"Sharpe: {sharpe:.2f}, Ann Ret: {ann_ret*100:.2f}%, MDD: {mdd*100:.2f}%")
    
#     nav.plot(title="Portfolio NAV")
#     plt.show()

#     return portfolio_ret, nav

if __name__ == "__main__":
  # Entry point intentionally empty: this module is used as a library.
  pass