"""ETF 特征工程流水线."""

from __future__ import annotations

import logging
from typing import Iterable, Tuple

import numpy as np
import pandas as pd
from scipy import stats

# 导入基础工具函数
from .utils import _ema, _rolling_std, _sma

# 导入经典技术指标函数
from .indicators import (
    _adx,
    _atr,
    _bollinger,
    _cci,
    _cmo,
    _kdj,
    _macd,
    _mfi,
    _obv,
    _rsi,
    _williams_r,
)

# 导入高级技术指标函数
from .advanced_indicators import (
    _garman_klass_volatility,
    _parkinson_volatility,
    _price_percentile_rank,
    _roc,
    _trend_persistence,
    _trix,
    _volatility_cone,
    _vwap,
)

# 导入K线形态识别函数
from .patterns import _detect_candlestick_patterns

# Default rolling windows for scale-sensitive indicators.
# NOTE(review): not referenced anywhere in this chunk — presumably part of the
# module's public API consumed by callers; confirm before removing.
DEFAULT_SCALE_WINDOWS: Tuple[int, int, int] = (10, 14, 20)
# Module-level logger; messages use lazy %-style args where hot.
logger = logging.getLogger(__name__)


def build_feature_dataframe(
    frame: pd.DataFrame,
    *,
    price_column: str | None = None,
    high_column: str | None = None,
    low_column: str | None = None,
    volume_column: str | None = None,
    drop_na: bool = True,
    additional_windows: Iterable[int] | None = None,
) -> pd.DataFrame:
    """Build the engineered feature DataFrame from raw OHLCV data.

    Parameters
    ----------
    frame:
        DataFrame containing OHLCV data. Supports English column names
        (open, high, low, close, volume) or Chinese column names
        (开盘, 最高, 最低, 收盘, 成交量).
    price_column:
        Close-price column name; auto-detected when None (Chinese names
        take precedence).
    high_column:
        High-price column name; auto-detected when None.
    low_column:
        Low-price column name; auto-detected when None.
    volume_column:
        Volume column name; auto-detected when None.
    drop_na:
        Whether to drop rows left missing by rolling computations.
        Default True.
    additional_windows:
        Extra volatility windows to compute beyond the defaults.

    Returns
    -------
    pd.DataFrame
        One column per feature, indexed like ``frame`` (sorted by index).

    Raises
    ------
    ValueError
        If any required OHLCV column cannot be found.
    """

    df = frame.copy()
    df = df.sort_index()

    # Auto-detect column names (Chinese names preferred, English accepted).
    if price_column is None:
        if "收盘" in df.columns:
            price_column = "收盘"
        elif "close" in df.columns:
            price_column = "close"
        else:
            raise ValueError("无法找到收盘价列（收盘 或 close）")

    if high_column is None:
        if "最高" in df.columns:
            high_column = "最高"
        elif "high" in df.columns:
            high_column = "high"
        else:
            raise ValueError("无法找到最高价列（最高 或 high）")

    if low_column is None:
        if "最低" in df.columns:
            low_column = "最低"
        elif "low" in df.columns:
            low_column = "low"
        else:
            raise ValueError("无法找到最低价列（最低 或 low）")

    if volume_column is None:
        if "成交量" in df.columns:
            volume_column = "成交量"
        elif "volume" in df.columns:
            volume_column = "volume"
        else:
            raise ValueError("无法找到成交量列（成交量 或 volume）")

    required_cols = {price_column, high_column, low_column, volume_column}
    missing = required_cols.difference(frame.columns)
    if missing:
        raise ValueError(f"特征工程输入缺少必要列: {missing}")

    close = df[price_column]
    high = df[high_column]
    low = df[low_column]
    volume = df[volume_column]
    returns = close.pct_change()
    log_returns = np.log(close / close.shift(1))

    sample_count = len(df)
    valid_price_mask = (~high.isna()) & (~low.isna()) & (~close.isna())
    valid_price_ratio = float(valid_price_mask.sum() / sample_count) if sample_count else 0.0

    features = pd.DataFrame(index=df.index)
    features["return_1d"] = returns
    features["log_return_1d"] = log_returns
    features["mom_10"] = close.pct_change(10)
    features["volatility_20"] = returns.rolling(20, min_periods=20).std()
    # Annualize assuming 252 trading days per year.
    features["volatility_20_annualized"] = features["volatility_20"] * np.sqrt(252)

    sma20 = _sma(close, 20)
    ema20 = _ema(close, 20)
    features["sma_20"] = sma20
    features["ema_20"] = ema20
    features["bias_sma_20"] = (close - sma20) / sma20
    features["bias_ema_20"] = (close - ema20) / ema20

    # ========== Trend persistence (medium priority) ==========
    # Consecutive days the price has stayed above/below a moving average.
    # Positive = above the MA, negative = below; magnitude = streak length.
    # Helps judge whether a trend is about to reverse.
    features["trend_persistence_sma20"] = _trend_persistence(close, sma20)
    features["trend_persistence_ema20"] = _trend_persistence(close, ema20)

    macd = _macd(close)
    features = pd.concat([features, macd], axis=1)

    features["rsi_14"] = _rsi(close)

    # CMO is dropped when it is nearly collinear with RSI (|corr| >= 0.95).
    cmo_candidate = _cmo(close)
    if sample_count >= 50:
        overlap_corr = features["rsi_14"].corr(cmo_candidate)
        if pd.notna(overlap_corr) and abs(overlap_corr) >= 0.95:
            logger.info(
                "跳过 CMO 指标（与 RSI 高度相关，corr=%.3f）", overlap_corr
            )
        else:
            features["cmo_14"] = cmo_candidate
    else:
        logger.info("跳过 CMO 指标（样本量不足 %s < 50）", sample_count)

    features["atr_14"] = _atr(high, low, close)

    boll = _bollinger(close)
    features = pd.concat([features, boll], axis=1)
    features["pct_from_bb_mid"] = (close - boll["bb_mid"]) / boll["bb_mid"]

    vol_sma20 = _sma(volume, 20)
    features["volume_raw"] = volume
    features["volume_sma_20"] = vol_sma20
    features["volume_ratio"] = volume / vol_sma20.replace(0, np.nan)

    # ========== Extended moving-average features ==========
    for window in [5, 10, 30, 60]:
        sma = _sma(close, window)
        ema = _ema(close, window)
        features[f"sma_{window}"] = sma
        features[f"ema_{window}"] = ema
        features[f"bias_sma_{window}"] = (close - sma) / sma.replace(0, np.nan)
        features[f"bias_ema_{window}"] = (close - ema) / ema.replace(0, np.nan)

    # Moving-average crossover flags (1 = fast above slow).
    features["sma_cross_5_20"] = (features["sma_5"] > features["sma_20"]).astype(int)
    features["ema_cross_5_20"] = (features["ema_5"] > features["ema_20"]).astype(int)
    features["sma_cross_10_30"] = (features["sma_10"] > features["sma_30"]).astype(int)

    # Moving-average slope features (one-period relative change).
    for window in [5, 10, 20, 30]:
        sma = features[f"sma_{window}"]
        features[f"sma_{window}_slope"] = sma.pct_change(1)
        ema = features[f"ema_{window}"]
        features[f"ema_{window}_slope"] = ema.pct_change(1)

    # ========== Additional technical indicators ==========
    if sample_count >= 50:
        kdj = _kdj(high, low, close)
        features = pd.concat([features, kdj], axis=1)
    else:
        logger.info("跳过 KDJ 指标（样本量不足 %s < 50）", sample_count)

    if sample_count >= 100:
        features["cci_20"] = _cci(high, low, close, window=20)
    else:
        logger.info("跳过 CCI 指标（样本量不足 %s < 100）", sample_count)

    adx_df = _adx(high, low, close)
    features = pd.concat([features, adx_df], axis=1)

    # ========== TRIX (triple-smoothed EMA, medium priority) ==========
    # TRIX is a long-term trend indicator; the triple EMA smoothing filters
    # out short-term noise. 14-day window.
    features["trix_14"] = _trix(close, window=14)

    features["obv"] = _obv(close, volume)
    features["obv_ma_20"] = _sma(features["obv"], 20)
    features["obv_ratio"] = features["obv"] / features["obv_ma_20"].replace(0, np.nan)

    features["mfi_14"] = _mfi(high, low, close, volume)

    # Williams %R needs enough samples and mostly-valid prices to be reliable.
    if sample_count >= 50 and valid_price_ratio >= 0.8:
        features["williams_r_14"] = _williams_r(high, low, close)
    else:
        logger.info(
            "跳过 Williams %%R 指标（样本量=%s，价格有效占比=%.2f，需要≥50 且 ≥0.80）",
            sample_count,
            valid_price_ratio,
        )

    # ========== Price-shape (candlestick geometry) features ==========
    # Auto-detect the open-price column (Chinese or English).
    if "开盘" in df.columns:
        open_price = df["开盘"]
    elif "open" in df.columns:
        open_price = df["open"]
    else:
        open_price = close  # Fall back to close when no open price exists.
    features["price_range"] = (high - low) / close
    features["body_size"] = (close - open_price) / close
    features["upper_shadow"] = (high - pd.concat([close, open_price], axis=1).max(axis=1)) / close
    features["lower_shadow"] = (pd.concat([close, open_price], axis=1).min(axis=1) - low) / close
    features["price_position"] = (close - low) / (high - low).replace(0, np.nan)

    # ========== Price percentile rank (high priority) ==========
    # Percentile rank (0-100) of the current price within the trailing N-day
    # range; higher values mean the price sits near its historical high,
    # which flags potential extremes.
    for window in [20, 60, 120]:
        if sample_count >= window:
            features[f"price_percentile_{window}"] = _price_percentile_rank(close, window)
        else:
            logger.info("跳过价格百分位排名（window=%s，样本量不足 %s < %s）", window, sample_count, window)

    # ========== Candlestick pattern recognition ==========
    # Each recognized pattern yields a 0/1 indicator column.
    candlestick_patterns = _detect_candlestick_patterns(open_price, high, low, close)
    features = pd.concat([features, candlestick_patterns], axis=1)

    # ========== Extended momentum features ==========
    for window in [5, 20, 30, 60]:
        features[f"mom_{window}"] = close.pct_change(window)

    # Momentum rate-of-change (first difference of momentum).
    features["mom_change_10"] = features["mom_10"].diff()
    features["mom_change_20"] = features["mom_20"].diff()

    # Acceleration (momentum of momentum): second difference of momentum.
    # BUG FIX: the original computed a single diff here, duplicating
    # mom_change_* exactly instead of differencing twice.
    features["mom_accel_10"] = features["mom_10"].diff().diff()
    features["mom_accel_20"] = features["mom_20"].diff().diff()

    # ========== ROC (rate of change, high priority) ==========
    # ROC complements RSI as a momentum gauge:
    # ROC = (price - price N days ago) / price N days ago * 100.
    for window in [10, 20]:
        features[f"roc_{window}"] = _roc(close, window)

    # ========== Relative-strength features ==========
    # Ratios between the ETF's own rolling returns across horizons; values
    # above 1 mean short-term momentum is stronger than long-term, flagging
    # trend acceleration or deceleration.
    if "mom_5" in features.columns and "mom_20" in features.columns:
        features["relative_strength_5_20"] = features["mom_5"] / features["mom_20"].replace(0, np.nan)

    if "mom_10" in features.columns and "mom_60" in features.columns:
        features["relative_strength_10_60"] = features["mom_10"] / features["mom_60"].replace(0, np.nan)

    if "mom_20" in features.columns and "mom_60" in features.columns:
        features["relative_strength_20_60"] = features["mom_20"] / features["mom_60"].replace(0, np.nan)

    # ========== Statistical features ==========
    stats_windows = [20]
    if sample_count >= 100:
        for window in stats_windows:
            rolling_returns = returns.rolling(window, min_periods=window)

            skew_series = rolling_returns.apply(lambda x: stats.skew(x.dropna()), raw=False)
            kurt_series = rolling_returns.apply(lambda x: stats.kurtosis(x.dropna()), raw=False)

            # Only keep skew/kurtosis when the estimate is stable (rolling
            # std of the statistic at the last observation < 0.5).
            if not skew_series.dropna().empty:
                skew_stability = skew_series.rolling(window, min_periods=max(5, window // 2)).std().iloc[-1]
                if pd.notna(skew_stability) and skew_stability < 0.5:
                    features[f"skewness_{window}"] = skew_series
                else:
                    logger.info(
                        "跳过偏度特征（window=%s，稳定性=%.3f>=0.5）",
                        window,
                        skew_stability if pd.notna(skew_stability) else float("nan"),
                    )
            else:
                logger.info("跳过偏度特征（window=%s，样本不足）", window)

            if not kurt_series.dropna().empty:
                kurt_stability = kurt_series.rolling(window, min_periods=max(5, window // 2)).std().iloc[-1]
                if pd.notna(kurt_stability) and kurt_stability < 0.5:
                    features[f"kurtosis_{window}"] = kurt_series
                else:
                    logger.info(
                        "跳过峰度特征（window=%s，稳定性=%.3f>=0.5）",
                        window,
                        kurt_stability if pd.notna(kurt_stability) else float("nan"),
                    )
            else:
                logger.info("跳过峰度特征（window=%s，样本不足）", window)

            # Quantile features (25%, 50%, 75%). The median is always kept;
            # the 25%/75% quantiles are vetted later during feature selection.
            median_series = rolling_returns.quantile(0.5)
            features[f"quantile_50_{window}"] = median_series

            q25_series = rolling_returns.quantile(0.25)
            q75_series = rolling_returns.quantile(0.75)
            features[f"quantile_25_{window}"] = q25_series
            features[f"quantile_75_{window}"] = q75_series

            # Relative position (0-1) of the max/min price inside the window.
            rolling_close = close.rolling(window, min_periods=window)
            max_idx = rolling_close.apply(lambda x: x.argmax() if len(x) > 0 else np.nan, raw=False)
            min_idx = rolling_close.apply(lambda x: x.argmin() if len(x) > 0 else np.nan, raw=False)
            features[f"max_position_{window}"] = max_idx / window
            features[f"min_position_{window}"] = min_idx / window
    else:
        logger.info("跳过高级统计特征（样本量不足 %s < 100）", sample_count)

    # Lagged autocorrelation features (requires > 150 samples). A lag is
    # kept only when the latest estimate is statistically significant
    # (|z| > 1.96 under the 1/sqrt(n) standard error approximation).
    if sample_count >= 150:
        autocorr_window = 20
        for lag in [1, 5, 10]:
            shifted_returns = returns.shift(lag)
            rolling_autocorr = returns.rolling(autocorr_window, min_periods=autocorr_window).corr(shifted_returns)
            latest_autocorr = rolling_autocorr.dropna().iloc[-1] if not rolling_autocorr.dropna().empty else np.nan
            if pd.isna(latest_autocorr):
                logger.info("跳过自相关特征（lag=%s，缺少有效估计）", lag)
                continue
            std_error = 1 / np.sqrt(autocorr_window)
            z_score = abs(latest_autocorr) / std_error if std_error > 0 else np.inf
            if z_score > 1.96:
                features[f"autocorr_lag_{lag}"] = rolling_autocorr
            else:
                logger.info(
                    "跳过自相关特征（lag=%s，显著性不足：|z|=%.3f≤1.96）",
                    lag,
                    z_score,
                )
    else:
        logger.info("跳过自相关特征（样本量不足 %s < 150）", sample_count)

    # ========== Lag features ==========
    for lag in [1, 3, 5]:
        features[f"close_lag_{lag}"] = close.shift(lag) / close
        features[f"return_lag_{lag}"] = returns.shift(lag)
        features[f"volume_lag_{lag}"] = volume.shift(lag) / volume.replace(0, np.nan)

    # ========== Rolling statistics ==========
    # Rolling mean/std of the close over 5/10/20 days; rolling max/min are
    # deliberately excluded (too sensitive to outliers). These are raw-price
    # statistics, distinct from the SMA/EMA indicator columns above.
    for window in [5, 10, 20]:
        features[f"close_rolling_mean_{window}"] = close.rolling(window, min_periods=window).mean()
        features[f"close_rolling_std_{window}"] = close.rolling(window, min_periods=window).std()

    # ========== Calendar features ==========
    if isinstance(df.index, pd.DatetimeIndex):
        features["day_of_week"] = df.index.dayofweek + 1  # 1-7
        features["month"] = df.index.month  # 1-12
        features["is_month_start"] = df.index.is_month_start.astype(int)
        features["is_month_end"] = df.index.is_month_end.astype(int)
        features["is_quarter_end"] = df.index.is_quarter_end.astype(int)
    else:
        # Non-datetime index: emit NaN placeholders so the schema is stable.
        features["day_of_week"] = np.nan
        features["month"] = np.nan
        features["is_month_start"] = np.nan
        features["is_month_end"] = np.nan
        features["is_quarter_end"] = np.nan

    # ========== Extended volume features ==========
    volume_change = volume.pct_change()
    # Reuse the already-computed daily returns (identical to close.pct_change()).
    price_change = returns

    # Price-volume relationship (rolling correlation). The original rebuilt
    # an identical two-column DataFrame on every iteration; the correlation
    # is computed directly on the two Series with the same result.
    for window in [10, 20]:
        features[f"price_volume_corr_{window}"] = price_change.rolling(
            window, min_periods=window
        ).corr(volume_change)

    features["volume_change_rate"] = volume_change
    features["volume_volatility_20"] = volume_change.rolling(20, min_periods=20).std()

    # OBV rate of change.
    features["obv_change_rate"] = features["obv"].pct_change()

    # ========== VWAP (volume-weighted average price) ==========
    # VWAP is a common institutional price benchmark; deviation from it
    # indicates how far price has strayed from the volume-weighted mean.
    vwap = _vwap(high, low, close, volume)
    features["vwap"] = vwap
    # Positive bias = price above VWAP, negative = below.
    features["price_vwap_bias"] = (close - vwap) / vwap.replace(0, np.nan)

    # ========== Extended volatility features ==========
    for window in [5, 10, 30, 60]:
        features[f"volatility_{window}"] = returns.rolling(window, min_periods=window).std()
        features[f"volatility_{window}_annualized"] = features[f"volatility_{window}"] * np.sqrt(252)

    # Volatility ratios (short-horizon vs long-horizon).
    features["volatility_ratio_5_20"] = features["volatility_5"] / features["volatility_20"].replace(0, np.nan)
    features["volatility_ratio_10_30"] = features["volatility_10"] / features["volatility_30"].replace(0, np.nan)

    # Volatility rate of change.
    features["volatility_change_20"] = features["volatility_20"].pct_change()
    features["volatility_change_60"] = features["volatility_60"].pct_change()

    # Realized volatility from intraday high/low range.
    features["realized_volatility"] = np.log(high / low).rolling(20, min_periods=20).std() * np.sqrt(252)

    # ========== Parkinson volatility ==========
    # Range-based estimator using intraday high/low; more efficient than a
    # close-to-close estimator because it uses more price information.
    parkinson_vol = _parkinson_volatility(high, low, window=20)
    features["parkinson_volatility_20"] = parkinson_vol
    features["parkinson_volatility_20_annualized"] = parkinson_vol * np.sqrt(252)

    # ========== Volatility cone (high priority) ==========
    # Percentile rank (0-100) of current volatility within its trailing
    # 252-day distribution; flags abnormally high/low volatility regimes.
    if sample_count >= 252:
        if "volatility_20" in features.columns:
            features["volatility_cone_20"] = _volatility_cone(features["volatility_20"], lookback_window=252)
        if "volatility_60" in features.columns:
            features["volatility_cone_60"] = _volatility_cone(features["volatility_60"], lookback_window=252)
    else:
        logger.info("跳过波动率锥（样本量不足 %s < 252）", sample_count)

    # ========== Garman-Klass volatility (medium priority) ==========
    # Uses all four OHLC prices; more informative than Parkinson.
    gk_vol = _garman_klass_volatility(open_price, high, low, close, window=20)
    features["garman_klass_volatility_20"] = gk_vol
    features["garman_klass_volatility_20_annualized"] = gk_vol * np.sqrt(252)

    # Extra volatility windows (kept for backward compatibility).
    if additional_windows:
        for window in additional_windows:
            if f"volatility_{window}" not in features.columns:
                features[f"volatility_{window}"] = returns.rolling(
                    window, min_periods=window
                ).std()

    # ========== Interaction features ==========
    # Price x volume interaction.
    features["price_volume_interaction"] = (price_change * volume_change).fillna(0)

    # RSI x intraday price position.
    features["rsi_price_position"] = features["rsi_14"] * features["price_position"]

    # Indicator ratios.
    features["rsi_ratio"] = features["rsi_14"] / 100.0
    features["macd_over_atr"] = features["macd_dif"] / features["atr_14"].replace(0, np.nan)

    # MACD x momentum interaction.
    features["macd_momentum_interaction"] = features["macd_dif"] * features["mom_10"]

    # Volume x price-range interaction.
    features["volume_range_interaction"] = features["volume_ratio"] * features["price_range"]

    features.replace([np.inf, -np.inf], np.nan, inplace=True)

    # Track how many rows are lost at each stage for logging.
    original_len = len(frame)
    before_fillna_len = len(features)

    # Filling strategy to limit data loss:
    # 1. Forward-fill numeric features; fall back to median when that fails.
    # 2. Leading NaNs from rolling windows cannot be forward-filled.
    # 3. Rows are only dropped when the missing ratio stays too high.

    # Per-column missing-value ratios.
    missing_ratios = features.isna().sum() / len(features)

    # Columns with < 50% missing: forward-fill then backward-fill.
    # Columns with >= 50% missing: median fill (or 0 if the median is NaN).
    for col in features.columns:
        col_missing_ratio = missing_ratios[col]
        if col_missing_ratio > 0:
            if col_missing_ratio < 0.5:
                features[col] = features[col].ffill()
                # First rows may still be NaN after ffill; bfill covers them.
                features[col] = features[col].bfill()
            else:
                median_value = features[col].median()
                if pd.notna(median_value):
                    features[col] = features[col].fillna(median_value)
                else:
                    features[col] = features[col].fillna(0.0)

    after_fillna_len = len(features)
    remaining_missing = features.isna().sum().sum()

    # Drop rows only when requested AND missing values remain.
    if drop_na and remaining_missing > 0:
        # Only drop rows where every feature is NaN.
        features = features.dropna(how='all')
        after_dropna_len = len(features)
        lost_count = original_len - after_dropna_len
        lost_before_fillna = original_len - before_fillna_len
        lost_after_fillna = before_fillna_len - after_fillna_len
        lost_after_dropna = after_fillna_len - after_dropna_len

        if lost_count > 0:
            logger.info(
                f"特征工程数据丢失: 原始数据 {original_len} 条，"
                f"滚动计算后 {before_fillna_len} 条，"
                f"填充后 {after_fillna_len} 条，"
                f"dropna 后 {after_dropna_len} 条，"
                f"总计丢失 {lost_count} 条（{lost_count/original_len:.1%}）"
            )

            # Warn when more than 10% of the rows were lost.
            if lost_count > original_len * 0.1:
                logger.warning(
                    f"特征工程数据丢失较多（{lost_count/original_len:.1%}），"
                    f"可能影响模型训练。"
                    f"丢失原因：滚动窗口计算丢失 {lost_before_fillna} 条，"
                    f"填充后仍缺失 {lost_after_fillna} 条，"
                    f"dropna 删除 {lost_after_dropna} 条。"
                )
    else:
        # No rows dropped: just report the fill outcome.
        lost_count = original_len - after_fillna_len
        if lost_count > 0:
            logger.info(
                f"特征工程数据丢失: 原始数据 {original_len} 条，"
                f"滚动计算后 {before_fillna_len} 条，"
                f"填充后 {after_fillna_len} 条，"
                f"丢失 {lost_count} 条（{lost_count/original_len:.1%}，"
                f"由于滚动窗口计算，已使用填充策略减少丢失）"
            )

        # Missing values remain but were not dropped — warn.
        if remaining_missing > 0:
            logger.warning(
                f"特征工程后仍有 {remaining_missing} 个缺失值（未删除），"
                f"可能影响模型训练。建议检查数据质量或启用 drop_na=True。"
            )

    return features

