#!/usr/bin/env python3
# etl/feature_engineer.py

"""
特征工程模块
"""

import argparse
import logging
from tqdm import tqdm
import pandas as pd
import numpy as np
from sqlalchemy import create_engine, text
from config import DB_CONFIG

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

BATCH_SIZE = 500  # rows per upsert batch sent to the database
INDEX_CODES = ["SSE", "CSI300"]  # market indexes merged into every ticker's features
MIN_DATA_POINTS = 10  # minimum history length required before a ticker is processed

# Build the database engine
def get_engine():
    """Create a SQLAlchemy engine for the PostgreSQL database in DB_CONFIG."""
    cfg = DB_CONFIG
    uri = (
        "postgresql+psycopg2://"
        f"{cfg['user']}:{cfg['password']}@{cfg['host']}:{cfg['port']}/{cfg['database']}"
    )
    return create_engine(uri)

# Module-level engine shared by every query below.
# NOTE(review): created at import time — importing this module requires a
# valid DB_CONFIG; confirm that is intended for all import sites.
engine = get_engine()

# Relative Strength Index (RSI)
def compute_rsi(series: pd.Series, period: int = 14) -> pd.Series:
    """Return the RSI of *series* using simple rolling means of length *period*.

    Gains and losses are averaged with ``min_periods=1`` so early rows are
    still defined; a tiny epsilon keeps the ratio finite when there are no
    losses in the window.
    """
    change = series.diff()
    gains = change.clip(lower=0).fillna(0)
    losses = -change.clip(upper=0).fillna(0)
    avg_gain = gains.rolling(window=period, min_periods=1).mean()
    avg_loss = losses.rolling(window=period, min_periods=1).mean()
    strength = avg_gain / (avg_loss + 1e-9)
    return 100 - (100 / (1 + strength))

# Load index closes and derive daily percentage returns per index
def prepare_index_returns():
    """Return a dict mapping each code in INDEX_CODES to a DataFrame with
    columns ``trade_date`` and ``<code>_ret`` (daily pct change of close)."""
    query = text(
        "SELECT trade_date, close FROM raw_index "
        "WHERE index_code = :code ORDER BY trade_date"
    )
    returns_by_code = {}
    with engine.connect() as conn:
        for code in INDEX_CODES:
            ret_col = f"{code}_ret"
            prices = pd.read_sql(
                query, conn, params={"code": code}, parse_dates=["trade_date"]
            )
            if prices.empty:
                # Preserve the expected column layout even with no rows.
                returns_by_code[code] = pd.DataFrame(columns=["trade_date", ret_col])
            else:
                prices[ret_col] = prices["close"].pct_change()
                returns_by_code[code] = prices[["trade_date", ret_col]]
    return returns_by_code
# Compute per-industry average intraday returns
def prepare_industry_returns():
    """Return a DataFrame (trade_date, industry, industry_ret) of average
    intraday returns per industry per day.

    ``NULLIF(d.open, 0)`` guards against division by zero: a single row with
    open = 0 would otherwise abort the entire query; with NULLIF the ratio
    becomes NULL and AVG simply skips that row.
    """
    q = """
    SELECT d.trade_date, m.industry, AVG((d.close - d.open)/NULLIF(d.open, 0)) AS industry_ret
    FROM raw_daily d
    JOIN ticker_meta m ON d.ticker = m.ticker
    WHERE m.industry IS NOT NULL
    GROUP BY d.trade_date, m.industry
    """
    with engine.connect() as conn:
        df = pd.read_sql(q, conn, parse_dates=["trade_date"])
    return df

# Enumerate every ticker present in raw_daily
def get_all_tickers():
    """Return the distinct tickers found in raw_daily as a list."""
    with engine.connect() as conn:
        result = pd.read_sql("SELECT DISTINCT ticker FROM raw_daily", conn)
    return list(result["ticker"])

# Pull the full OHLCV history for one ticker
def fetch_raw_for_ticker(ticker: str) -> pd.DataFrame:
    """Return the daily OHLCV rows for *ticker*, ordered by trade_date."""
    query = text(
        "SELECT trade_date, open, high, low, close, volume FROM raw_daily "
        "WHERE ticker = :t ORDER BY trade_date"
    )
    return pd.read_sql(query, engine, params={"t": ticker}, parse_dates=["trade_date"])

# Upsert feature rows into features_daily
def upsert_features(df: pd.DataFrame):
    """Batch-upsert *df* into features_daily keyed on (ticker, trade_date).

    Missing columns are treated as NULL, and NaN/NaT values are converted to
    None so they are stored as SQL NULLs (psycopg2 would otherwise send NaN,
    which fails on NUMERIC columns).
    """
    if df.empty:
        logger.debug("Empty dataframe provided to upsert_features, skipping")
        return

    cols = [
        "ticker","trade_date","open","high","low","close","volume",
        "ret_1d","ret_5d","ma5","ma10","rsi14","vol_ma5",
        "sse_ret","csi300_ret","industry_ret","label_next_open_up","label_next_close_up"
    ]
    # reindex fills absent columns with NaN; itertuples avoids the per-row
    # overhead of iterrows; pd.isna maps NaN/NaT to NULL.
    tuples = [
        tuple(None if pd.isna(v) else v for v in row)
        for row in df.reindex(columns=cols).itertuples(index=False, name=None)
    ]
    if not tuples:
        return

    insert_sql = """
    INSERT INTO features_daily (ticker, trade_date, open, high, low, close, volume,
        ret_1d, ret_5d, ma5, ma10, rsi14, vol_ma5,
        sse_ret, csi300_ret, industry_ret, label_next_open_up, label_next_close_up)
    VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
    ON CONFLICT (ticker, trade_date) DO UPDATE SET
        open=EXCLUDED.open,
        high=EXCLUDED.high,
        low=EXCLUDED.low,
        close=EXCLUDED.close,
        volume=EXCLUDED.volume,
        ret_1d=EXCLUDED.ret_1d,
        ret_5d=EXCLUDED.ret_5d,
        ma5=EXCLUDED.ma5,
        ma10=EXCLUDED.ma10,
        rsi14=EXCLUDED.rsi14,
        vol_ma5=EXCLUDED.vol_ma5,
        sse_ret=EXCLUDED.sse_ret,
        csi300_ret=EXCLUDED.csi300_ret,
        industry_ret=EXCLUDED.industry_ret,
        label_next_open_up=EXCLUDED.label_next_open_up,
        label_next_close_up=EXCLUDED.label_next_close_up
    """
    with engine.begin() as conn:
        # exec_driver_sql hands the %s-style statement straight to the DBAPI
        # as an executemany. Connection.execute() rejects raw SQL strings in
        # SQLAlchemy 2.x, and text() uses :named params, so the original
        # conn.execute(insert_sql, tuples) cannot work there.
        conn.exec_driver_sql(insert_sql, tuples)

# Build the full feature/label frame for one ticker
def process_ticker(ticker: str, idx_returns: dict, industry_rets: pd.DataFrame):
    """Compute technical features, index/industry context and next-day labels.

    Args:
        ticker: stock code to process.
        idx_returns: dict from prepare_index_returns().
        industry_rets: DataFrame from prepare_industry_returns().

    Returns:
        A DataFrame with the features_daily column layout, or an empty
        DataFrame when there is insufficient data or an error occurs.
    """
    try:
        df = fetch_raw_for_ticker(ticker)
        if df.empty or len(df) < MIN_DATA_POINTS:
            logger.warning(f"Not enough data for ticker {ticker}, skipping")
            return pd.DataFrame()

        df = df.sort_values("trade_date").reset_index(drop=True)

        # Technical indicators
        df["ret_1d"] = df["close"].pct_change()
        df["ret_5d"] = df["close"].pct_change(5)
        df["ma5"] = df["close"].rolling(5).mean()
        df["ma10"] = df["close"].rolling(10).mean()
        df["rsi14"] = compute_rsi(df["close"], 14)
        df["vol_ma5"] = df["volume"].rolling(5).mean()

        # Merge index returns (e.g. SSE_ret -> sse_ret to match DB columns)
        for code in INDEX_CODES:
            if code in idx_returns and not idx_returns[code].empty:
                df = df.merge(idx_returns[code], on="trade_date", how="left")
                df.rename(columns={f"{code}_ret": f"{code.lower()}_ret"}, inplace=True)

        # Merge the average return of this ticker's industry
        with engine.connect() as conn:
            meta = pd.read_sql(
                text("SELECT industry FROM ticker_meta WHERE ticker = :t"),
                conn, params={"t": ticker}
            )
        if not meta.empty and meta.iloc[0]["industry"] is not None:
            industry = meta.iloc[0]["industry"]
            industry_data = industry_rets[industry_rets["industry"] == industry][["trade_date", "industry_ret"]]
            if not industry_data.empty:
                df = df.merge(industry_data, on="trade_date", how="left")
            else:
                df["industry_ret"] = np.nan
        else:
            df["industry_ret"] = np.nan

        # Next-day labels. Rows with no following day (the last row) must be
        # dropped BEFORE casting to int: `NaN > x` evaluates to False, so
        # casting first would silently label the final row 0 instead of
        # excluding it, and dropna on the already-int label columns removes
        # nothing. .copy() avoids SettingWithCopyWarning on the filtered view.
        df["next_open"] = df["open"].shift(-1)
        df["next_close"] = df["close"].shift(-1)
        out = df.dropna(subset=["next_open", "next_close"]).copy()
        out["label_next_open_up"] = (out["next_open"] > out["open"]).astype(int)
        out["label_next_close_up"] = (out["next_close"] > out["close"]).astype(int)
        out["ticker"] = ticker

        # Guarantee the complete output schema even if a merge added nothing
        cols_out = [
            "ticker","trade_date","open","high","low","close","volume",
            "ret_1d","ret_5d","ma5","ma10","rsi14","vol_ma5",
            "sse_ret","csi300_ret","industry_ret","label_next_open_up","label_next_close_up"
        ]
        for c in cols_out:
            if c not in out.columns:
                out[c] = None

        return out[cols_out]
    except Exception as e:
        logger.error(f"Error processing ticker {ticker}: {str(e)}")
        return pd.DataFrame()

def main(args):
    """Run the feature-engineering pipeline over the selected tickers.

    Args:
        args: argparse namespace with ``all`` (bool) and ``tickers``
            (comma-separated string or None).
    """
    try:
        logger.info("Preparing industry returns...")
        # Industry average returns, shared across all tickers
        industry_rets = prepare_industry_returns()
        logger.info("Preparing index returns...")
        # Market index returns, shared across all tickers
        idx_returns = prepare_index_returns()

        # Resolve the ticker universe
        if args.all:
            tickers = get_all_tickers()
            logger.info(f"Processing all {len(tickers)} tickers")
        elif args.tickers:
            # Strip whitespace and drop empty entries so inputs like
            # "AAA, BBB," don't produce bogus ticker codes.
            tickers = [t.strip() for t in args.tickers.split(",") if t.strip()]
            logger.info(f"Processing {len(tickers)} specified tickers")
        else:
            logger.error("Specify --all or --tickers")
            return

        processed = 0
        errors = 0

        # Process each ticker independently; one failure must not stop the run
        for t in tqdm(tickers, desc="Processing tickers"):
            try:
                outdf = process_ticker(t, idx_returns, industry_rets)
                if not outdf.empty:
                    # Write in chunks to keep each upsert statement bounded
                    for i in range(0, len(outdf), BATCH_SIZE):
                        upsert_features(outdf.iloc[i:i+BATCH_SIZE])
                processed += 1
            except Exception as e:
                logger.error(f"Error processing {t}: {e}")
                errors += 1

        logger.info(f"✅ Processed {processed} tickers, {errors} errors occurred")

    except Exception as e:
        logger.error(f"Fatal error in main: {str(e)}")
        raise

if __name__ == "__main__":
    # CLI entry point: choose --all (every ticker in raw_daily) or
    # --tickers "A,B,C" (an explicit comma-separated list).
    parser = argparse.ArgumentParser(description="Feature engineering for stock data")
    parser.add_argument("--all", action="store_true", help="process all tickers")
    parser.add_argument("--tickers", type=str, help="comma separated list of tickers")
    args = parser.parse_args()
    main(args)