import os
import time
from datetime import datetime, timedelta
from typing import List, Optional

import pandas as pd
import numpy as np


# 输出文件名约定
DAILY_CSV = 'stock_daily_data.csv'
STOCKS_CSV = 'stocks.csv'
TECH_CSV = 'stock_technical_indicators.csv'
TECH_CLEAN_CSV = 'stock_technical_indicators_clean.csv'
PRED_CSV = 'ai_predictions.csv'
DEFAULT_CODES_FILE = 'stocks_list.txt'

# stocks.csv base fields (6 columns; an 'id' column is prepended on write, for 7 total)
STOCK_BASIC_FIELDS = ['ts_code','symbol','name','area','industry','list_date']


def load_codes(path: Optional[str]) -> Optional[List[str]]:
    """Read a stock-code list file and return sorted unique codes, or None.

    Falls back to DEFAULT_CODES_FILE when *path* is falsy. Returns None if
    the file does not exist or yields no codes. Each line may contain one
    code or several comma-separated codes.
    """
    target = path or DEFAULT_CODES_FILE
    if not os.path.exists(target):
        return None
    collected: List[str] = []
    with open(target, 'r', encoding='utf-8') as fh:
        for raw_line in fh:
            # Split on commas so one line can carry multiple codes.
            for piece in raw_line.split(','):
                token = piece.strip()
                if token:
                    collected.append(token)
    # dict.fromkeys dedupes while keeping first occurrence; then sort.
    return sorted(dict.fromkeys(collected)) or None


def daterange_days(days: int) -> List[str]:
    """Return the last *days* calendar dates as 'YYYYMMDD' strings, oldest first."""
    base = datetime.now().date()
    # Count offsets down from (days-1) to 0 so the result is already ascending.
    return [(base - timedelta(days=offset)).strftime('%Y%m%d')
            for offset in range(days - 1, -1, -1)]


def fetch_daily_by_date(days: int = 30, rate_seconds: float = 1.5) -> pd.DataFrame:
    """Fetch whole-market daily bars from Tushare for the last *days* calendar days.

    Calls pro.daily() once per date, sleeping *rate_seconds* after every call
    (success, empty, or failure) to respect the API rate limit. Failed or
    empty dates are skipped with a console message. Returns a single
    DataFrame deduplicated on (ts_code, trade_date), keeping the last row.

    Raises:
        RuntimeError: when the TUSHARE_TOKEN environment variable is unset.
    """
    token = os.environ.get('TUSHARE_TOKEN')
    if not token:
        raise RuntimeError('缺少环境变量 TUSHARE_TOKEN，用于调用 Tushare。')
    import tushare as ts
    pro = ts.pro_api(token)

    frames = []
    dates = daterange_days(days)
    want_cols = ['ts_code','trade_date','open','high','low','close','vol','amount']
    for idx, d in enumerate(dates, 1):
        try:
            df = pro.daily(trade_date=d)
        except Exception as e:
            print(f'[WARN] pro.daily({d}) 调用失败: {e}; 跳过')
            time.sleep(rate_seconds)
            continue
        if df is None or df.empty:
            # Non-trading day, or the API returned no rows for this date.
            time.sleep(rate_seconds)
            continue
        # Keep only the expected columns, in want_cols order.
        keep = [c for c in want_cols if c in df.columns]
        dfi = df[keep].copy()
        # Normalize dtypes: trade_date as str, price/volume fields numeric.
        dfi['trade_date'] = dfi['trade_date'].astype(str)
        for col in ['open','high','low','close','vol','amount']:
            if col in dfi.columns:
                dfi[col] = pd.to_numeric(dfi[col], errors='coerce')
        frames.append(dfi)
        print(f'[INFO] 获取 {d} 日线: {len(dfi)} 行 ({idx}/{len(dates)})')
        time.sleep(rate_seconds)

    if not frames:
        return pd.DataFrame(columns=want_cols)
    all_df = pd.concat(frames, ignore_index=True)
    # Deduplicate: keep the last row per (ts_code, trade_date) pair.
    all_df = all_df.sort_values(['ts_code','trade_date']).drop_duplicates(['ts_code','trade_date'], keep='last')
    return all_df.reset_index(drop=True)

def filter_daily_by_codes(daily_df: pd.DataFrame, codes: Optional[List[str]]) -> pd.DataFrame:
    """Keep only rows whose ts_code is in *codes*; pass through when codes is empty.

    Returns *daily_df* unchanged (same object) when it is not a non-empty
    DataFrame or when no codes are given; otherwise a filtered copy with a
    fresh index.
    """
    usable = isinstance(daily_df, pd.DataFrame) and not daily_df.empty
    if not usable or not codes:
        return daily_df
    mask = daily_df['ts_code'].isin(set(codes))
    selected = daily_df.loc[mask].copy()
    return selected.reset_index(drop=True)


def write_daily_csv(df: pd.DataFrame) -> pd.DataFrame:
    """Write daily rows to DAILY_CSV with a 1-based id column; return what was written.

    A None/empty input produces a header-only CSV with the full schema.
    """
    data_cols = ['ts_code','trade_date','open','high','low','close','vol','amount']
    if df is None or df.empty:
        header_only = pd.DataFrame(columns=['id'] + data_cols)
        header_only.to_csv(DAILY_CSV, index=False)
        return header_only
    # Select schema columns, sort deterministically, then prepend sequential ids.
    payload = (df[data_cols]
               .sort_values(['ts_code', 'trade_date'])
               .reset_index(drop=True))
    payload.insert(0, 'id', range(1, len(payload) + 1))
    payload.to_csv(DAILY_CSV, index=False)
    return payload


def _group_indicators(g: pd.DataFrame) -> pd.DataFrame:
    """Compute indicator columns for one stock's date-sorted daily rows."""
    close_s = pd.to_numeric(g['close'], errors='coerce')
    ma5 = close_s.rolling(window=5, min_periods=5).mean()
    ma20 = close_s.rolling(window=20, min_periods=20).mean()
    # RSI via EWM smoothing (alpha=1/14); a zero average loss yields NaN RSI
    # rather than a divide-by-zero (0 is replaced with NaN before dividing).
    delta = close_s.diff()
    gain = delta.clip(lower=0)
    loss = -delta.clip(upper=0)
    avg_gain = gain.ewm(alpha=1/14, min_periods=14, adjust=False).mean()
    avg_loss = loss.ewm(alpha=1/14, min_periods=14, adjust=False).mean()
    rs = avg_gain / (avg_loss.replace(0, np.nan))
    rsi = 100 - (100 / (1 + rs))
    # MACD line only (EMA12 - EMA26); no signal/histogram columns are emitted.
    ema12 = close_s.ewm(span=12, adjust=False).mean()
    ema26 = close_s.ewm(span=26, adjust=False).mean()
    macd = ema12 - ema26
    # Bollinger bands: 20-day mean +/- 2 standard deviations.
    mid = close_s.rolling(window=20, min_periods=20).mean()
    std = close_s.rolling(window=20, min_periods=20).std()
    return pd.DataFrame({
        'ts_code': g['ts_code'],
        'trade_date': g['trade_date'],
        'ma5': ma5.round(2),
        'ma20': ma20.round(2),
        'rsi': rsi.round(2),
        'macd': macd.round(4),
        'boll_upper': (mid + 2 * std).round(2),
        'boll_lower': (mid - 2 * std).round(2),
    })


def compute_indicators(daily_df: pd.DataFrame):
    """Compute per-stock technical indicators over the last 60 calendar days.

    Writes TECH_CSV (all rows; NaN where the lookback window is too short)
    and TECH_CLEAN_CSV (rows with every indicator present), and returns the
    (tech, clean) DataFrame pair.

    Bug fix vs. the original: when *daily_df* was non-empty but every
    trade_date fell before the 60-day cutoff, the group loop produced no
    rows and sorting a column-less DataFrame raised KeyError. Both empty
    cases now share one empty-result branch.
    """
    tech_cols = ['id','ts_code','trade_date','ma5','ma20','rsi','macd','boll_upper','boll_lower']
    cutoff = (datetime.now() - timedelta(days=60)).strftime('%Y%m%d')
    d = daily_df.copy()
    if not d.empty:
        d['trade_date'] = d['trade_date'].astype(str)
        d = d[d['trade_date'] >= cutoff]
    if d.empty:
        empty = pd.DataFrame(columns=tech_cols)
        empty.to_csv(TECH_CSV, index=False)
        empty.to_csv(TECH_CLEAN_CSV, index=False)
        return pd.DataFrame(columns=tech_cols), pd.DataFrame(columns=tech_cols)
    rows = [_group_indicators(grp.sort_values('trade_date')) for _, grp in d.groupby('ts_code')]
    tech = pd.concat(rows, ignore_index=True)
    tech = tech.sort_values(['ts_code','trade_date']).reset_index(drop=True)
    tech.insert(0, 'id', range(1, len(tech) + 1))
    tech = tech[tech_cols]
    tech.to_csv(TECH_CSV, index=False)
    clean = tech.dropna(subset=['ma5','ma20','rsi','macd','boll_upper','boll_lower'], how='any')
    clean.to_csv(TECH_CLEAN_CSV, index=False)
    return tech, clean


def build_stocks_from_daily(daily_df: pd.DataFrame):
    """Build stocks.csv (basic info) for the ts_codes present in *daily_df*.

    Tries one bulk Tushare stock_basic call first (requires TUSHARE_TOKEN);
    on failure or missing token, falls back to AkShare to fill in names only,
    leaving area/industry/list_date empty. Always writes STOCKS_CSV and
    returns the written DataFrame (with a 1-based id column).
    """
    # Collect the ts_code universe from the daily data; prefer Tushare for the
    # full set of basic fields, then fall back to AkShare for names.
    ts_codes = sorted(daily_df['ts_code'].dropna().unique().tolist()) if not daily_df.empty else []
    # First attempt: a single bulk Tushare request.
    df_ts = None
    token = os.environ.get('TUSHARE_TOKEN')
    if token and ts_codes:
        try:
            import tushare as ts
            pro = ts.pro_api(token)
            # Fetch basic info for every listed ('L') stock, then filter locally.
            df_all = pro.stock_basic(exchange='', list_status='L', fields='ts_code,symbol,name,area,industry,list_date')
            if df_all is not None and not df_all.empty:
                df_ts = df_all[df_all['ts_code'].isin(set(ts_codes))].copy()
        except Exception as e:
            print(f'[WARN] Tushare stock_basic 获取失败，将回退 AkShare: {e}')
            df_ts = None

    if df_ts is not None and not df_ts.empty:
        df_ts = df_ts[STOCK_BASIC_FIELDS].copy()
        df_ts.insert(0, 'id', range(1, len(df_ts) + 1))
        df_ts.to_csv(STOCKS_CSV, index=False)
        return df_ts

    # Fallback: map symbol -> name via AkShare; other fields stay empty.
    try:
        import akshare as ak
    except Exception:
        ak = None
    name_map = {}
    if ak is not None and ts_codes:
        # Up to 3 attempts; AkShare column names vary across versions, so probe
        # a few likely code/name column candidates before giving up.
        for _ in range(3):
            try:
                raw = ak.stock_info_a_code_name()
                cols = list(raw.columns)
                code_candidates = ['symbol','股票代码','代码','code']
                name_candidates = ['name','股票简称','简称','security_name']
                sym_col = next((c for c in code_candidates if c in cols), None)
                name_col = next((c for c in name_candidates if c in cols), None)
                if sym_col and name_col:
                    raw[sym_col] = raw[sym_col].astype(str).str.strip()
                    raw[name_col] = raw[name_col].astype(str).str.strip()
                    name_map = dict(zip(raw[sym_col], raw[name_col]))
                    break
            except Exception:
                time.sleep(2)
    rows = []
    for c in ts_codes:
        # ts_code looks like '600000.SH'; the bare exchange-less symbol precedes the dot.
        sym = c.split('.')[0]
        nm = name_map.get(sym)
        rows.append({'ts_code': c, 'symbol': sym, 'name': nm if nm else None, 'area': None, 'industry': None, 'list_date': None})
    df = pd.DataFrame(rows, columns=STOCK_BASIC_FIELDS) if rows else pd.DataFrame(columns=STOCK_BASIC_FIELDS)
    df.insert(0, 'id', range(1, len(df) + 1))
    df.to_csv(STOCKS_CSV, index=False)
    return df


def build_predictions_from_indicators(tech_df: pd.DataFrame):
    """Score each stock's latest indicator row into a [0, 1] next-day prediction.

    The score blends an MA5/MA20 trend component with an RSI component
    (50/50). Missing or degenerate values fall back to neutral defaults so
    every stock in *tech_df* receives exactly one prediction. Writes
    PRED_CSV and returns the written DataFrame.
    """
    predict_date = datetime.now().strftime('%Y%m%d')
    for_date = (datetime.now() + timedelta(days=1)).strftime('%Y%m%d')
    required = {'ts_code','trade_date','ma5','ma20','rsi'}
    if tech_df is None or tech_df.empty or not required <= set(tech_df.columns):
        placeholder = pd.DataFrame(columns=['id','ts_code','predict_date','for_date','prediction_score'])
        placeholder.to_csv(PRED_CSV, index=False)
        return placeholder
    # Keep the most recent row per stock; NaN indicators get neutral defaults.
    work = tech_df.copy()
    work['trade_date'] = work['trade_date'].astype(str)
    last_rows = work.sort_values(['ts_code','trade_date']).groupby('ts_code', as_index=False).tail(1)
    ma5_vals = pd.to_numeric(last_rows['ma5'], errors='coerce')
    ma20_vals = pd.to_numeric(last_rows['ma20'], errors='coerce')
    # Relative MA gap; missing or zero ma20 collapses to 0 (neutral trend).
    with np.errstate(divide='ignore', invalid='ignore'):
        rel_gap = (ma5_vals - ma20_vals) / ma20_vals
    rel_gap = rel_gap.replace([np.inf, -np.inf], np.nan).fillna(0.0)
    trend_score = 0.5 * (np.tanh(rel_gap * 5.0) + 1.0)
    rsi_vals = pd.to_numeric(last_rows['rsi'], errors='coerce').fillna(50.0)
    momentum_score = np.clip(0.5 + (rsi_vals - 50.0) / 100.0, 0.0, 1.0)
    blended = np.round(0.5 * trend_score + 0.5 * momentum_score, 4)
    result = pd.DataFrame({
        'ts_code': last_rows['ts_code'].astype(str),
        'predict_date': predict_date,
        'for_date': for_date,
        'prediction_score': blended,
    })
    result.insert(0, 'id', range(1, len(result) + 1))
    result = result[['id','ts_code','predict_date','for_date','prediction_score']]
    result.to_csv(PRED_CSV, index=False)
    return result


def main():
    """CLI entry point: fetch daily bars, filter, and regenerate the five CSV tables."""
    import argparse
    cli = argparse.ArgumentParser(description='按日批量抓取→过滤→生成五张表（不改动现有代码）')
    cli.add_argument('--days', type=int, default=30, help='回溯的自然日天数，默认30')
    cli.add_argument('--codes', type=str, default=DEFAULT_CODES_FILE, help='代码清单文件路径，默认 stocks_list.txt；若不存在则不过滤')
    cli.add_argument('--rate', type=float, default=1.5, help='Tushare 调用间隔秒，默认1.5s')
    opts = cli.parse_args()

    selected_codes = load_codes(opts.codes)
    print(f'[INFO] 代码清单: {len(selected_codes) if selected_codes else "未指定/未找到，使用全市场"}')

    print(f'[INFO] 开始按日抓取最近 {opts.days} 天全市场日线，间隔 {opts.rate}s')
    market_daily = fetch_daily_by_date(days=opts.days, rate_seconds=opts.rate)
    filtered_daily = filter_daily_by_codes(market_daily, selected_codes)
    daily_table = write_daily_csv(filtered_daily)
    print(f'[INFO] 写入 {DAILY_CSV}: {len(daily_table)} 行')

    tech_table, clean_table = compute_indicators(daily_table)
    print(f'[INFO] 写入 {TECH_CSV}: {len(tech_table)} 行; {TECH_CLEAN_CSV}: {len(clean_table)} 行')

    stocks_table = build_stocks_from_daily(daily_table)
    print(f'[INFO] 写入 {STOCKS_CSV}: {len(stocks_table)} 行')

    pred_table = build_predictions_from_indicators(tech_table)
    print(f'[INFO] 写入 {PRED_CSV}: {len(pred_table)} 行')

    print('[DONE] 五张表已生成/覆盖:', STOCKS_CSV, DAILY_CSV, TECH_CSV, TECH_CLEAN_CSV, PRED_CSV)


# Script entry point: run the full fetch/filter/export pipeline.
if __name__ == '__main__':
    main()
