import akshare as ak
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime, timedelta


def fetch_etf_data(symbol, start_date, end_date):
    """
    Fetch daily ETF history from akshare's Eastmoney endpoint.

    Parameters
    ----------
    symbol : str
        ETF fund code, e.g. "159915".
    start_date, end_date : str
        Date bounds in "YYYYMMDD" form, passed straight to akshare.

    Returns
    -------
    pandas.DataFrame or None
        Frame sorted ascending by date with lowercase / English column
        names (date, open, close, high, low, volume, ...), or None if the
        fetch fails for any reason (the error is printed, not raised).
    """
    try:
        raw = ak.fund_etf_hist_em(symbol=symbol, period="daily",
                                  start_date=start_date, end_date=end_date)

        # Normalise column names: lowercase first, then map the Chinese
        # headers to English when the endpoint returned them untranslated.
        raw.columns = [column.lower() for column in raw.columns]
        if 'date' not in raw.columns and '日期' in raw.columns:
            chinese_to_english = {'日期': 'date', '开盘': 'open', '收盘': 'close',
                                  '最高': 'high', '最低': 'low', '成交量': 'volume',
                                  '成交额': 'amount', '振幅': 'amplitude',
                                  '涨跌幅': 'change_pct', '涨跌额': 'change_amount'}
            raw = raw.rename(columns=chinese_to_english)

        # Parse dates and return the frame in chronological order.
        raw['date'] = pd.to_datetime(raw['date'])
        return raw.sort_values('date')
    except Exception as e:
        print(f"Error fetching data: {e}")
        return None


def calculate_bias(df, periods=(34, 50, 70, 200)):
    """
    Calculate the BIAS indicator for multiple moving-average periods.

    BIAS = (Close - MA) / MA * 100

    Adds two columns per period to ``df`` in place: ``ma_<p>`` (simple
    moving average of close) and ``bias_<p>`` (percent deviation of close
    from that average). The first ``p - 1`` rows of each are NaN.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain a 'close' column.
    periods : iterable of int, optional
        MA window lengths. Default changed from a mutable list literal to
        a tuple — a shared mutable default could be mutated across calls.

    Returns
    -------
    pandas.DataFrame
        The same ``df``, for call chaining.
    """
    for period in periods:
        ma_col = f'ma_{period}'
        bias_col = f'bias_{period}'
        df[ma_col] = df['close'].rolling(window=period).mean()
        df[bias_col] = (df['close'] - df[ma_col]) / df[ma_col] * 100
    return df


def calculate_williams_r(df, periods=(34, 50, 70)):
    """
    Calculate Williams %R for multiple lookback periods.

    WR = (Highest High - Close) / (Highest High - Lowest Low) * -100

    Adds one ``wr_<p>`` column per period to ``df`` in place; values range
    from 0 (close at the period high) to -100 (close at the period low).

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain 'high', 'low' and 'close' columns.
    periods : iterable of int, optional
        Lookback window lengths. Default changed from a mutable list
        literal to a tuple to avoid the shared-mutable-default pitfall.

    Returns
    -------
    pandas.DataFrame
        The same ``df``, for call chaining.
    """
    for period in periods:
        highest_high = df['high'].rolling(window=period).max()
        lowest_low = df['low'].rolling(window=period).min()
        df[f'wr_{period}'] = (highest_high - df['close']) / (highest_high - lowest_low) * (-100)
    return df


def calculate_z_score(df, window=10, ema_period=5):  # Changed from ema_period=3 to ema_period=5
    """
    Calculate the "Z-Score" momentum indicator (a stochastic-style value).

    Z-Score = (Close - Lowest Low) / (Highest High - Lowest Low) * 100,
    computed over a rolling window, then smoothed with an EMA.

    Adds 'z_score' and 'z_score_smoothed' columns to ``df`` in place and
    returns the same frame.
    """
    rolling_high = df['high'].rolling(window=window).max()
    rolling_low = df['low'].rolling(window=window).min()
    price_range = rolling_high - rolling_low

    # Position of the close within the rolling high/low range, as a percent.
    df['z_score'] = (df['close'] - rolling_low) / price_range * 100

    # EMA smoothing of the raw value.
    df['z_score_smoothed'] = df['z_score'].ewm(span=ema_period).mean()
    return df


def calculate_volume_divergence(df):
    """
    Calculate volume-divergence metrics, added to ``df`` in place.

    Columns produced:
    - is_local_low: price change flips from negative to positive
    - vol_5d_avg / vol_below_50pct_avg: 5-day mean volume and a <50% flag
    - relative_volume: volume at a local low vs. the previous local low (%)
    - vol_price_divergence: price down day with volume 30%+ below prior day
    - price_new_low: close strictly below the PRIOR 10-day low
    - vol_decline: 3-day mean volume below 10-day mean volume
    - vol_price_divergence_enhanced: price_new_low AND vol_decline
    - vol_near_min: volume within 120% of the 20-day minimum

    Returns the same ``df`` for call chaining.
    """
    # Calculate price changes
    df['price_change'] = df['close'].diff()

    # Mark local lows (where price change switches from negative to positive)
    df['is_local_low'] = (df['price_change'].shift(1) < 0) & (df['price_change'] > 0)

    # Calculate 5-day average turnover (using volume as proxy since turnover rate not directly available)
    df['vol_5d_avg'] = df['volume'].rolling(window=5).mean()

    # Check if volume is below 50% of 5-day average
    df['vol_below_50pct_avg'] = df['volume'] < (df['vol_5d_avg'] * 0.5)

    # Calculate relative volume (current volume compared to the volume at previous low)
    df['relative_volume'] = np.nan

    # Find previous low points and compare volumes
    # NOTE: assumes an integer index (the `> 0` guard compares index labels).
    low_points = df[df['is_local_low']].index.tolist()
    for i in range(1, len(low_points)):
        current_idx = low_points[i]
        prev_idx = low_points[i - 1]

        if current_idx > 0 and prev_idx > 0:
            relative_vol = df.loc[current_idx, 'volume'] / df.loc[prev_idx, 'volume'] * 100
            df.loc[current_idx, 'relative_volume'] = relative_vol

    # For analysis we'll consider volume divergence when price makes new low but volume is 30% lower
    df['vol_price_divergence'] = ((df['close'] < df['close'].shift(1)) &
                                  (df['volume'] < df['volume'].shift(1) * 0.7))

    # Price new-low detection.
    # FIX: compare against the *previous* 10-day low via shift(1). The original
    # compared close < close.rolling(10).min(), but that window includes the
    # current bar, so close can never be strictly below it and the flag was
    # always False.
    df['price_new_low'] = df['close'] < df['close'].rolling(10).min().shift(1)

    # Volume decline detection (3-day vs 10-day average)
    df['vol_decline'] = df['volume'].rolling(3).mean() < df['volume'].rolling(10).mean()

    # Volume-price divergence (price new low but volume declining)
    df['vol_price_divergence_enhanced'] = df['price_new_low'] & df['vol_decline']

    # Check if volume is near 20-day minimum (120% threshold)
    df['vol_near_min'] = df['volume'] < (df['volume'].rolling(20).min() * 1.2)

    return df


def calculate_bollinger_bands(df, period=20, num_std=2):
    """
    Calculate Bollinger Bands, added to ``df`` in place.

    - bb_middle: ``period``-day simple moving average of close
    - bb_std:    ``period``-day rolling standard deviation of close
    - bb_upper / bb_lower: middle band +/- ``num_std`` standard deviations
    - bb_width:  (upper - lower) / middle
    - bb_percent_b: (close - lower) / (upper - lower), i.e. where the close
      sits inside the bands (0 = lower band, 1 = upper band)

    Returns the same ``df`` for call chaining.
    """
    rolling_close = df['close'].rolling(window=period)

    # Middle band and dispersion.
    df['bb_middle'] = rolling_close.mean()
    df['bb_std'] = rolling_close.std()

    # Upper/lower bands are symmetric around the middle band.
    band_offset = df['bb_std'] * num_std
    df['bb_upper'] = df['bb_middle'] + band_offset
    df['bb_lower'] = df['bb_middle'] - band_offset

    # Band width (relative) and %B (position of close within the bands).
    band_span = df['bb_upper'] - df['bb_lower']
    df['bb_width'] = band_span / df['bb_middle']
    df['bb_percent_b'] = (df['close'] - df['bb_lower']) / band_span

    return df


def calculate_moving_averages(df, periods=(5, 20, 34, 50, 70, 200)):
    """
    Calculate simple moving averages of close for multiple periods.

    Adds one ``ma_<p>`` column per period to ``df`` in place (5- and 20-day
    MAs are included for the trend filter used downstream). Note this
    recomputes — identically — the ``ma_*`` columns that calculate_bias
    also produces for the overlapping periods.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain a 'close' column.
    periods : iterable of int, optional
        Window lengths. Default changed from a mutable list literal to a
        tuple to avoid the shared-mutable-default pitfall.

    Returns
    -------
    pandas.DataFrame
        The same ``df``, for call chaining.
    """
    for period in periods:
        df[f'ma_{period}'] = df['close'].rolling(window=period).mean()
    return df


def identify_oversold_signals(df, etf_type='industry', etf_subtype=None):
    """
    Identify oversold signals based on all indicators with enhanced approach:
    - Relaxed thresholds
    - Two-phase validation
    - Scoring system
    - Subtype-specific adjustments
    - Trend filtering

    Adds many boolean/numeric signal columns to ``df`` in place and returns
    the same frame. The headline output is 'final_oversold_signal', which is
    True when ANY of three routes fires:
      1. triple_resonance_oversold: an indicator oversold + a volume
         condition (phase 1), confirmed by a 200-day BIAS breach within the
         last 3 bars;
      2. score_signal: weighted indicator score >= 0.7;
      3. divergence_signal combined with low Bollinger-Band volatility.

    Requires the columns produced by calculate_bias, calculate_williams_r,
    calculate_z_score, calculate_bollinger_bands, calculate_volume_divergence
    and calculate_moving_averages (bias_34/50/200, wr_34, z_score_smoothed,
    bb_percent_b, bb_width, vol_5d_avg, vol_near_min,
    vol_price_divergence_enhanced, ma_5, ma_20).

    Parameters:
    -----------
    df : pandas DataFrame
        DataFrame with price and calculated indicators
    etf_type : str
        Either 'broad' for broad-based indices or 'industry' for industry indices
    etf_subtype : str or None
        Optional subtype ('consumer', 'new_energy', 'tech') for more granular
        parameter adjustments; any other value leaves thresholds unchanged.
    """
    # Base thresholds - relaxed compared to original values.
    # Industry indices get looser (more negative) thresholds than broad ones
    # because sector ETFs swing harder.
    if etf_type == 'broad':
        # Broad-based indices thresholds
        bias_34_threshold = -10  # Relaxed from -12
        bias_50_threshold = -13  # Relaxed from -15
        wr_threshold = -80  # Kept the same
        bb_percent_b_threshold = 0.20  # Relaxed from 0.15
        volume_threshold = 0.50  # Kept the same
        bias_200_threshold = -13  # Relaxed from -15
    else:  # industry ETF thresholds
        # Industry indices thresholds (also the fallback for unknown etf_type)
        bias_34_threshold = -13  # Relaxed from -15
        bias_50_threshold = -16  # Relaxed from -18
        wr_threshold = -83  # Relaxed from -80
        bb_percent_b_threshold = 0.15  # Relaxed from 0.10
        volume_threshold = 0.45  # Relaxed from 0.50
        bias_200_threshold = -16  # Relaxed from -18

    # Subtype-specific adjustments (note: `+= -2` lowers the threshold,
    # i.e. requires a DEEPER dip before the condition counts as oversold)
    if etf_subtype == "consumer":
        bias_34_threshold += -2  # Make consumer sector more sensitive
        bias_50_threshold += -2
    elif etf_subtype == "new_energy":
        wr_threshold += -5  # Make Williams %R more sensitive for new energy
    elif etf_subtype == "tech":
        volume_threshold = 0.55  # Higher volume threshold for tech stocks

    # 1. Technical indicators oversold conditions
    # BIAS indicator: either the 34- or the 50-day deviation breaching counts
    df['bias_34_oversold'] = df['bias_34'] < bias_34_threshold
    df['bias_50_oversold'] = df['bias_50'] < bias_50_threshold
    df['bias_oversold'] = df['bias_34_oversold'] | df['bias_50_oversold']

    # Williams %R oversold (only the 34-day variant is used for signals)
    df['wr_oversold'] = df['wr_34'] < wr_threshold

    # Z-Score oversold (fixed threshold of 20, not ETF-type dependent)
    df['z_score_oversold'] = df['z_score_smoothed'] < 20

    # Bollinger Band oversold signal
    df['bb_oversold'] = df['bb_percent_b'] < bb_percent_b_threshold

    # Volume conditions - enhanced with multiple dimensions
    # NOTE(review): vol_ratio_high is computed but not used by any signal
    # below; it is kept in the frame for the CSV export.
    df['volume_ratio'] = df['volume'] / df['volume'].shift(1)
    df['vol_ratio_high'] = df['volume_ratio'] > 1.5

    # Enhanced volume conditions (satisfy any of these): volume dried up
    # below the type-specific fraction of the 5-day average, OR volume near
    # its 20-day minimum
    df['vol_below_threshold'] = df['volume'] < (df['vol_5d_avg'] * volume_threshold)
    df['vol_condition'] = (df['vol_below_threshold'] | df['vol_near_min'])

    # Price deviation from long-term moving average (200-day) exceeds threshold%
    df['price_ma200_deviation'] = df['bias_200'] < bias_200_threshold

    # Two-phase validation approach
    # Phase 1: any technical indicator oversold + Volume condition
    df['phase1_signal'] = (df['bias_oversold'] | df['wr_oversold'] |
                           df['z_score_oversold'] | df['bb_oversold']) & df['vol_condition']

    # Phase 2: Check if the 200-day BIAS breached its threshold at any point
    # within the last 3 bars (rolling min over 3)
    df['rolling_price_deviation'] = df['bias_200'].rolling(3).min() < bias_200_threshold

    # Flexible scoring system (0.0 to 1.0): booleans multiply as 0/1, so the
    # score is the sum of the weights of the conditions that hold
    df['signal_score'] = (
            (df['bias_34'] < bias_34_threshold) * 0.3 +  # BIAS 30% weight
            (df['wr_34'] < wr_threshold) * 0.3 +  # WR 30% weight
            df['vol_condition'] * 0.2 +  # Volume 20% weight
            (df['bb_percent_b'] < bb_percent_b_threshold) * 0.2  # BB %B 20% weight
    )

    # Trend filters
    # 5/20 MA crossover (golden cross); trend_filter is True if the cross
    # held on any of the last 3 bars.
    # NOTE(review): trend_filter is computed but not applied to
    # final_oversold_signal below — presumably intended as a filter; confirm.
    df['ma5_ma20_cross'] = df['ma_5'] > df['ma_20']
    df['trend_filter'] = df['ma5_ma20_cross'].rolling(3).max() > 0

    # Low volatility detection (compressed Bollinger Bands)
    df['low_volatility'] = df['bb_width'] < 0.1

    # Combined signals
    # Main signal: Phase 1 + Rolling price deviation within 3 days
    df['triple_resonance_oversold'] = df['phase1_signal'] & df['rolling_price_deviation']

    # Alternative signal 1: high composite score alone (>= 0.7). Note that,
    # despite the weights summing to 1.0, volatility is NOT part of this
    # signal — low_volatility is only combined with the divergence signal.
    df['score_signal'] = (df['signal_score'] >= 0.7)

    # Alternative signal 2: Volume-price divergence with any technical oversold
    df['divergence_signal'] = df['vol_price_divergence_enhanced'] & (
            df['bias_oversold'] | df['wr_oversold'] | df['z_score_oversold']
    )

    # Final composite signal (any of the three approaches; the divergence
    # route additionally requires compressed Bollinger Bands)
    df['final_oversold_signal'] = (
            df['triple_resonance_oversold'] |
            df['score_signal'] |
            (df['divergence_signal'] & df['low_volatility'])
    )

    return df


def calculate_macd(df):
    """
    Compute MACD with the standard 12/26/9 parameters.

    Adds to ``df`` in place: 'ema_short' (12-period EMA of close),
    'ema_long' (26-period EMA), 'macd' (their difference), 'signal'
    (9-period EMA of MACD) and 'macd_ma5' (5-day SMA of MACD).
    Returns the same frame.
    """
    fast_span, slow_span, signal_span = 12, 26, 9

    # adjust=False gives the recursive (trading-convention) EMA.
    df['ema_short'] = df['close'].ewm(span=fast_span, adjust=False).mean()
    df['ema_long'] = df['close'].ewm(span=slow_span, adjust=False).mean()
    df['macd'] = df['ema_short'] - df['ema_long']
    df['signal'] = df['macd'].ewm(span=signal_span, adjust=False).mean()
    df['macd_ma5'] = df['macd'].rolling(window=5).mean()

    return df


def plot_macd(df):
    """
    Plot the MACD line against its 5-day moving average.

    Bars where MACD is above its 5-day MA (the "stickline" condition) are
    marked with red dots. Expects the columns produced by calculate_macd
    ('macd', 'macd_ma5') plus 'date'. Shows the figure; returns None.
    """
    plt.figure(figsize=(10, 6))
    plt.plot(df['date'], df['macd'], label='MACD', color='blue')
    plt.plot(df['date'], df['macd_ma5'], label='MA(MACD, 5)', color='orange')

    # Highlight the stickline condition with ONE vectorized call.
    # The original looped over every row and issued a plt.plot per bar,
    # creating one artist per point — O(n) matplotlib calls.
    above = df['macd'] > df['macd_ma5']
    plt.plot(df.loc[above, 'date'], df.loc[above, 'macd'], 'ro')

    plt.title('MACD Daily Line')
    plt.xlabel('Date')
    plt.ylabel('Value')
    plt.legend()
    plt.grid()
    plt.show()


def main():
    """
    Run the oversold-signal screen over a fixed ETF watch list.

    For each symbol: fetch ~2 years of daily data, compute all indicators,
    flag oversold signals, print the details, and save a per-symbol CSV and
    chart under ../../data/output/. Afterwards a cross-ETF summary CSV is
    written and any signals from the last 7 days are printed. Finishes with
    a standalone MACD plot demo for one ETF over 2022.

    Returns
    -------
    dict
        Mapping of symbol -> DataFrame of rows where final_oversold_signal
        fired (symbols with no signals are absent).
    """
    # List of ETF symbols to analyze with enhanced subtypes.
    # Classify ETFs as broad-based indices or industry indices with subtypes.
    etf_symbols = {
        # Broad-based indices
        "159845": {"type": "broad"},  # 中证1000ETF
        "159949": {"type": "broad"},  # 创业板50ETF
        "159915": {"type": "broad"},  # 创业板ETF
        "561560": {"type": "broad", "subtype": "tech"},  # 科创50ETF

        # Industry indices
        "512170": {"type": "industry"},  # 医疗ETF
        "512880": {"type": "industry"},  # 电力ETF
        "588000": {"type": "industry"},  # 军工ETF
        "512660": {"type": "industry", "subtype": "new_energy"},  # 新能源车ETF
        "159766": {"type": "industry", "subtype": "new_energy"},  # 光伏ETF
        "515790": {"type": "industry", "subtype": "tech"},  # 芯片ETF
        "159855": {"type": "industry"},  # 影视ETF
        "159865": {"type": "industry"},  # 养殖ETF
        "159742": {"type": "industry", "subtype": "tech"},  # 恒生科技指数ETF
        "159707": {"type": "industry"},  # 地产ETF
        "515000": {"type": "industry", "subtype": "tech"},  # 科技ETF
        "159937": {"type": "industry"},  # 黄金ETF基金
        "515210": {"type": "industry"},  # 钢铁ETF
        "515050": {"type": "industry", "subtype": "tech"},  # 5G通信ETF
        "159928": {"type": "industry", "subtype": "consumer"},  # 消费ETF
        "513050": {"type": "industry", "subtype": "tech"},  # 中概互联网ETF
    }

    # Dictionary to store signals for all ETFs
    all_signals = {}

    # Define the time period - last 24 months (2 years)
    end_date = datetime.now().strftime("%Y%m%d")
    start_date = (datetime.now() - timedelta(days=730)).strftime("%Y%m%d")

    print(f"Analyzing {len(etf_symbols)} ETFs from {start_date} to {end_date}")

    for symbol, etf_info in etf_symbols.items():
        try:
            etf_type = etf_info["type"]
            etf_subtype = etf_info.get("subtype", None)

            print(f"\n{'=' * 50}")
            print(f"Processing ETF: {symbol} (Type: {etf_type}, Subtype: {etf_subtype})")

            # Fetch data
            df = fetch_etf_data(symbol, start_date, end_date)

            if df is None or df.empty:
                print(f"Failed to fetch data or empty dataset returned for {symbol}, skipping...")
                continue

            print(f"Successfully fetched {len(df)} records for {symbol}")

            # Calculate all indicators (order matters: the signal step below
            # needs the columns these produce)
            df = calculate_bias(df)
            df = calculate_moving_averages(df)
            df = calculate_williams_r(df)
            df = calculate_z_score(df)
            df = calculate_bollinger_bands(df)
            df = calculate_volume_divergence(df)
            df = calculate_macd(df)

            # Identify oversold signals with appropriate thresholds based on ETF type and subtype
            df = identify_oversold_signals(df, etf_type, etf_subtype)

            # Display results for oversold signals
            oversold_dates = df[df['final_oversold_signal']].copy()
            if not oversold_dates.empty:
                print(f"\n===== Oversold Signals Detected for {symbol} ({etf_type}) =====")
                for idx, row in oversold_dates.iterrows():
                    print(f"Date: {row['date'].strftime('%Y-%m-%d')}")
                    print(f"  Close: {row['close']}")

                    # Format indicator values, showing "nan" for warm-up rows
                    bias_34 = "nan" if np.isnan(row['bias_34']) else f"{row['bias_34']:.2f}%"
                    bias_50 = "nan" if np.isnan(row['bias_50']) else f"{row['bias_50']:.2f}%"
                    bias_200 = "nan" if np.isnan(row['bias_200']) else f"{row['bias_200']:.2f}%"
                    print(f"  BIAS(34): {bias_34}, BIAS(50): {bias_50}, BIAS(200): {bias_200}")

                    wr_34 = "nan" if np.isnan(row['wr_34']) else f"{row['wr_34']:.2f}"
                    print(f"  WR(34): {wr_34}")

                    z_score = "nan" if np.isnan(row['z_score_smoothed']) else f"{row['z_score_smoothed']:.2f}"
                    print(f"  Z-Score: {z_score}")

                    bb_percent_b = "nan" if np.isnan(row['bb_percent_b']) else f"{row['bb_percent_b']:.2f}"
                    print(f"  Bollinger Band %B: {bb_percent_b}")

                    # Volume vs. its 5-day average
                    vol_5d_ratio = "nan" if np.isnan(
                        row['volume'] / row['vol_5d_avg']) else f"{(row['volume'] / row['vol_5d_avg']):.2f}"
                    print(f"  Volume/5d Avg: {vol_5d_ratio}")

                    # Show signal scores and validation
                    score = "nan" if np.isnan(row['signal_score']) else f"{row['signal_score']:.2f}"
                    print(f"  Signal Score: {score}")

                    # Show three-resonance status and which signal triggered
                    triple_res = "YES" if row['triple_resonance_oversold'] else "NO"
                    score_sig = "YES" if row['score_signal'] else "NO"
                    div_sig = "YES" if row.get('divergence_signal', False) else "NO"

                    print(f"  Triple Resonance: {triple_res}, Score Signal: {score_sig}, Divergence: {div_sig}")
                    print("  --------------------")

                # Store oversold signals for this ETF
                all_signals[symbol] = oversold_dates
            else:
                print(f"\nNo oversold signals detected for {symbol} in the given time period.")

            # Save the results to CSV for further analysis
            output_path = f"../../data/output/oversold_analysis_{symbol}.csv"
            df.to_csv(output_path, index=False)
            print(f"Full analysis saved to {output_path}")

            # Create a simple visualization of the closing prices and oversold signals
            plt.figure(figsize=(12, 18))  # Tall figure: 6 stacked subplots

            # Plot closing prices with Bollinger Bands and 200-day MA
            plt.subplot(6, 1, 1)
            plt.plot(df['date'], df['close'], 'b-')
            plt.plot(df['date'], df['bb_upper'], 'r--', alpha=0.7)
            plt.plot(df['date'], df['bb_middle'], 'g--', alpha=0.7)
            plt.plot(df['date'], df['bb_lower'], 'r--', alpha=0.7)
            plt.plot(df['date'], df['ma_200'], 'c-', alpha=0.7)
            plt.scatter(df.loc[df['final_oversold_signal'], 'date'],
                        df.loc[df['final_oversold_signal'], 'close'],
                        color='r', marker='^', s=100)
            plt.title(f"{symbol} - Closing Price with Bollinger Bands, 200MA & Oversold Signals")
            plt.ylabel("Price")
            plt.legend(['Close', 'Upper BB', 'Middle BB', 'Lower BB', '200MA', 'Oversold Signal'], loc='upper left')

            # Plot BIAS indicators
            plt.subplot(6, 1, 2)
            plt.plot(df['date'], df['bias_34'], 'g-', label='BIAS-34')
            plt.plot(df['date'], df['bias_50'], 'r-', label='BIAS-50')
            plt.plot(df['date'], df['bias_70'], 'b-', label='BIAS-70')
            plt.axhline(y=-18, color='g', linestyle='--')
            plt.axhline(y=-20, color='r', linestyle='--')
            plt.axhline(y=-22, color='b', linestyle='--')
            plt.title("BIAS Indicators")
            plt.ylabel("BIAS %")
            plt.legend()

            # Plot Williams %R for all three periods
            plt.subplot(6, 1, 3)
            plt.plot(df['date'], df['wr_34'], 'g-', label='WR-34')
            plt.plot(df['date'], df['wr_50'], 'r-', label='WR-50')
            plt.plot(df['date'], df['wr_70'], 'b-', label='WR-70')
            plt.axhline(y=-90, color='k', linestyle='--')
            plt.title("Williams %R Indicators")
            plt.ylabel("WR Value")
            plt.legend()

            # Plot Z-Score in a separate subplot
            plt.subplot(6, 1, 4)
            plt.plot(df['date'], df['z_score_smoothed'], 'g-', label='Z-Score')
            plt.axhline(y=20, color='r', linestyle='--')
            plt.title("Z-Score Indicator")
            plt.ylabel("Value")
            plt.legend()

            # Plot Bollinger Band %B
            plt.subplot(6, 1, 5)
            plt.plot(df['date'], df['bb_percent_b'], 'b-', label='%B')
            plt.axhline(y=0, color='r', linestyle='--')
            plt.axhline(y=1, color='r', linestyle='--')
            plt.axhline(y=0.05, color='g', linestyle='--')
            plt.title("Bollinger Band %B")
            plt.ylabel("Value")
            plt.legend()

            # Add volume and 5-day volume average plot
            plt.subplot(6, 1, 6)
            plt.bar(df['date'], df['volume'], color='b', alpha=0.5)
            plt.plot(df['date'], df['vol_5d_avg'], 'r-', label='5d Avg Vol')
            plt.plot(df['date'], df['vol_5d_avg'] * 0.5, 'g--', label='50% of 5d Avg')
            plt.title("Volume with 5-day Average")
            plt.ylabel("Volume")
            plt.legend()

            plt.tight_layout()
            plt.savefig(f"../../data/output/oversold_analysis_{symbol}.png")
            print(f"Analysis chart saved as oversold_analysis_{symbol}.png")

            # Close the figure to free memory
            plt.close()

        except Exception as e:
            print(f"Error processing {symbol}: {e}")
            continue

    # Summarize all signals
    if all_signals:
        # Create a summary dataframe
        summary_rows = []
        for symbol, signals_df in all_signals.items():
            # FIX: look up the 'type' string inside the info dict. The original
            # did etf_symbols.get(symbol, 'industry'), which returned the whole
            # info dict (e.g. {"type": "broad"}) as the summary 'type' value.
            etf_type = etf_symbols.get(symbol, {}).get('type', 'industry')
            for _, row in signals_df.iterrows():
                summary_rows.append({
                    'symbol': symbol,
                    'type': etf_type,
                    'date': row['date'],
                    'close': row['close'],
                    'bias_34': row['bias_34'],
                    'bias_50': row['bias_50'],
                    'bias_200': row['bias_200'],
                    'wr_34': row['wr_34'],
                    'z_score': row['z_score_smoothed'],
                    'bb_percent_b': row['bb_percent_b'],
                    'vol_5d_ratio': row['volume'] / row['vol_5d_avg'],
                    'triple_resonance': row['triple_resonance_oversold']
                })

        if summary_rows:
            summary_df = pd.DataFrame(summary_rows)
            summary_df = summary_df.sort_values(['date', 'symbol'], ascending=[False, True])
            summary_path = "../../data/output/all_etf_oversold_signals_summary.csv"
            summary_df.to_csv(summary_path, index=False)
            print(f"\nSummary of all ETF oversold signals saved to {summary_path}")

            # Print the most recent signals (last 7 days)
            recent_date = datetime.now() - timedelta(days=7)
            recent_signals = summary_df[summary_df['date'] >= pd.Timestamp(recent_date)]

            if not recent_signals.empty:
                print("\n===== Recent Oversold Signals (Last 7 Days) =====")
                for _, row in recent_signals.iterrows():
                    triple_res = "✓" if row['triple_resonance'] else " "
                    print(
                        f"ETF: {row['symbol']} (Type: {row['type']}) - Date: {row['date'].strftime('%Y-%m-%d')} - Close: {row['close']} - Triple Resonance: {triple_res}")
            else:
                print("\nNo ETF signals detected in the last 7 days.")
    else:
        print("\nNo oversold signals detected across any ETF in the given time period.")

    # Standalone MACD plot demo for one ETF over calendar year 2022
    symbol = "159915"  # Example ETF symbol
    start_date = "20220101"
    end_date = "20221231"

    df = fetch_etf_data(symbol, start_date, end_date)
    if df is not None and not df.empty:
        df = calculate_macd(df)
        plot_macd(df)

    return all_signals


if __name__ == "__main__":
    main()
