from flask import Flask, jsonify, request
from flask_cors import CORS
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime, timedelta
import pandas as pd
from sqlalchemy import text
import math # Import math module for power calculation
import numpy as np # Import numpy for standard deviation calculation
import pandas_ta as ta # Import pandas_ta

app = Flask(__name__)
CORS(app)  # allow cross-origin requests from the frontend dev server

# Database configuration.
# NOTE(review): credentials are hard-coded in the connection URIs — move them
# to environment variables / config before deployment.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://root:a22wsybk@localhost/jinr'  # default bind (fund database)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_BINDS'] = {
    'zhishu': 'mysql+mysqlconnector://root:a22wsybk@localhost/zhishu'  # index database
}
db = SQLAlchemy(app)

# 数据模型
class Consumption(db.Model):
    """ORM model for the `consumption` board table.

    One row per fund per trading day (daily K-line record). Column names
    mirror the raw table schema and are referenced by the raw-SQL endpoints
    below, so they must not be renamed.
    """
    __tablename__ = 'consumption'
    id = db.Column(db.Integer, primary_key=True)
    ts_code = db.Column(db.String(20), nullable=False)       # fund/security code
    trade_date = db.Column(db.Date, nullable=False)          # trading day
    open = db.Column(db.Double, nullable=True)               # opening price
    high = db.Column(db.Double, nullable=True)               # intraday high
    low = db.Column(db.Double, nullable=True)                # intraday low
    close = db.Column(db.Double, nullable=True)              # closing price
    pre_close = db.Column(db.Double, nullable=True)          # previous close
    price_change = db.Column(db.Double, nullable=True)       # absolute change vs pre_close
    pct_chg = db.Column(db.Double, nullable=True)            # percentage change
    vol = db.Column(db.Double, nullable=True)                # traded volume
    amount = db.Column(db.Double, nullable=True)             # traded amount
    category = db.Column(db.String(50), nullable=True)       # fund category used for filtering
    fund_name = db.Column(db.String(100), nullable=True)     # human-readable fund name

# 指数库的 ts_code 到表名映射，增加 description 字段
# Map from index ts_code to its table name in the `zhishu` database plus
# display metadata. `name` is the human-readable display name consumed via
# index_info.get('name', code) by the analysis endpoint (previously missing,
# so the raw code was always shown); `description` feeds the analysis text.
index_code_board_map = {
    '399300.SZ': {'board': 'hushen300', 'name': '沪深300', 'description': '沪深300指数由沪深A股中规模大、流动性好的最具代表性的300只股票组成，综合反映中国A股市场上市股票价格的整体表现。'},
    '000001.SH': {'board': 'shanghai_stock_exchange_index', 'name': '上证指数', 'description': '上证指数全称上海证券交易所综合股价指数，是上海证券交易所主要的股指。'},
    '399001.SZ': {'board': 'shenzhen_component_index', 'name': '深证成指', 'description': '深证成指全称深圳证券交易所成分股指数，是由深圳证券市场中选取具有代表性的500家上市公司组成，用来反映深圳股市的运行状况。'},
    '000016.SH': {'board': 'sse50', 'name': '上证50', 'description': '上证50指数是根据科学客观的方法，挑选上海证券市场规模大、流动性好的最具代表性的50只股票组成，综合反映上海证券市场最具市场影响力的一批龙头企业的整体状况。'},
    '000852.SH': {'board': 'zhongzheng1000', 'name': '中证1000', 'description': '中证1000指数由全部A股中剔除沪深300指数成份股及中证500指数成份股后的规模偏小且流动性好的1000只股票组成，综合反映中国A股市场小盘股的整体状况。'},
    '399905.SZ': {'board': 'zhongzheng500', 'name': '中证500', 'description': '中证500指数由全部A股中剔除沪深300指数成份股，且总市值、成交金额排名在全部A股市场中居于前500名的中小市值股票组成，综合反映中国A股市场中小市值股票的整体表现。'},
    '000922.SH': {'board': 'zhongzhenga50', 'name': '中证A50', 'description': '中证A50指数旨在度量中国A股市场最具代表性的50只核心资产表现。'}
}

# Helper function to calculate technical indicators
def calculate_indicators(df, algorithm_rules):
    """
    Calculates technical indicators specified in the algorithm rules or needed for presets
    and adds them to the DataFrame.

    Args:
        df (pd.DataFrame): Historical K-line data; expected to contain 'open',
            'high', 'low', 'close' and 'vol'/'volume' columns, indexed by
            'trade_date'.
        algorithm_rules (list): Algorithm rules from the frontend; each rule's
            condition groups name the indicators (and their parameters) to compute.

    Returns:
        pd.DataFrame: A copy of `df` with the calculated indicator columns
        appended under pandas_ta's naming scheme (e.g. 'MACD_12_26_9').
        Indicators that fail to compute are skipped, so callers must tolerate
        missing columns.
    """
    df_calc = df.copy() # work on a copy so the caller's DataFrame is not modified

    # Lower-case column names so they match what pandas_ta expects
    original_columns = df_calc.columns.tolist() # keep the original column list (currently unused)
    df_calc.columns = [col.lower() for col in df_calc.columns]
    # pandas_ta expects the volume column to be named 'volume'
    volume_renamed = False
    if 'vol' in df_calc.columns and 'volume' not in df_calc.columns:
        df_calc.rename(columns={'vol': 'volume'}, inplace=True)
        volume_renamed = True

    # Unique indicator configurations to compute
    indicators_to_calculate = {}

    # Extract indicators and their parameters from the custom rules
    for rule in algorithm_rules:
        for condition_group in rule.get('condition_groups', []):
            for condition in condition_group:
                indicator_key = condition.get('indicator')
                parameters = condition.get('parameters', {})

                if indicator_key:
                    # (indicator, frozenset of params) de-duplicates identical configs
                    config_key = (indicator_key, frozenset(parameters.items()))
                    indicators_to_calculate[config_key] = { 'indicator': indicator_key, 'parameters': parameters }

    print("Indicators to calculate:", indicators_to_calculate) # Debug print
    print("DataFrame columns BEFORE calculating indicators:") # Debug print
    print(df_calc.columns.tolist()) # Debug print

    # Calculate each unique indicator
    for config in indicators_to_calculate.values():
        indicator_key = config['indicator']
        parameters = config['parameters']

        # Call the indicator through the DataFrame's .ta accessor; the method
        # name is the lower-cased indicator key (e.g. 'macd', 'kdj').
        try:
            ta_method = getattr(df_calc.ta, indicator_key.lower(), None)

            if ta_method:
                 print(f"Calling pandas_ta method: {indicator_key.lower()} with parameters {parameters}") # Debug print
                 # Pass the OHLCV columns explicitly as keyword arguments for
                 # clarity and robustness.
                 method_args = {}
                 if 'open' in df_calc.columns: method_args['open'] = df_calc['open']
                 if 'high' in df_calc.columns: method_args['high'] = df_calc['high']
                 if 'low' in df_calc.columns: method_args['low'] = df_calc['low']
                 if 'close' in df_calc.columns: method_args['close'] = df_calc['close']
                 if 'volume' in df_calc.columns: method_args['volume'] = df_calc['volume']

                 # append=True attaches the resulting columns directly to df_calc
                 ta_method(**parameters, **method_args, append=True) # Explicitly use append=True

                 print(f"Finished calculating indicator: {indicator_key}") # Debug print
            else:
                 print(f"Warning: Unknown indicator requested or not available in pandas_ta: {indicator_key}")

        except Exception as e:
            print(f"Error calculating indicator {indicator_key} with parameters {parameters}: {e}")
            # NOTE(review): errors are swallowed per-indicator on purpose so the
            # remaining indicators can still be computed.


    # Rename 'volume' back to 'vol' if it was originally 'vol'
    if volume_renamed and 'volume' in df_calc.columns:
         df_calc.rename(columns={'volume': 'vol'}, inplace=True)

    print("DataFrame columns AFTER calculating indicators:") # Debug print
    print(df_calc.columns.tolist()) # Debug print

    # Debug: Print head and tail of the DataFrame with indicators
    print("DataFrame head with indicators:")
    print(df_calc.head())
    print("DataFrame tail with indicators:")
    print(df_calc.tail())

    return df_calc

# Helper function to evaluate algorithm rules and generate signals
def _evaluate_condition(condition, row, previous_row):
    """Evaluate one rule condition on the current bar.

    Args:
        condition (dict): Condition with 'indicator', 'signal' and optional
            'parameters'.
        row (pd.Series): Current bar, including calculated indicator columns.
        previous_row (pd.Series): Previous bar.

    Returns:
        bool: True if the condition's signal fired on this bar. Missing or NaN
        indicator columns, or an unknown indicator/signal, yield False.
    """
    indicator_key = condition.get('indicator')
    signal_name = condition.get('signal')
    parameters = condition.get('parameters', {})

    if indicator_key == 'MACD':
        short_period = parameters.get('short_period', 12)
        long_period = parameters.get('long_period', 26)
        signal_period = parameters.get('signal_period', 9)
        # pandas_ta naming: MACD_<fast>_<slow>_<signal>, MACDs_... = signal line
        macd_col = f'MACD_{short_period}_{long_period}_{signal_period}'
        signal_col = f'MACDs_{short_period}_{long_period}_{signal_period}'

        cols = (macd_col, signal_col)
        if all(c in row and c in previous_row for c in cols) and \
           all(pd.notna(row[c]) and pd.notna(previous_row[c]) for c in cols):
            if signal_name == 'macd_golden_cross':
                # Golden cross: MACD crosses above the signal line
                return previous_row[macd_col] <= previous_row[signal_col] and row[macd_col] > row[signal_col]
            if signal_name == 'macd_death_cross':
                # Death cross: MACD crosses below the signal line
                return previous_row[macd_col] >= previous_row[signal_col] and row[macd_col] < row[signal_col]
            # TODO: other MACD signals (bar turn, zero cross) not implemented yet

    elif indicator_key == 'KDJ':
        n_period = parameters.get('n_period', 9)
        k_period = parameters.get('k_period', 3)
        d_period = parameters.get('d_period', 3)
        # NOTE(review): assumes K/D columns are named K_<n>_<k>_<d> — verify
        # against the kdj output of the installed pandas_ta version.
        k_col = f'K_{n_period}_{k_period}_{d_period}'
        d_col = f'D_{n_period}_{k_period}_{d_period}'

        cols = (k_col, d_col)
        if all(c in row and c in previous_row for c in cols) and \
           all(pd.notna(row[c]) and pd.notna(previous_row[c]) for c in cols):
            if signal_name == 'kdj_golden_cross':
                # Golden cross: K crosses above D
                return previous_row[k_col] <= previous_row[d_col] and row[k_col] > row[d_col]
            if signal_name == 'kdj_death_cross':
                # Death cross: K crosses below D
                return previous_row[k_col] >= previous_row[d_col] and row[k_col] < row[d_col]
            # TODO: KDJ overbought/oversold not implemented yet

    return False


def evaluate_rules(df, algorithm_rules):
    """
    Evaluates the algorithm rules on the DataFrame and generates buy/sell signals.

    Condition groups within a rule are OR-ed: any triggered group triggers the
    rule. Conditions inside a group are folded left-to-right, each condition
    combined with the running result using its own 'logic' operator
    ('AND'/'OR', default 'AND').

    Bug fix: a skipped (invalid) first condition in a group previously caused
    an IndexError when the next condition tried to combine with an empty
    result list; the running result now starts as None and is seeded by the
    first valid condition.

    Args:
        df (pd.DataFrame): Historical K-line data with indicator columns
            already calculated (see calculate_indicators).
        algorithm_rules (list): Rules from the frontend; each rule carries
            'signal_type' and 'condition_groups'.

    Returns:
        list: Signal dicts {'date': 'YYYY-MM-DD', 'type': <signal_type>},
        de-duplicated per (date, type).
    """
    signals = []
    # Ensure a datetime index so dates can be formatted consistently
    if not isinstance(df.index, pd.DatetimeIndex):
        df.index = pd.to_datetime(df.index)

    # Start from the second row so every bar has a predecessor to compare with
    for i in range(1, len(df)):
        index = df.index[i]
        row = df.iloc[i]
        previous_row = df.iloc[i-1]
        current_date_str = index.strftime('%Y-%m-%d')

        for rule in algorithm_rules:
            signal_type = rule.get('signal_type')
            rule_triggered = False

            # Condition groups have an OR relationship with each other
            for condition_group in rule.get('condition_groups', []):
                group_result = None  # None until the first valid condition is evaluated

                for condition in condition_group:
                    indicator_key = condition.get('indicator')
                    signal_name = condition.get('signal')
                    logic_operator = condition.get('logic', 'AND')  # relation to the running group result

                    if not indicator_key or not signal_name:
                        # Skip malformed conditions without poisoning the group result
                        print(f"Skipping invalid condition: indicator={indicator_key}, signal={signal_name}")
                        continue

                    condition_met = _evaluate_condition(condition, row, previous_row)

                    if group_result is None:
                        group_result = condition_met
                    elif logic_operator == 'AND':
                        group_result = group_result and condition_met
                    else:  # OR
                        group_result = group_result or condition_met

                if group_result:
                    rule_triggered = True
                    break  # one triggered group is enough (OR between groups)

            if rule_triggered:
                # De-duplicate: different rules may emit the same (date, type)
                signal_exists = any(s['date'] == current_date_str and s['type'] == signal_type for s in signals)
                if not signal_exists:
                    signals.append({
                        'date': current_date_str,
                        'type': signal_type
                    })
                # Optional: break here to allow only one signal per day

    return signals

# New helper function to evaluate preset algorithms
def evaluate_preset_algorithm(df, preset_algorithm, parameters=None):
    """
    Evaluates a preset algorithm on the DataFrame and generates buy/sell signals.

    Args:
        df (pd.DataFrame): Historical K-line data. For 'MACD_CROSS' the MACD
            columns must already exist; the 'RSI_SIGNALS' preset calculates
            and appends the RSI column to this DataFrame IN PLACE.
        preset_algorithm (str): Preset identifier ('MACD_CROSS' or
            'RSI_SIGNALS'); anything else yields no signals.
        parameters (dict | None): Optional preset parameters. Defaults to an
            empty dict. (Was a mutable default argument `{}` — a classic
            Python pitfall; replaced with a None sentinel.)

    Returns:
        list: Signal dicts {'date': 'YYYY-MM-DD', 'type': 'buy'|'sell'}.
    """
    if parameters is None:
        parameters = {}
    signals = []
    if not isinstance(df.index, pd.DatetimeIndex):
        df.index = pd.to_datetime(df.index)

    # Implement logic for each preset algorithm here
    if preset_algorithm == 'MACD_CROSS':
        print("Evaluating Preset MACD_CROSS algorithm...") # Debug print
        # Default MACD parameters
        short_period = parameters.get('short_period', 12)
        long_period = parameters.get('long_period', 26)
        signal_period = parameters.get('signal_period', 9)

        # pandas_ta naming: MACD_<fast>_<slow>_<signal>, MACDs_... = signal line
        macd_col = f'MACD_{short_period}_{long_period}_{signal_period}'
        signal_col = f'MACDs_{short_period}_{long_period}_{signal_period}'

        # The MACD columns must have been calculated beforehand
        if macd_col not in df.columns or signal_col not in df.columns:
            print(f"Error: Required MACD columns ({macd_col}, {signal_col}) not found in DataFrame.")
            return []

        # Scan for crossings; start at 1 so a previous bar always exists
        for i in range(1, len(df)):
            index = df.index[i]
            row = df.iloc[i]
            previous_row = df.iloc[i-1]
            current_date_str = index.strftime('%Y-%m-%d')

            # Ensure values are not NaN before comparison
            if pd.notna(row[macd_col]) and pd.notna(row[signal_col]) and pd.notna(previous_row[macd_col]) and pd.notna(previous_row[signal_col]):
                # MACD Golden Cross: Current MACD > Signal and Prev MACD <= Prev Signal
                if row[macd_col] > row[signal_col] and previous_row[macd_col] <= previous_row[signal_col]:
                    signals.append({'date': current_date_str, 'type': 'buy'})
                    print(f"  MACD Golden Cross signal generated on {current_date_str}") # Debug print

                # MACD Death Cross: Current MACD < Signal and Prev MACD >= Prev Signal
                elif row[macd_col] < row[signal_col] and previous_row[macd_col] >= previous_row[signal_col]:
                    signals.append({'date': current_date_str, 'type': 'sell'})
                    print(f"  MACD Death Cross signal generated on {current_date_str}") # Debug print

    elif preset_algorithm == 'RSI_SIGNALS':
        print("Evaluating Preset RSI_SIGNALS algorithm...") # Debug print
        # Default RSI parameters
        length = parameters.get('length', 14) # Default RSI period is 14
        buy_threshold = parameters.get('buy_threshold', 30) # Default buy threshold
        sell_threshold = parameters.get('sell_threshold', 70) # Default sell threshold

        # Calculate RSI; NOTE: this appends the column to the caller's df in place
        rsi_col = f'RSI_{length}'
        df.ta.rsi(length=length, append=True) # Calculate and append RSI

        # Check if required columns exist after calculation
        if rsi_col not in df.columns:
            print(f"Error: Required RSI column ({rsi_col}) not found in DataFrame after calculation.")
            return []

        # Start at `length` to skip the RSI warm-up (NaN) region
        for i in range(length, len(df)):
            index = df.index[i]
            row = df.iloc[i]
            previous_row = df.iloc[i-1]
            current_date_str = index.strftime('%Y-%m-%d')

            # Ensure values are not NaN before comparison
            if pd.notna(row[rsi_col]) and pd.notna(previous_row[rsi_col]):
                # Buy Signal: RSI crosses above the buy_threshold
                if previous_row[rsi_col] <= buy_threshold and row[rsi_col] > buy_threshold:
                    signals.append({'date': current_date_str, 'type': 'buy'})
                    print(f"  RSI Buy signal generated on {current_date_str} (RSI: {row[rsi_col]:.2f})") # Debug print

                # Sell Signal: RSI crosses below the sell_threshold
                elif previous_row[rsi_col] >= sell_threshold and row[rsi_col] < sell_threshold:
                    signals.append({'date': current_date_str, 'type': 'sell'})
                    print(f"  RSI Sell signal generated on {current_date_str} (RSI: {row[rsi_col]:.2f})") # Debug print

    # Add logic for other preset algorithms here later

    return signals

# API路由
@app.route('/')
def index():
    """Health-check endpoint confirming the backend is reachable."""
    status_message = 'Flask backend is running.'
    return status_message

@app.route('/api/v1/fund/chart-data')
def get_chart_data():
    """Return K-line chart data for one fund category of a board table.

    Query params:
        interval:  '1W' | '1M' | '1Y' | 'all' — window measured back from the
                   table's latest trade_date (default '1W').
        fund_type: category value to filter on (default '必需消费').
        board:     board table name, validated against a whitelist.

    Returns a JSON envelope {code, message, data} containing fund info, the
    k-line records, date strings, day-over-day close changes, volumes,
    amounts and a display time unit.
    """
    try:
        interval = request.args.get('interval', '1W')
        fund_type = request.args.get('fund_type', '必需消费')
        board = request.args.get('board', 'consumption')

        # Whitelist of board table names; `board` is interpolated into the SQL
        # below, so this membership check is the SQL-injection guard.
        allowed_boards = [
            'consumption', 'science', 'finance', 'healthcare', 'newenergy',
            'cyclecommodities', 'realestateinfrastructure', 'internationalregional',
            'emergingindustries', 'traditionalindustries'
        ]
        if board not in allowed_boards:
            return jsonify({'code': 400, 'message': '不支持的板块', 'data': None})

        base_sql = f"""
            SELECT ts_code, trade_date, pre_close, open, high, low, close, price_change, pct_chg, vol, amount, category, fund_name
            FROM {board}
            WHERE category = :fund_type
        """
        params = {'fund_type': fund_type}

        if interval and interval != 'all':
            # Anchor the window on the most recent trade date in the table
            max_date_result = db.session.execute(db.text(f"SELECT MAX(trade_date) FROM {board}"))
            end_date = max_date_result.scalar()
            if not end_date:
                return jsonify({'code': 200, 'message': '暂无数据', 'data': None})

            # NOTE(review): time_unit appears to be the frontend display
            # granularity (e.g. a 1M window rendered with weekly ticks), not a
            # data aggregation unit — confirm against the frontend.
            if interval == '1W':
                start_date = end_date - timedelta(days=7)
                time_unit = 'day'
            elif interval == '1M':
                start_date = end_date - timedelta(days=30)
                time_unit = 'week'
            else:  # 1Y
                start_date = end_date - timedelta(days=365)
                time_unit = 'month'

            base_sql += " AND trade_date >= :start_date AND trade_date <= :end_date"
            params['start_date'] = start_date.strftime('%Y-%m-%d')
            params['end_date'] = end_date.strftime('%Y-%m-%d')
        else:
            time_unit = 'day'  # default when returning the full history

        base_sql += " ORDER BY trade_date"

        query = db.text(base_sql)
        result = db.session.execute(query, params)

        df = pd.DataFrame(result.fetchall())
        if df.empty:
            return jsonify({'code': 404, 'message': '未找到数据', 'data': None})
        df.columns = ['ts_code', 'trade_date', 'pre_close', 'open', 'high', 'low', 'close', 'price_change', 'pct_chg', 'vol', 'amount', 'category', 'fund_name']

        dates = df['trade_date'].tolist()
        date_strs = [d.strftime('%Y-%m-%d') for d in dates]
        # Day-over-day close difference; the first row has no predecessor -> 0
        changes = (df['close'] - df['close'].shift(1)).fillna(0).tolist()

        response_data = {
            'fund_info': {
                'ts_code': df['ts_code'].iloc[0],
                'fund_name': df['fund_name'].iloc[0] if 'fund_name' in df.columns else '消费基金',
                'category': fund_type
            },
            'dateRange': {
                'earliest': date_strs[0],
                'latest': date_strs[-1]
            },
            'klineData': df.to_dict('records'),
            'dates': date_strs,
            'changes': changes,
            'volumes': df['vol'].tolist(),
            'amounts': df['amount'].tolist(),
            'timeUnit': time_unit
        }

        return jsonify({'code': 200, 'message': 'success', 'data': response_data})

    except Exception as e:
        return jsonify({'code': 500, 'message': str(e), 'data': None})

@app.route('/api/v1/fund/types')
def get_fund_types():
    """List the distinct fund categories present in the requested board table."""
    try:
        board = request.args.get('board', 'consumption')

        # Whitelisted table names — `board` is interpolated into the SQL below,
        # so this membership test is the injection guard.
        permitted = {
            'consumption', 'science', 'finance', 'healthcare', 'newenergy',
            'cyclecommodities', 'realestateinfrastructure', 'internationalregional',
            'emergingindustries', 'traditionalindustries'
        }
        if board not in permitted:
            return jsonify({'code': 400, 'message': '不支持的板块', 'data': None})

        rows = db.session.execute(db.text(f'SELECT DISTINCT category FROM {board}'))
        categories = [row[0] for row in rows]

        return jsonify({'code': 200, 'message': 'success', 'data': {'types': categories}})
    except Exception as e:
        return jsonify({'code': 500, 'message': str(e), 'data': None})

@app.route('/api/register', methods=['POST'])
def register():
    """Register a new user.

    Expects a JSON body {'username': ..., 'password': ...}. Returns 400 when
    the body is missing/invalid or either field is empty, 409 when the
    username is taken, 200 on success.
    """
    # get_json(silent=True) yields None instead of raising on a missing or
    # non-JSON body, so malformed requests get a clean 400 rather than an
    # unhandled 500 (request.json could be None / raise here before).
    data = request.get_json(silent=True) or {}
    username = data.get('username')
    password = data.get('password')
    if not username or not password:
        return jsonify({'code': 400, 'message': '用户名和密码不能为空'})
    # Reject duplicate usernames
    exists = db.session.execute(
        db.text('SELECT id FROM users WHERE username=:username'),
        {'username': username}
    ).first()
    if exists:
        return jsonify({'code': 409, 'message': '用户名已存在'})
    # SECURITY: the password is stored as PLAIN TEXT in password_hash. It
    # should be hashed (e.g. werkzeug.security.generate_password_hash), but
    # login() compares plain text too, so both must be migrated together.
    db.session.execute(
        db.text('INSERT INTO users (username, password_hash) VALUES (:username, :password_hash)'),
        {'username': username, 'password_hash': password}
    )
    db.session.commit()
    return jsonify({'code': 200, 'message': '注册成功'})

@app.route('/api/login', methods=['POST'])
def login():
    """Log a user in by username/password.

    Expects a JSON body {'username': ..., 'password': ...}. Returns 200 with
    {'user_id': ...} on success, 401 on bad credentials, 400 on a
    missing/invalid JSON body or empty fields.
    """
    # get_json(silent=True) yields None instead of raising on a missing or
    # non-JSON body, so malformed requests get a clean 400 rather than an
    # unhandled 500 (request.json could be None / raise here before).
    data = request.get_json(silent=True) or {}
    username = data.get('username')
    password = data.get('password')

    if not username or not password:
        return jsonify({'code': 400, 'message': '用户名或密码不能为空'})

    # SECURITY: plain-text password comparison against password_hash; should
    # use hashed verification (e.g. werkzeug.security.check_password_hash),
    # but register() currently stores plain text — migrate both together.
    user = db.session.execute(
        db.text('SELECT id FROM users WHERE username=:username AND password_hash=:password_hash'),
        {'username': username, 'password_hash': password}
    ).first()

    if user:
        # Return the user ID so the frontend can identify the session
        return jsonify({'code': 200, 'message': '登录成功', 'data': {'user_id': user[0]}})
    else:
        return jsonify({'code': 401, 'message': '用户名或密码错误'})

@app.route('/api/v1/index/compare')
def compare_indices():
    """Return close-price series for several indices, aggregated by interval.

    Query params:
        codes:    comma-separated index ts_codes (keys of index_code_board_map).
        interval: '1W' | '1M' | '1Y' — period-end sampling; anything else
                  returns daily closes.

    Response data maps each code to {'dates': [...], 'closes': [...]}.
    Unknown codes and codes without data are silently skipped.
    """
    try:
        codes = request.args.get('codes', '')
        # Aggregation interval for the comparison chart
        interval = request.args.get('interval', '1W')

        code_list = [c.strip() for c in codes.split(',') if c.strip()]
        if not code_list:
            return jsonify({'code': 400, 'message': '未指定指数代码', 'data': None})

        # NOTE(review): db.get_engine(app, bind=...) is the legacy
        # Flask-SQLAlchemy accessor — confirm it matches the installed version.
        engine = db.get_engine(app, bind='zhishu')
        all_data = {}
        with engine.connect() as conn:
            for code in code_list:
                board_name = index_code_board_map.get(code)
                if not board_name:
                    print(f"Warning: Unknown index code {code}")
                    continue

                print(f"Processing code: {code}")

                # Step 1: Fetch daily data for the specific code
                sql = text(f"SELECT trade_date, close FROM {board_name['board']} WHERE ts_code = :code ORDER BY trade_date")
                result = conn.execute(sql, {'code': code})
                daily_data = pd.DataFrame(result.fetchall(), columns=['trade_date', 'close'])

                if daily_data.empty:
                    print(f"No daily data found for {code} in {board_name['board']}")
                    continue

                print(f"Fetched {len(daily_data)} daily records for {code}")

                # Step 2: Aggregate the daily closes according to the interval
                daily_data['trade_date'] = pd.to_datetime(daily_data['trade_date'])
                daily_data = daily_data.sort_values('trade_date')

                aggregated_data = None
                aggregated_dates = []
                aggregated_closes = []

                if interval == '1W':
                    # Weekly: sample the last trading day of each week
                    # (weeks keyed by their Monday)
                    daily_data['week_start'] = daily_data['trade_date'] - pd.to_timedelta(daily_data['trade_date'].dt.weekday, unit='D')
                    grouped = daily_data.groupby('week_start', sort=True)
                    aggregated_data = grouped.apply(lambda g: pd.Series({
                         'trade_date': g.iloc[-1]['trade_date'].strftime('%Y-%m-%d'), # last trading day of the week
                         'close': g.iloc[-1]['close'] # its closing price
                    })).reset_index(drop=True)
                    aggregated_dates = aggregated_data['trade_date'].tolist()
                    aggregated_closes = aggregated_data['close'].tolist()

                elif interval == '1M':
                    # Monthly: sample the last trading day of each month
                    daily_data['year_month'] = daily_data['trade_date'].dt.strftime('%Y-%m')
                    grouped = daily_data.groupby('year_month', sort=True)
                    aggregated_data = grouped.apply(lambda g: pd.Series({
                        'trade_date': g.iloc[-1]['trade_date'].strftime('%Y-%m-%d'), # last trading day of the month
                         'close': g.iloc[-1]['close'] # its closing price
                    })).reset_index(drop=True)
                    aggregated_dates = aggregated_data['trade_date'].tolist()
                    aggregated_closes = aggregated_data['close'].tolist()

                elif interval == '1Y':
                    # Yearly: sample the last trading day of each year
                    daily_data['year'] = daily_data['trade_date'].dt.strftime('%Y')
                    grouped = daily_data.groupby('year', sort=True)
                    aggregated_data = grouped.apply(lambda g: pd.Series({
                         'trade_date': g.iloc[-1]['trade_date'].strftime('%Y-%m-%d'), # last trading day of the year
                         'close': g.iloc[-1]['close'] # its closing price
                    })).reset_index(drop=True)
                    aggregated_dates = aggregated_data['trade_date'].tolist()
                    aggregated_closes = aggregated_data['close'].tolist()

                else: # Default to daily if interval is not recognized (though frontend only sends 1W, 1M, 1Y)
                    aggregated_dates = daily_data['trade_date'].dt.strftime('%Y-%m-%d').tolist()
                    aggregated_closes = daily_data['close'].tolist()

                if aggregated_dates:
                    all_data[code] = {
                        'dates': aggregated_dates,
                        'closes': aggregated_closes
                    }

        if not all_data:
            return jsonify({'code': 404, 'message': '未找到数据', 'data': None})
        return jsonify({'code': 200, 'message': 'success', 'data': all_data})
    except Exception as e:
        print(f"Error in compare_indices: {e}") # Log the actual error on the server side
        return jsonify({'code': 500, 'message': str(e), 'data': None})

@app.route('/api/v1/index/kline')
def index_kline():
    """Return K-line bars for one index board table, aggregated by interval.

    Query params:
        board:    index table name in the `zhishu` bind (whitelisted below).
        interval: '1W' (weekly bars), '1M' (monthly), '1Y' (yearly); anything
                  else falls back to daily bars.

    Aggregation rule per period: open = first day's open, close = last day's
    close, high/low = period extremes, vol/amount = period sums.
    """
    try:
        board = request.args.get('board', 'hushen300')
        interval = request.args.get('interval', '1W')
        # Whitelist: `board` is interpolated into the SQL below, so this
        # membership check is the SQL-injection guard.
        allowed_boards = [
            'hushen300', 'shanghai_stock_exchange_index', 'shenzhen_component_index',
            'sse50', 'zhongzheng1000', 'zhongzheng500', 'zhongzhenga50'
        ]
        if board not in allowed_boards:
            return jsonify({'code': 400, 'message': '不支持的板块', 'data': None})

        engine = db.get_engine(app, bind='zhishu')
        with engine.connect() as conn:
            sql = text(f"""
                SELECT trade_date, open, close, high, low, vol, amount
                FROM {board}
                ORDER BY trade_date
            """)
            result = conn.execute(sql)
            rows = result.fetchall()
            if not rows:
                return jsonify({'code': 404, 'message': '未找到数据', 'data': None})
            df = pd.DataFrame(rows, columns=['trade_date', 'open', 'close', 'high', 'low', 'vol', 'amount'])
            df['trade_date'] = pd.to_datetime(df['trade_date'])
            df = df.sort_values('trade_date')

            print('总数据行数:', len(df), '最早日期:', df['trade_date'].min(), '最晚日期:', df['trade_date'].max())

            if interval == '1W':
                # Weekly bars, grouped by the Monday of each week (Mon-Sun)
                df['week_start'] = df['trade_date'] - pd.to_timedelta(df['trade_date'].dt.weekday, unit='D')
                grouped = df.groupby('week_start', sort=True)
                agg = grouped.apply(lambda g: pd.Series({
                    'open': g.iloc[0]['open'],
                    'close': g.iloc[-1]['close'],
                    'high': g['high'].max(),
                    'low': g['low'].min(),
                    'vol': g['vol'].sum(),
                    'amount': g['amount'].sum(),
                    'trade_date': g.iloc[0]['week_start'].strftime('%Y-%m-%d')
                })).reset_index(drop=True)
                dates = agg['trade_date'].tolist()
            elif interval == '1M':
                # Monthly bars, labelled YYYY-MM
                df['year_month'] = df['trade_date'].dt.strftime('%Y-%m')
                grouped = df.groupby('year_month', sort=True)
                agg = grouped.apply(lambda g: pd.Series({
                    'open': g.iloc[0]['open'],
                    'close': g.iloc[-1]['close'],
                    'high': g['high'].max(),
                    'low': g['low'].min(),
                    'vol': g['vol'].sum(),
                    'amount': g['amount'].sum(),
                    'trade_date': g.iloc[0]['year_month']
                })).reset_index(drop=True)
                dates = agg['trade_date'].tolist()
            elif interval == '1Y':
                # Yearly bars, labelled YYYY
                df['year'] = df['trade_date'].dt.strftime('%Y')
                grouped = df.groupby('year', sort=True)
                agg = grouped.apply(lambda g: pd.Series({
                    'open': g.iloc[0]['open'],
                    'close': g.iloc[-1]['close'],
                    'high': g['high'].max(),
                    'low': g['low'].min(),
                    'vol': g['vol'].sum(),
                    'amount': g['amount'].sum(),
                    'trade_date': g.iloc[0]['year']
                })).reset_index(drop=True)
                dates = agg['trade_date'].tolist()
            else:
                # Default: daily bars, dates formatted YYYY-MM-DD
                agg = df.copy()
                agg['trade_date'] = agg['trade_date'].dt.strftime('%Y-%m-%d')
                dates = agg['trade_date'].tolist()

            print('聚合后行数:', len(agg), 'x轴样例:', agg['trade_date'].head())

            # Serialize the aggregated bars for the frontend chart
            kline = [
                {
                    'trade_date': row['trade_date'],
                    'open': row['open'],
                    'close': row['close'],
                    'high': row['high'],
                    'low': row['low'],
                    'vol': row['vol'],
                    'amount': row['amount']
                }
                for _, row in agg.iterrows()
            ]
            volumes = agg['vol'].tolist()
            return jsonify({'code': 200, 'message': 'success', 'data': {
                'kline': kline,
                'dates': dates,
                'volumes': volumes
            }})
    except Exception as e:
        return jsonify({'code': 500, 'message': str(e), 'data': None})

def _aggregate_analysis_kline(daily_data, interval):
    """Collapse sorted daily rows into K-line bars for the given interval.

    ``daily_data`` must be non-empty, sorted by a datetime ``trade_date``
    column.  For '1W'/'1M'/'1Y' the rows are grouped (week start Monday,
    calendar month, calendar year); any other interval keeps daily rows.
    Returns a DataFrame with open/close/high/low/vol/amount and a string
    ``trade_date`` label per bar.
    """
    def _bars(grouped, label):
        # First open, last close, extreme high/low and summed volume/amount per group.
        return grouped.apply(lambda g: pd.Series({
            'open': g.iloc[0]['open'],
            'close': g.iloc[-1]['close'],
            'high': g['high'].max(),
            'low': g['low'].min(),
            'vol': g['vol'].sum(),
            'amount': g['amount'].sum(),
            'trade_date': label(g),
        })).reset_index(drop=True)

    if interval == '1W':
        # Label each bar with the Monday of its week.
        daily_data['week_start'] = daily_data['trade_date'] - pd.to_timedelta(
            daily_data['trade_date'].dt.weekday, unit='D')
        return _bars(daily_data.groupby('week_start', sort=True),
                     lambda g: g.iloc[0]['week_start'].strftime('%Y-%m-%d'))
    if interval == '1M':
        daily_data['year_month'] = daily_data['trade_date'].dt.strftime('%Y-%m')
        return _bars(daily_data.groupby('year_month', sort=True),
                     lambda g: g.iloc[0]['year_month'])
    if interval == '1Y':
        daily_data['year'] = daily_data['trade_date'].dt.strftime('%Y')
        return _bars(daily_data.groupby('year', sort=True),
                     lambda g: g.iloc[0]['year'])
    # 'daily' or unrecognised interval: keep daily granularity.
    agg = daily_data.copy()
    agg['trade_date'] = agg['trade_date'].dt.strftime('%Y-%m-%d')
    return agg


def _format_cn_magnitude(value):
    """Format a number with 万/亿 magnitude suffixes for display."""
    if value >= 1e8:
        return f"{value/1e8:.2f}亿"
    if value >= 1e4:
        return f"{value/1e4:.2f}万"
    return f"{value:.0f}"


# Smart analysis endpoint: per-index statistics over an optional custom date
# range, plus a relative-performance summary when several indices are selected.
@app.route('/api/v1/index/analyze')
def analyze_indices():
    """Analyze one or more indices over a (possibly custom) date range.

    Query parameters:
        codes      -- comma-separated index ts_codes (required)
        interval   -- aggregation interval: '1W' (default), '1M', '1Y' or 'daily'
        start_date -- optional 'YYYY-MM-DD' lower bound of the analysis window
        end_date   -- optional 'YYYY-MM-DD' upper bound of the analysis window

    For each index the response contains structured ``analysis_items``:
    a narrative summary plus per-metric entries (total change, annualized
    return, volatility, max drawdown, average volume/turnover).  When more
    than one index has valid data, a relative-performance item is appended.
    """
    try:
        codes = request.args.get('codes', '')
        interval = request.args.get('interval', '1W')  # aggregation granularity
        start_date_str_param = request.args.get('start_date')
        end_date_str_param = request.args.get('end_date')

        code_list = [c.strip() for c in codes.split(',') if c.strip()]
        if not code_list:
            return jsonify({'code': 400, 'message': '未指定指数代码进行分析', 'data': None})

        engine = db.get_engine(app, bind='zhishu')
        # Collected per-index results; 'price_change_pct' is kept temporarily
        # so the relative-performance ranking below can use it.
        individual_analyses_data = []

        with engine.connect() as conn:
            for code in code_list:
                index_info = index_code_board_map.get(code)
                if not index_info:
                    print(f"Warning: Unknown index code {code} for analysis")
                    individual_analyses_data.append({
                        'code': code,
                        'name': code,
                        'analysis_items': [{'label': '错误', 'value': f"未找到指数 {code} 的信息。"}],
                        'price_change_pct': None,
                    })
                    continue

                board_name = index_info['board']
                description = index_info['description']
                name = index_info.get('name', code)

                # board_name comes from the trusted server-side map, so the
                # f-string table name here is safe.
                sql = text(f"""
                    SELECT trade_date, close, open, high, low, vol, amount
                    FROM {board_name}
                    WHERE ts_code = :code
                    ORDER BY trade_date
                """)
                result = conn.execute(sql, {'code': code})
                daily_data = pd.DataFrame(
                    result.fetchall(),
                    columns=['trade_date', 'close', 'open', 'high', 'low', 'vol', 'amount'])

                analysis_text = description
                metric_items = []               # structured (label, value) metric entries
                current_price_change_pct = None  # total change %, for relative ranking

                if not daily_data.empty:
                    daily_data['trade_date'] = pd.to_datetime(daily_data['trade_date'])
                    daily_data = daily_data.sort_values('trade_date')

                    # Apply the custom date-range filter before aggregating.
                    if start_date_str_param and end_date_str_param:
                        try:
                            start_date_filter = datetime.strptime(start_date_str_param, '%Y-%m-%d')
                            end_date_filter = datetime.strptime(end_date_str_param, '%Y-%m-%d')
                        except ValueError:
                            print(f"Warning: Invalid date format for custom analysis range: {start_date_str_param} to {end_date_str_param}")
                            individual_analyses_data.append({
                                'code': code,
                                'name': name,
                                'analysis_items': [{'label': '错误', 'value': "无效的日期格式，无法进行自定义区间分析。"}],
                                'price_change_pct': None,
                            })
                            continue  # move on to the next code
                        daily_data = daily_data[
                            (daily_data['trade_date'] >= start_date_filter) &
                            (daily_data['trade_date'] <= end_date_filter)].copy()
                        daily_data.reset_index(drop=True, inplace=True)

                    agg = (_aggregate_analysis_kline(daily_data, interval)
                           if not daily_data.empty else pd.DataFrame())

                    if not agg.empty:
                        first_bar = agg.iloc[0]
                        last_bar = agg.iloc[-1]
                        first_close = first_bar['close']
                        last_close = last_bar['close']
                        # Actual aggregated dates drive the calculations; the
                        # user-supplied dates (if any) are preferred for display.
                        start_date_for_calc = pd.to_datetime(first_bar['trade_date'])
                        end_date_for_calc = pd.to_datetime(last_bar['trade_date'])
                        start_date_to_display = start_date_str_param or first_bar['trade_date']
                        end_date_to_display = end_date_str_param or last_bar['trade_date']

                        if pd.notna(first_close) and pd.notna(last_close) and first_close != 0:
                            # --- Total price change ---
                            price_change_pct = ((last_close - first_close) / first_close) * 100
                            # BUG FIX: previously this was never propagated, so the
                            # relative-performance section below was dead code.
                            current_price_change_pct = price_change_pct
                            change_direction = "上涨" if price_change_pct >= 0 else "下跌"
                            analysis_text += f" 在 {start_date_to_display} 到 {end_date_to_display} 的时间区间内，该指数的总{change_direction}幅度约为 {abs(price_change_pct):.2f}%。"
                            metric_items.append({'label': '涨跌幅', 'value': f"{change_direction}幅度约为 {abs(price_change_pct):.2f}%。"})

                            # --- Annualized return ---
                            time_difference = (end_date_for_calc - start_date_for_calc).days
                            if time_difference > 0:
                                years = time_difference / 365.25  # account for leap years
                                total_return_decimal = price_change_pct / 100.0
                                # Guard the fractional power: undefined for total
                                # returns of -100% or worse.
                                if (1 + total_return_decimal) > 0:
                                    annualized_return_pct = (((1 + total_return_decimal) ** (1.0 / years)) - 1) * 100
                                    annualized_direction = "年化上涨" if annualized_return_pct >= 0 else "年化下跌"
                                    segment = f" {annualized_direction}幅度约为 {abs(annualized_return_pct):.2f}%。"
                                else:
                                    segment = " 无法计算年化收益率（总收益为-100%或更低）。"
                            else:
                                segment = " 分析区间时长不足，无法计算年化收益率。"
                            analysis_text += segment
                            metric_items.append({'label': '年化收益率', 'value': segment.strip()})

                            # --- Volatility (sample standard deviation of closes) ---
                            valid_closes = pd.to_numeric(agg['close'], errors='coerce').dropna()
                            if len(valid_closes) > 1:
                                volatility = valid_closes.std()  # ddof=1 (sample std)
                                if pd.notna(volatility):
                                    segment = f" 在该时间区间内，指数价格的波动性（标准差）约为 {volatility:.2f}。标准差反映了价格波动的剧烈程度，数值越大，波动越剧烈。"
                                else:
                                    segment = " 在该时间区间内，无法计算波动性（标准差）。"
                            else:
                                segment = " 在该时间区间内，数据点不足，无法计算波动性（标准差）。"
                            analysis_text += segment
                            metric_items.append({'label': '波动性 (标准差)', 'value': segment.strip()})

                            # --- Maximum drawdown ---
                            cumulative_max = agg['close'].cummax()
                            drawdown = pd.Series(0, index=agg.index)
                            # Avoid division by zero if any running maximum is 0.
                            if not cumulative_max.eq(0).any():
                                drawdown = (agg['close'] / cumulative_max - 1) * 100
                            max_drawdown = drawdown.min()
                            if pd.notna(max_drawdown):
                                segment = f" 最大回撤约为 {abs(max_drawdown):.2f}%。最大回撤反映了在该区间内从最高点下跌的最大幅度。"
                            else:
                                segment = " 无法计算最大回撤。"
                            analysis_text += segment
                            metric_items.append({'label': '最大回撤', 'value': segment.strip()})

                            # --- Average volume / turnover ---
                            valid_vol = pd.to_numeric(agg['vol'], errors='coerce').dropna()
                            valid_amount = pd.to_numeric(agg['amount'], errors='coerce').dropna()
                            parts = []
                            if not valid_vol.empty:
                                # NOTE(review): 'vol' is assumed to be in lots (手) — confirm with data source.
                                parts.append(f" 平均成交量约为 {_format_cn_magnitude(valid_vol.mean())}手。")
                            else:
                                parts.append(" 无法计算平均成交量。")
                            if not valid_amount.empty:
                                parts.append(f" 平均成交额约为 {_format_cn_magnitude(valid_amount.mean())}元。")
                            else:
                                parts.append(" 无法计算平均成交额。")
                            segment = "".join(parts)
                            analysis_text += segment
                            metric_items.append({'label': '成交量/额', 'value': segment.strip()})
                        else:
                            analysis_text += f" 在 {start_date_to_display} 到 {end_date_to_display} 的时间区间内，无法计算涨跌幅度、波动性、最大回撤或成交量/额。"
                    else:
                        analysis_text += " 在当前时间区间内，未找到足够数据进行分析，无法计算涨跌幅度、波动性、最大回撤或成交量/额。"

                # BUG FIX: metric items are only attached when they were actually
                # computed, so no unbound names are referenced on the empty-data
                # path, and each label carries its own value rather than the
                # whole narrative text.
                analysis_items = [{'label': '分析', 'value': analysis_text}] + metric_items
                individual_analyses_data.append({
                    'code': code,
                    'name': name,
                    'analysis_items': analysis_items,
                    'price_change_pct': current_price_change_pct,
                })

        # --- Relative performance across the selected indices ---
        if len(individual_analyses_data) > 1:
            valid_analyses = [a for a in individual_analyses_data if a.get('price_change_pct') is not None]
            if len(valid_analyses) > 1:
                sorted_analyses = sorted(valid_analyses, key=lambda x: x['price_change_pct'], reverse=True)
                best_performer = sorted_analyses[0]
                worst_performer = sorted_analyses[-1]
                relative_summary = f" 与其他所选指数相比，在该时间区间内，{best_performer['name']} (涨跌幅 {best_performer['price_change_pct']:.2f}%) 表现最好，而 {worst_performer['name']} (涨跌幅 {worst_performer['price_change_pct']:.2f}%) 表现相对较差。"
                for analysis_data in valid_analyses:
                    analysis_data['analysis_items'].append(
                        {'label': '相对表现', 'value': relative_summary.strip()})
            elif len(valid_analyses) == 1:
                # Only one index has usable data: tell the user why no comparison appears.
                valid_analyses[0]['analysis_items'].append(
                    {'label': '提示', 'value': "您选择了多个指数，但仅有当前指数在所选区间内有有效数据进行分析。"})
            # With zero valid analyses, the per-index error items already explain.

        # Strip the temporary ranking field before returning.
        final_results = [{
            'code': a['code'],
            'name': a['name'],
            'analysis_items': a['analysis_items'],
        } for a in individual_analyses_data]

        if not final_results:
            return jsonify({'code': 404, 'message': '未找到任何指数的数据进行分析', 'data': None})

        return jsonify({'code': 200, 'message': 'success', 'data': final_results})

    except Exception as e:
        print(f"Error in analyze_indices: {e}")
        return jsonify({'code': 500, 'message': str(e), 'data': None})

@app.route('/test_db')
def test_db():
    """Health check: issue SELECT 1 against both the fund DB and the index DB."""
    try:
        # Primary (fund) database through the default session.
        db.session.execute(text('SELECT 1'))
        # Index database through its dedicated 'zhishu' bind.
        zhishu_engine = db.get_engine(app, bind='zhishu')
        with zhishu_engine.connect() as connection:
            connection.execute(text('SELECT 1'))
    except Exception as e:
        return jsonify({
            'code': 500,
            'message': f'数据库连接失败: {str(e)}',
            'data': None
        })
    return jsonify({
        'code': 200,
        'message': '数据库连接正常',
        'data': {
            'main_db': '连接成功',
            'index_db': '连接成功'
        }
    })

@app.route('/api/v1/favorites/add', methods=['POST'])
def add_favorite():
    """Add a fund or index to a user's favorites, rejecting duplicates (409)."""
    try:
        payload = request.json
        user_id = payload.get('user_id')
        item_type = payload.get('item_type')
        item_code = payload.get('item_code')
        # Optional context stored with the favorite for later display lookups.
        board = payload.get('board', None)
        category = payload.get('category', None)

        if not (user_id and item_type and item_code):
            return jsonify({'code': 400, 'message': '缺少必要的参数', 'data': None})

        if item_type not in ('fund', 'index'):
            return jsonify({'code': 400, 'message': '不支持的项目类型', 'data': None})

        # Duplicate check: one favorite per (user, type, code) triple.
        duplicate = db.session.execute(
            db.text('SELECT id FROM user_favorites WHERE user_id=:user_id AND item_type=:item_type AND item_code=:item_code'),
            {'user_id': user_id, 'item_type': item_type, 'item_code': item_code}
        ).first()
        if duplicate is not None:
            return jsonify({'code': 409, 'message': '该项目已存在于您的自选列表中', 'data': None})

        # Persist the new favorite, including its board and category context.
        db.session.execute(
            db.text('INSERT INTO user_favorites (user_id, item_type, item_code, board, category) VALUES (:user_id, :item_type, :item_code, :board, :category)'),
            {'user_id': user_id, 'item_type': item_type, 'item_code': item_code, 'board': board, 'category': category}
        )
        db.session.commit()

        return jsonify({'code': 200, 'message': '添加自选成功', 'data': None})

    except Exception as e:
        # Undo the partial transaction before reporting the failure.
        db.session.rollback()
        print(f"Error adding favorite: {e}")
        return jsonify({'code': 500, 'message': str(e), 'data': None})

@app.route('/api/v1/favorites/remove', methods=['POST'])
def remove_favorite():
    """Delete one favorite identified by (user_id, item_type, item_code)."""
    try:
        payload = request.json
        criteria = {
            'user_id': payload.get('user_id'),
            'item_type': payload.get('item_type'),
            'item_code': payload.get('item_code'),
        }

        if not all(criteria.values()):
            return jsonify({'code': 400, 'message': '缺少必要的参数', 'data': None})

        delete_result = db.session.execute(
            db.text('DELETE FROM user_favorites WHERE user_id=:user_id AND item_type=:item_type AND item_code=:item_code'),
            criteria
        )
        db.session.commit()

        # rowcount of 0 means nothing matched the criteria.
        if delete_result.rowcount == 0:
            return jsonify({'code': 404, 'message': '未找到对应的自选项目', 'data': None})

        return jsonify({'code': 200, 'message': '移除自选成功', 'data': None})

    except Exception as e:
        db.session.rollback()
        print(f"Error removing favorite: {e}")
        return jsonify({'code': 500, 'message': str(e), 'data': None})

@app.route('/api/v1/favorites/list', methods=['GET'])
def list_favorites():
    """Return a user's favorites enriched with display names.

    For funds, the name is looked up in ``fund_basic`` first and, as a
    fallback, in the board table the favorite was stored with.  For indices,
    the description from ``index_code_board_map`` is used as the display name.

    Query parameters:
        user_id -- the owning user's integer id (required)
    """
    try:
        user_id = request.args.get('user_id', type=int)

        if not user_id:
            return jsonify({'code': 400, 'message': '缺少用户 ID', 'data': None})

        # Fetch the raw favorites, newest first.
        result = db.session.execute(
            db.text('SELECT item_type, item_code, board, category FROM user_favorites WHERE user_id=:user_id ORDER BY created_at DESC'),
            {'user_id': user_id}
        ).fetchall()

        favorites_list_raw = [{'item_type': row[0], 'item_code': row[1], 'board': row[2], 'category': row[3]} for row in result]

        final_favorites_list = []
        for item in favorites_list_raw:
            detailed_info = item.copy()  # keep the stored fields, add 'name'
            item_type = item['item_type']
            item_code = item['item_code']
            board = item['board']
            category = item['category']

            if item_type == 'fund':
                fund_name = item_code  # fall back to the raw code if no name is found
                try:
                    fund_info_result = db.session.execute(
                        db.text('SELECT fund_name FROM fund_basic WHERE ts_code=:item_code LIMIT 1'),
                        {'item_code': item_code}
                    ).first()
                    if fund_info_result and fund_info_result[0]:
                        fund_name = fund_info_result[0]
                except Exception as e:
                    # fund_basic may not exist or may lack a fund_name column;
                    # degrade gracefully to the board-table lookup below.
                    print(f"Warning: Could not query fund_basic for {item_code}: {e}")

                # Fallback lookup in the stored board table.
                # SECURITY FIX: `board` originates from client input saved by
                # add_favorite and is interpolated as a table name, so it must
                # be a plain identifier — otherwise skip the lookup entirely.
                if fund_name == item_code and board and board.isidentifier():
                    try:
                        board_fund_info = db.session.execute(
                            db.text(f'SELECT fund_name FROM {board} WHERE ts_code=:item_code LIMIT 1'),
                            {'item_code': item_code}
                        ).first()
                        if board_fund_info and board_fund_info[0]:
                            fund_name = board_fund_info[0]
                    except Exception as e:
                        print(f"Warning: Could not query board table {board} for {item_code}: {e}")

                detailed_info['name'] = fund_name
                detailed_info['category'] = category  # funds keep their category

            elif item_type == 'index':
                # Indices use the map's description as the display name.
                index_info = index_code_board_map.get(item_code)
                detailed_info['name'] = index_info['description'] if index_info else item_code
                # board is already stored; indices have no category.

            final_favorites_list.append(detailed_info)

        return jsonify({'code': 200, 'message': '获取自选列表成功', 'data': final_favorites_list})

    except Exception as e:
        print(f"Error listing favorites: {e}")
        return jsonify({'code': 500, 'message': str(e), 'data': None})

@app.route('/api/v1/prediction/calculate_signals', methods=['POST'])
def calculate_prediction_signals():
    """Compute buy/sell signals for a fund or index from a preset or custom algorithm.

    JSON body:
        item_code        -- ts_code of the fund/index (required)
        item_type        -- 'fund' or 'index' (required)
        board            -- board/table name holding the item's history (required)
        algorithm_rules  -- custom rule structure (mutually optional with preset_algorithm)
        preset_algorithm -- preset identifier, e.g. 'MACD_CROSS'
        parameters       -- optional parameters forwarded to preset evaluation

    Pipeline: fetch K-line history -> calculate indicators (via
    calculate_indicators, defined elsewhere in this file) -> evaluate rules
    (evaluate_preset_algorithm / evaluate_rules, also defined elsewhere) ->
    return dates, K-line records and the generated signals.
    """
    try:
        data = request.json
        item_code = data.get('item_code')
        item_type = data.get('item_type')
        board = data.get('board')
        algorithm_rules = data.get('algorithm_rules') # For custom algorithm
        preset_algorithm = data.get('preset_algorithm') # For preset algorithm
        # parameters for preset algorithms can also be included in data if needed

        # Reject requests missing identifiers or carrying neither algorithm form.
        if not item_code or not item_type or not board or (not algorithm_rules and not preset_algorithm):
            return jsonify({'code': 400, 'message': '缺少必要的参数或算法配置'}), 400

        print(f"Calculating signals for {item_type}: {item_code} on board {board}. Preset Algorithm: {preset_algorithm}, Rules provided: {bool(algorithm_rules)}")

        # 1. Fetch historical K line data
        if item_type == 'fund':
            table_name = board # Fund board name is the table name

            # Allow-list guards the f-string table name below against injection.
            allowed_fund_boards = [
                'consumption', 'science', 'finance', 'healthcare', 'newenergy',
                'cyclecommodities', 'realestateinfrastructure', 'internationalregional',
                'emergingindustries', 'traditionalindustries'
            ]
            if table_name not in allowed_fund_boards:
                 return jsonify({'code': 400, 'message': '不支持的基金板块'}), 400

            # *** MODIFIED SQL QUERY ***
            # Use ts_code to query historical data within the specified board table
            sql = text(f"SELECT trade_date, open, high, low, close, vol FROM {table_name} WHERE ts_code = :ts_code ORDER BY trade_date")
            params = {'ts_code': item_code} # Use item_code (which is ts_code) as parameter
            engine = db.engine # Use default engine for fund tables

        elif item_type == 'index':
             # Assuming item_code for index is the ts_code.
             # Validate the (code, board) pair against the trusted server-side map.
             index_info = index_code_board_map.get(item_code)
             if not index_info or index_info['board'] != board:
                  return jsonify({'code': 400, 'message': '无效的指数代码或板块'}), 400

             table_name = board # Index board name is the table name in 'zhishu' bind
             sql = text(f"SELECT trade_date, open, high, low, close, vol FROM {table_name} WHERE ts_code = :ts_code ORDER BY trade_date")
             params = {'ts_code': item_code}
             engine = db.get_engine(app, bind='zhishu') # Use 'zhishu' bind for index tables
        else:
            return jsonify({'code': 400, 'message': '不支持的项目类型'}), 400

        with engine.connect() as conn:
            result = conn.execute(sql, params)
            df = pd.DataFrame(result.fetchall(), columns=['trade_date', 'open', 'high', 'low', 'close', 'vol'])

        if df.empty:
            return jsonify({'code': 404, 'message': '未找到历史数据'}), 404

        # Ensure trade_date is datetime type and set as index
        df['trade_date'] = pd.to_datetime(df['trade_date'])
        df = df.set_index('trade_date')

        # Ensure df has required columns before passing to calculate_indicators
        required_cols = ['open', 'high', 'low', 'close', 'vol']
        if not all(col in df.columns.str.lower() for col in required_cols):
             return jsonify({'code': 500, 'message': '获取的数据缺少必要的列 (open, high, low, close, vol)'}), 500

        # Ensure index is datetime before passing to calculation functions
        if not isinstance(df.index, pd.DatetimeIndex):
             df.index = pd.to_datetime(df.index)

        # 2. Calculate technical indicators
        # Determine which indicators are needed based on preset or custom rules.
        # NOTE(review): `indicators_needed` is collected but not yet consumed —
        # see the TODO below about refactoring calculate_indicators.
        indicators_needed = set()
        if preset_algorithm:
            if preset_algorithm == 'MACD_CROSS':
                indicators_needed.add('MACD')
            # Add other preset algorithms and their needed indicators here
            # parameters for preset algorithms should be passed to calculate_indicators if they affect calculation
            # For MACD_CROSS, parameters are only used in evaluation, so no need to pass them to calculate_indicators yet

        elif algorithm_rules:
             # For custom rules, extract all unique indicators from the rules
             for rule in algorithm_rules:
                 for condition_group in rule.get('condition_groups', []):
                     for condition in condition_group:
                         indicator_key = condition.get('indicator')
                         if indicator_key:
                             indicators_needed.add(indicator_key)

        # Pass the set of needed indicators to calculate_indicators
        # We need to modify calculate_indicators to accept and use this list
        # TODO: Refactor calculate_indicators to calculate only needed indicators for efficiency

        # For now, we'll continue to pass the algorithm_rules structure or an empty list
        # But we should ensure MACD is calculated if MACD_CROSS preset is selected
        # Let's adjust calculate_indicators call to always calculate needed preset indicators

        # Create a dummy algorithm_rules structure to force calculation of needed preset indicators
        dummy_rules_for_calculation = []
        if preset_algorithm == 'MACD_CROSS':
             # Add a dummy rule to ensure MACD is calculated with default parameters
             dummy_rules_for_calculation.append({
                 'signal_type': 'buy', # Dummy type
                 'condition_groups': [[{
                     'indicator': 'MACD',
                     'parameters': { # Use default parameters for calculation
                         'short_period': 12,
                         'long_period': 26,
                         'signal_period': 9
                     }
                 }]]
             })
        # Add dummy rules for other preset algorithms here if they need specific indicators calculated

        # Use the original algorithm_rules if provided, otherwise use the dummy rules for presets
        rules_to_pass_to_calculator = algorithm_rules if algorithm_rules else dummy_rules_for_calculation

        df_with_indicators = calculate_indicators(df, rules_to_pass_to_calculator)


        # 3. Evaluate rules and generate signals
        generated_signals = []
        if preset_algorithm:
            # Evaluate the selected preset algorithm
            # Pass relevant parameters if needed by the preset algorithm
            generated_signals = evaluate_preset_algorithm(df_with_indicators, preset_algorithm, data.get('parameters', {})) # Pass parameters from request if any
        elif algorithm_rules:
            # Evaluate the custom algorithm rules
            generated_signals = evaluate_rules(df_with_indicators, algorithm_rules)


        # 4. Return signals
        # Restore trade_date as a column so it can be serialized for the response.
        df_with_indicators = df_with_indicators.reset_index()
        response_data = {
            'dates': df_with_indicators['trade_date'].dt.strftime('%Y-%m-%d').tolist(),
            'kline': df_with_indicators[['open', 'close', 'high', 'low', 'vol']].to_dict('records'),
            'signals': generated_signals
        }

        return jsonify({'code': 200, 'message': '信号计算完成', 'data': response_data}), 200

    except Exception as e:
        print(f"Error in calculate_prediction_signals: {e}")
        return jsonify({'code': 500, 'message': str(e), 'data': None}), 500

if __name__ == '__main__':
    # Development server only: debug=True must not be enabled in production.
    app.run(host='0.0.0.0', port=5000, debug=True)
