import datetime
import numpy as np
import pandas as pd
import os
from scipy import stats # type: ignore

# Import all of our refactored core modules
from data_handler import DataHandler
from optimizer import Optimizer
# Strategy and BacktestEngine are invoked internally by Optimizer, but importing them explicitly does no harm
from reporter import Reporter
from strategy import Strategy
from backtest_engine import BacktestEngine

def analyze_data_distribution(data_handler):
    """Analyze the distribution of the real close-price series.

    Parameters
    ----------
    data_handler : DataHandler
        Provides the loaded market data via its ``data_df`` attribute.

    Returns
    -------
    dict | None
        Distribution statistics: min/max/mean/median/std/count, selected
        percentiles keyed ``'p5'`` ... ``'p95'``, and a 20-bin histogram
        (``'hist'`` counts plus ``'bin_edges'``). ``None`` when no usable
        close prices are available.
    """
    if data_handler.data_df.empty:
        return None

    closes = data_handler.data_df['close'].dropna()
    if closes.empty:
        return None

    # Basic summary statistics.
    summary = {
        'min': closes.min(),
        'max': closes.max(),
        'mean': closes.mean(),
        'median': closes.median(),
        'std': closes.std(),
        'count': len(closes),
    }

    # Selected percentiles, stored as 'p5', 'p10', ..., 'p95'.
    summary.update(
        {f'p{q}': closes.quantile(q / 100) for q in (5, 10, 25, 50, 75, 90, 95)}
    )

    # Frequency distribution over 20 equal-width bins.
    counts, edges = np.histogram(closes, bins=20)
    summary['hist'] = counts
    summary['bin_edges'] = edges

    return summary

def generate_data_driven_thresholds(stats_info, orders_to_test):
    """Generate seed threshold lists from the real data distribution.

    Three generation methods are applied for every order in
    ``orders_to_test`` — quantile-based, density-based (histogram), and
    standard-deviation-based — so the flat result contains up to three
    lists per order (de-duplication may shorten individual lists).

    Parameters:
    - stats_info: distribution statistics as produced by
      ``analyze_data_distribution``; may additionally carry a 'raw_data'
      array for exact percentile computation.
    - orders_to_test: orders (threshold counts) of the piecewise
      functions to test.

    Returns:
    - list[list[int]]: generated threshold combinations; empty list when
      ``stats_info`` is falsy.
    """
    if not stats_info:
        return []
    
    generated_thresholds = []
    
    print(f"\n[数据分析] 指数分布统计:")
    print(f"  范围: {stats_info['min']:.0f} - {stats_info['max']:.0f}")
    print(f"  均值: {stats_info['mean']:.0f}, 中位数: {stats_info['median']:.0f}")
    print(f"  标准差: {stats_info['std']:.0f}")
    print(f"  25%-75%分位数: {stats_info['p25']:.0f} - {stats_info['p75']:.0f}")
    
    # Method 1: quantile-based threshold generation.
    for order in orders_to_test:
        if order == 3:
            # Order 3: use the 25%, 50%, 75% percentiles.
            thresholds = [
                int(stats_info['p25']),
                int(stats_info['p50']),
                int(stats_info['p75'])
            ]
        elif order == 4:
            # Order 4: use the 10%, 35%, 65%, 90% percentiles.
            percentiles = [10, 35, 65, 90]
            thresholds = []
            for p in percentiles:
                # Crude linear estimate between min and max; only used as fallback.
                value = np.interp(p, [0, 100], [stats_info['min'], stats_info['max']])
                # Compute the percentile exactly when raw data is available.
                close_prices = stats_info.get('raw_data', [])
                if len(close_prices) == 0:
                    # No raw data: fall back to the interpolation estimate.
                    thresholds.append(int(value))
                else:
                    actual_value = np.percentile(close_prices, p)
                    thresholds.append(int(actual_value))
        elif order == 5:
            # Order 5: use the 5%, 25%, 50%, 75%, 95% percentiles.
            thresholds = [
                int(stats_info['p5']),
                int(stats_info['p25']),
                int(stats_info['p50']),
                int(stats_info['p75']),
                int(stats_info['p95'])
            ]
        else:  # order >= 6
            # Evenly spaced between the 5% and 95% percentiles.
            start_val = stats_info['p5']
            end_val = stats_info['p95']
            thresholds = np.linspace(start_val, end_val, num=order, dtype=int).tolist()
        
        # Ensure thresholds are unique and sorted (dedup may shrink the list).
        thresholds = sorted(list(set(thresholds)))
        generated_thresholds.append(thresholds)
        print(f"  {order}阶分位数法: {thresholds}")
    
    # Method 2: density-based (histogram) threshold generation.
    hist = stats_info['hist']
    bin_edges = stats_info['bin_edges']
    
    # Place thresholds where the cumulative data density crosses evenly
    # spaced targets.
    for order in orders_to_test:
        density_thresholds = []
        
        # Cumulative density over the histogram bins.
        cumulative_density = np.cumsum(hist) / np.sum(hist)
        
        # Evenly spaced cumulative-density targets in [0.1, 0.9].
        target_densities = np.linspace(0.1, 0.9, order)
        
        for target in target_densities:
            # Bin whose cumulative density is closest to the target.
            # NOTE(review): this takes the bin's LEFT edge — confirm that
            # is intended rather than the right edge of the matched bin.
            closest_idx = np.argmin(np.abs(cumulative_density - target))
            threshold_value = int(bin_edges[closest_idx])
            density_thresholds.append(threshold_value)
        
        density_thresholds = sorted(list(set(density_thresholds)))
        generated_thresholds.append(density_thresholds)
        print(f"  {order}阶密度法: {density_thresholds}")
    
    # Method 3: standard-deviation-based threshold generation.
    mean_val = stats_info['mean']
    std_val = stats_info['std']
    
    for order in orders_to_test:
        # Spread thresholds across mean ± 1.5 std, clipped to the data range.
        start_val = max(stats_info['min'], mean_val - 1.5 * std_val)
        end_val = min(stats_info['max'], mean_val + 1.5 * std_val)
        
        std_thresholds = np.linspace(start_val, end_val, num=order, dtype=int).tolist()
        std_thresholds = sorted(list(set(std_thresholds)))
        generated_thresholds.append(std_thresholds)
        print(f"  {order}阶标准差法: {std_thresholds}")
    
    return generated_thresholds

def generate_adaptive_max_ratios(num_points):
    """Generate maximum position ratios matching a set of index thresholds.

    Ratios decrease monotonically — high position when the index is low,
    low/zero position when the index is high — while keeping sensible
    risk control.

    Parameters:
    - num_points: number of threshold points.

    Returns:
    - list[float]: num_points + 1 ratios running from 1.0 down to 0.0
      for num_points >= 2.  NOTE(review): for num_points < 2 the
      3-element fallback [1.0, 0.5, 0.0] is still returned (one more
      than num_points + 1 when num_points == 1) — confirm callers never
      pass fewer than 2 points.
    """
    if num_points <= 2:
        return [1.0, 0.5, 0.0]
    if num_points == 3:
        return [1.0, 0.7, 0.3, 0.0]
    if num_points == 4:
        return [1.0, 0.8, 0.5, 0.2, 0.0]
    if num_points == 5:
        return [1.0, 0.8, 0.6, 0.3, 0.1, 0.0]

    # num_points >= 6: smooth concave decay — slow decline at first,
    # faster toward the end (exponent 1.5).  (The previous version also
    # computed a plain linspace here that was immediately overwritten;
    # that dead assignment has been removed.)
    ratios = 1.0 - np.power(np.linspace(0, 1, num_points + 1), 1.5)
    ratios[-1] = 0.0  # guard against floating-point residue at the endpoint
    return [round(r, 2) for r in ratios]

def run_multi_period_backtest(data_handler, best_params):
    """Run backtests over several horizons (3-year, 7-year, full period)
    and collect the detailed results for each one.
    """
    results = {}

    # (result key, start date, end date, human-readable label) per window.
    period_specs = (
        ('3y_avg', '2020-01-01', '2023-12-31', '3年期 (2020-2022)'),
        ('7y_avg', '2018-01-01', '2024-12-31', '7年期 (2018-2024)'),
        ('full', '2016-01-01', '2025-08-01', '全周期 (2016-2025)'),
    )

    print("\n[多周期回测] 正在运行多周期回测分析...")

    for key, start, end, label in period_specs:
        print(f"  执行 {label} 回测...")

        try:
            engine = BacktestEngine(
                start_date=start,
                end_date=end,
                data_handler=data_handler,
                strategy=Strategy(params=best_params),
            )
            metrics, history_df, trades_df = engine.run_backtest()

            if metrics and not history_df.empty:
                # Keep the full backtest output for this window.
                results[key] = {
                    'metrics': metrics,
                    'history_df': history_df,
                    'trade_records': trades_df,
                    'description': label,
                }

                # Echo the headline figures.
                print(
                    f"    年化收益: {metrics.get('annualized_return_cagr', 0):.2%}, "
                    f"最大回撤: {metrics.get('max_drawdown', 0):.2%}, "
                    f"夏普: {metrics.get('sharpe_ratio', 0):.3f}"
                )
            else:
                print(f"    {label} 回测失败，无有效数据")

        except Exception as e:
            print(f"    {label} 回测出错: {str(e)}")

    return results

def calculate_multi_period_averages(multi_period_results):
    """Flatten per-period backtest results to their metrics dicts for reporting.

    Parameters:
    - multi_period_results: mapping of period key ('3y_avg', '7y_avg',
      'full') to a dict containing at least a 'metrics' entry.

    Returns:
    - dict: period key -> metrics dict, in the fixed 3y/7y/full order,
      containing only the periods present in the input.

    NOTE: despite the name, no averaging happens yet — each horizon is a
    single backtest, so its metrics pass through unchanged.  (Replaces
    three copy-pasted if-blocks with one comprehension.)
    """
    return {
        key: multi_period_results[key]['metrics']
        for key in ('3y_avg', '7y_avg', 'full')
        if key in multi_period_results
    }

def run_optimization():
    """Run the complete backtest optimization workflow.

    Steps: (0) create a mock data file if needed, (1) load and analyze
    the data, (2) build a data-driven parameter search space, (3) set up
    the optimizer, (4) run the genetic algorithm, (5) print results and
    persist artifacts (best-parameter file, multi-period summary, daily
    data CSV, HTML report) into a timestamped results directory.
    """
    # ==============================================================================
    # ===== 1. Core configuration (all configurations are here) =====
    # ==============================================================================

    # --- File & data configuration ---
    CSV_FILE_PATH = 'data.csv'

    # --- Optimizer (genetic algorithm) parameter configuration ---
    GA_PARAMS = {
        'n_generations': 0,       # generations to evolve; NOTE(review): 0 means no evolution beyond the initial population — confirm intended
        'population_size': 10,    # population size per generation
        'crossover_rate': 0.2,    # crossover probability
        'mutation_rate': 0.3,     # mutation probability
        'elitism_size': 2         # number of elite individuals preserved
    }

    # ==============================================================================
    # ===== 2. Execution Flow =====
    # ==============================================================================
    
    print("--- 开始执行回测优化任务 ---")

    # Step 0: create a mock data file for demonstration if none exists.
    if not os.path.exists(CSV_FILE_PATH):
        print(f"未找到数据文件 '{CSV_FILE_PATH}'。正在创建一个用于演示的虚拟数据文件...")
        date_range = pd.date_range(start='2015-12-01', end='2025-08-01', freq='B')
        
        # Build more realistic index data, mostly within the 3000-4000 band.
        np.random.seed(42)  # fixed seed for reproducibility
        base_index = 3500  # baseline index level
        index_values = []
        current_index = base_index
        
        for i in range(len(date_range)):
            # Random daily move, constrained to a sensible range.
            daily_change = np.random.normal(0, 0.02)  # 2% std of daily change
            current_index *= (1 + daily_change)
            
            # Clamp to 2800-5800 and bias mean-reversion toward 3000-4000.
            if current_index < 2800:
                current_index = 2800 + np.random.uniform(0, 200)
            elif current_index > 5800:
                current_index = 5800 - np.random.uniform(0, 200)
            elif current_index > 4500:  # downward pressure at high levels
                current_index *= (1 - np.random.uniform(0, 0.01))
            elif current_index < 3000:  # upward support at low levels
                current_index *= (1 + np.random.uniform(0, 0.01))
            
            index_values.append(current_index)
        
        mock_data = {
            '日期': date_range.strftime('%Y-%m-%d'),
            '开盘': index_values,
            '收盘': index_values,
            '最高': [v * 1.01 for v in index_values],
            '最低': [v * 0.99 for v in index_values],
            '成交量': 1, '成交额': 1, '振幅': 1, '涨跌幅': 1, '涨跌额': 1, '换手率': 1,
            '10年期国债收益率': 1, '滚动市盈率': 1,
            '股债利差': np.random.uniform(2, 8, size=len(date_range)),
            '沪深300ETF': [v/1000 for v in index_values]  # ETF price is roughly index / 1000
        }
        mock_df = pd.DataFrame(mock_data)
        mock_df.to_csv(CSV_FILE_PATH, index=False)
        print("虚拟数据文件创建成功。")
        print(f"指数范围: {min(index_values):.0f} - {max(index_values):.0f}")
        print(f"指数均值: {np.mean(index_values):.0f}")

    # Step 1: initialize the data handler and analyze the data distribution.
    print("\n[步骤 1/5] 初始化数据处理器并分析数据分布...")
    data_handler = DataHandler(csv_filepath=CSV_FILE_PATH)
    if data_handler.data_df.empty:
        print("数据加载失败，程序终止。")
        return

    # Analyze the real data's distribution.
    stats_info = analyze_data_distribution(data_handler)
    if not stats_info:
        print("数据分析失败，程序终止。")
        return
    
    # Keep the raw series for exact percentile computation downstream.
    stats_info['raw_data'] = data_handler.data_df['close'].dropna().values

    # Step 2: generate threshold seeds from the real data distribution.
    print("\n[步骤 2/5] 基于真实数据分布生成阈值种子...")
    
    # --- Strategy parameter search space (param grid) configuration ---
    orders_to_test = [3, 4, 5, 6]  # test 3rd-, 4th-, ... order piecewise functions
    
    # Generate thresholds from the real distribution.
    generated_thresholds = generate_data_driven_thresholds(stats_info, orders_to_test)
    
    # Generate the matching position ratios.
    generated_ratios = []
    for thresholds in generated_thresholds:
        ratios = generate_adaptive_max_ratios(len(thresholds))
        generated_ratios.append(ratios)
    
    # De-duplicate (lists -> tuples for hashing, then back to lists).
    # NOTE(review): thresholds and ratios are de-duplicated independently,
    # so the optimizer may pair an index_thresholds list with a max_ratios
    # list of mismatched length — confirm Strategy tolerates this.
    unique_generated_thresholds = list(map(list, set(map(tuple, generated_thresholds))))
    unique_generated_ratios = list(map(list, set(map(tuple, generated_ratios))))
    
    print(f"\n[阈值生成] 共生成 {len(unique_generated_thresholds)} 组唯一阈值组合")
    print(f"[阈值生成] 共生成 {len(unique_generated_ratios)} 组唯一仓位组合")

    # [Key change] Full parameter grid, adapted to the new strategy parameter scheme.
    param_grid = {
        # Trigger-threshold parameters (unchanged).
        'buy_threshold': np.round(np.arange(4.1, 8, 0.1), 2).tolist(),
        'sell_threshold': np.round(np.arange(1, 4, 0.1), 2).tolist(),

        'initial_buy_ratio': np.round(np.arange(0.001, 0.003, 0.001), 3).tolist(),
        'initial_sell_ratio': np.round(np.arange(0.00, 0.003, 0.001), 3).tolist(),
        'buy_base_factor': np.round(np.arange(0.5, 1.5, 0.1), 2).tolist(),
        'sell_base_factor': np.round(np.arange(0.5,1.5, 0.1), 2).tolist(),

        # Linear-model weight parameters.
        'buy_spread_factor': np.round(np.arange(0.5, 2, 0.05), 3).tolist(),
        'sell_spread_factor': np.round(np.arange(0.5, 5, 0.05), 3).tolist(),
        'buy_etf_factor': np.round(np.arange(-0.005, 0.005, 0.001), 4).tolist(),
        'sell_etf_factor': np.round(np.arange(-0.005, 0.005, 0.001), 4).tolist(),
        # Risk-control parameters.
        'max_trade_ratio': [0.005, 0.01, 0.05],                                      # per-trade upper bound

        # Risk-control parameters derived from the real data.
        'index_thresholds': unique_generated_thresholds,
        'max_ratios': unique_generated_ratios
    }
    
    seed_params = []
    
    # [Updated] seed parameter configuration for the new parameter scheme.
    if len(unique_generated_thresholds) >= 1:
        # Pick a medium-complexity thresholds combination.
        selected_thresholds = unique_generated_thresholds[len(unique_generated_thresholds)//2] if len(unique_generated_thresholds) > 1 else unique_generated_thresholds[0]
        selected_ratios = generate_adaptive_max_ratios(len(selected_thresholds))
        
        seed_params.append({
            # Trigger thresholds.
            'buy_threshold': 6,              # equity-bond spread level triggering buys
            'sell_threshold': 3,             # equity-bond spread level triggering sells
            
            # [New] initial trade ratios.
            'initial_buy_ratio': 0.003,        # initial buy ratio 0.3%
            'initial_sell_ratio': 0.003,       # initial sell ratio 0.3%
            
            # [Updated] adjustment factors (formerly base_ratio).
            'buy_base_factor': 1.0,            # buy-side base adjustment factor
            'sell_base_factor': 1.0,           # sell-side base adjustment factor
            
            # Linear-model weights.
            'buy_spread_factor': 2,              # equity-bond spread weight factor
            'sell_etf_factor': 0,              # ETF price weight factor
             'sell_spread_factor': 5.3,              # equity-bond spread weight factor
            'buy_etf_factor': 0,  
            # Risk-control parameters.
            'max_trade_ratio': 0.05,           # max 5% of portfolio per single trade
            'index_thresholds': selected_thresholds,
            'max_ratios': selected_ratios
        })
    
    print("\n[种子参数] 更新的种子参数配置（买卖独立因子）:")
    if seed_params:
        seed = seed_params[0]
        print(f"  购买阈值: {seed['buy_threshold']} (股债利差触发)")
        print(f"  卖出阈值: {seed['sell_threshold']}")
        print(f"  初始买入比率: {seed['initial_buy_ratio']:.1%} (新增参数)")
        print(f"  初始卖出比率: {seed['initial_sell_ratio']:.1%} (新增参数)")
        print(f"  买入调整因子: {seed['buy_base_factor']} (原base_ratio)")
        print(f"  卖出调整因子: {seed['sell_base_factor']} (原base_ratio)")
        print(f"  买入利差因子: {seed['buy_spread_factor']} (买卖独立)")
        print(f"  卖出利差因子: {seed['sell_spread_factor']} (买卖独立)")
        print(f"  买入ETF因子: {seed['buy_etf_factor']} (买卖独立)")
        print(f"  卖出ETF因子: {seed['sell_etf_factor']} (买卖独立)")
        print(f"  指数阈值: {seed['index_thresholds']}")
        print(f"  仓位配置: {seed['max_ratios']}")
        print(f"  新公式: 交易比率 = 初始比率 × [基础因子 + (偏离度 × 独立spread_factor) + (etf_price × 独立etf_factor)]")

    # Step 3: initialize the parameter optimizer.
    print("\n[步骤 3/5] 初始化参数优化器...")
    optimizer = Optimizer(data_handler=data_handler, param_grid=param_grid)

    # Step 4: run the genetic-algorithm optimization.
    print("\n[步骤 4/5] 启动遗传算法...")
    best_params, best_full_metrics = optimizer.run_genetic_algorithm(
        n_generations=GA_PARAMS['n_generations'],
        population_size=GA_PARAMS['population_size'],
        crossover_rate=GA_PARAMS['crossover_rate'],
        mutation_rate=GA_PARAMS['mutation_rate'],
        elitism_size=GA_PARAMS['elitism_size'],
        seed_params=seed_params
    )

    # Step 5: print the final results.
    print("\n" + "="*50)
    print("      回测优化任务全部完成")
    print("="*50)
    if best_params:
        print("\n[最优参数组合]:")
        # Pretty-print via a pandas Series.
        print(pd.Series(best_params))
        
        print("\n[最优参数在2016-01-01至2025-08-01全周期下的表现]:")
        for key, value in best_full_metrics.items():
            if isinstance(value, float):
                # Percentage-style metrics vs plain floats.
                if 'return' in key or 'drawdown' in key or 'ratio' in key or 'volatility' in key:
                    print(f"  - {key:<30}: {value:.2%}")
                else:
                    print(f"  - {key:<30}: {value:.4f}")
            else:
                print(f"  - {key:<30}: {value}")
    else:
        print("\n本次优化未能找到有效的参数组合。")
    print("\n" + "="*50)
    
    if best_params:
        print("\n[步骤 5/5] 正在使用最优参数生成详细回测报告...")

        # === New: create a dedicated results directory for this run ===
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        result_dir = os.path.join("results", f"backtest_{timestamp}")
        os.makedirs(result_dir, exist_ok=True)

        # 1. Save the best-parameter file along with the data-analysis results.
        params_file = os.path.join(result_dir, "best_parameters.txt")
        with open(params_file, 'w', encoding='utf-8') as f:
            f.write("="*50 + "\n")
            f.write("最优参数组合 - 新策略参数体系\n")
            f.write("="*50 + "\n")
            f.write("策略公式: 交易比率 = 初始比率 × [基础因子 + (偏离度 × spread_factor) + (etf_price × etf_factor)]\n\n")
            for key, value in best_params.items():
                f.write(f"{key}: {value}\n")
            f.write("\n" + "="*50 + "\n")
            f.write("性能指标\n")
            f.write("="*50 + "\n")
            for key, value in best_full_metrics.items():
                if isinstance(value, float):
                    if 'return' in key or 'drawdown' in key or 'ratio' in key or 'volatility' in key:
                        f.write(f"{key}: {value:.2%}\n")
                    else:
                        f.write(f"{key}: {value:.4f}\n")
                else:
                    f.write(f"{key}: {value}\n")
            
            # Append the data-distribution analysis.
            f.write("\n" + "="*50 + "\n")
            f.write("真实数据分布分析\n")
            f.write("="*50 + "\n")
            f.write(f"指数范围: {stats_info['min']:.0f} - {stats_info['max']:.0f}\n")
            f.write(f"均值: {stats_info['mean']:.0f}, 中位数: {stats_info['median']:.0f}\n")
            f.write(f"标准差: {stats_info['std']:.0f}\n")
            f.write(f"25%-75%分位数: {stats_info['p25']:.0f} - {stats_info['p75']:.0f}\n")
            f.write(f"5%-95%分位数: {stats_info['p5']:.0f} - {stats_info['p95']:.0f}\n")
        print(f"最优参数已保存至: {params_file}")

        # 2. Run the multi-period backtests.
        multi_period_results = run_multi_period_backtest(data_handler, best_params)
        
        # 3. Run the final full-period backtest (used for report generation).
        final_strategy = Strategy(params=best_params)
        final_engine = BacktestEngine(
            start_date='2016-01-01',
            end_date='2025-08-01',
            data_handler=data_handler,
            strategy=final_strategy
        )
        metrics, history_df, trade_records_df = final_engine.run_backtest()

        if not history_df.empty:
            # 4. Compute the multi-period average results (for the report).
            averaged_multi_period_results = calculate_multi_period_averages(multi_period_results)
            
            # 5. Generate the enhanced HTML report.
            report_generator = Reporter(
                history_df=history_df,
                trade_records=trade_records_df,
                best_params=best_params,
                metrics=metrics,
                multi_period_results=averaged_multi_period_results  # pass the multi-period results
            )
            report_file = os.path.join(result_dir, "new_strategy_backtest_report.html")
            report_generator.generate_report(filename=report_file)

            # 6. Save the daily backtest data.
            daily_data_file = os.path.join(result_dir, "daily_backtest_data.csv")
            history_df.to_csv(daily_data_file, encoding='utf-8-sig')
            
            # 7. Save the multi-period backtest summary.
            multi_period_summary_file = os.path.join(result_dir, "multi_period_summary.txt")
            with open(multi_period_summary_file, 'w', encoding='utf-8') as f:
                f.write("多周期回测结果摘要 - 新策略参数体系\n")
                f.write("="*50 + "\n\n")
                
                for period_key, period_data in multi_period_results.items():
                    if 'metrics' in period_data:
                        f.write(f"{period_data['description']}:\n")
                        f.write("-" * 30 + "\n")
                        # NOTE: rebinding `metrics` shadows the full-period
                        # metrics above; harmless since it is not used after
                        # this loop.
                        metrics = period_data['metrics']
                        f.write(f"  总收益率: {metrics.get('total_return', 0):.2%}\n")
                        f.write(f"  年化收益率(CAGR): {metrics.get('annualized_return_cagr', 0):.2%}\n")
                        f.write(f"  最大回撤: {metrics.get('max_drawdown', 0):.2%}\n")
                        f.write(f"  夏普比率: {metrics.get('sharpe_ratio', 0):.3f}\n")
                        f.write(f"  总交易次数: {metrics.get('total_trades', 0)}\n")
                        f.write("\n")

            # 8. Write the run-description document.
            desc_file = os.path.join(result_dir, "description.txt")
            with open(desc_file, 'w', encoding='utf-8') as f:
                f.write("新策略参数体系回测说明文档\n")
                f.write("="*50 + "\n")
                f.write(f"执行时间: {datetime.datetime.now()}\n")
                f.write(f"数据文件: {CSV_FILE_PATH}\n")
                f.write(f"遗传算法参数: {GA_PARAMS}\n")
                f.write("策略公式变更: 初始交易比率 × 线性调整因子\n")
                f.write("新增参数: initial_buy_ratio, initial_sell_ratio\n")
                f.write("更新参数: buy_base_factor, sell_base_factor (原buy_base_ratio, sell_base_ratio)\n")
                f.write(f"参数搜索空间 keys: {list(param_grid.keys())}\n")
                f.write(f"最大交易比例: {param_grid['max_trade_ratio']}\n")
                f.write("种子参数: 单一配置，适配新的乘法公式\n")
                f.write("初始交易比率: 0.3% (基础交易规模)\n")
                f.write("调整因子: 1.0 (基础调整倍数)\n")
                f.write("备注: 新策略使用初始比率乘以动态计算的调整因子来确定最终交易量。\n")

            print(f"所有结果已保存至: {result_dir}")
            print(f"报告已适配新的策略参数体系和公式")
        else:
            print("未能生成报告，因为最终回测没有产生有效数据。")

# --- Program entry point ---
if __name__ == "__main__":
    run_optimization()