#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
QTorch GPU加速演示脚本
展示QEngine框架如何自动使用GPU加速量化交易回测
"""

import logging
import time
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
import os
import gc

# Logging setup: INFO level, with timestamp / logger name / level on every line.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)

# 导入QTorch组件
from qtorch.core.qengine import QEngine
from qtorch.strategy.dual_moving_average import DualMovingAverageStrategy
from qtorch.utils.device_utils import get_device, is_gpu_available, memory_safety


@memory_safety(threshold=0.85)
def run_cpu_backtest():
    """Run the demo backtest on CPU as a baseline.

    Returns:
        tuple: (backtest result object, elapsed wall-clock seconds).
    """
    t0 = time.time()

    logging.info("===== 使用CPU运行回测 =====")

    # Force the engine onto the CPU so this run serves as the baseline
    # for the GPU comparison.
    engine = QEngine(force_cpu=True)

    # Basic backtest configuration.
    engine.set_cash(1000000)       # initial capital
    engine.set_commission(0.0003)  # commission rate

    # Data: symbol 000001 (Ping An Bank), three calendar years.
    engine.add_data(symbol='000001', start_date='2020-01-01', end_date='2022-12-31')

    # Strategy: 20/60 dual moving-average crossover.
    engine.add_strategy(DualMovingAverageStrategy, short_window=20, long_window=60)

    result = engine.run()

    elapsed = time.time() - t0
    logging.info(f"CPU回测运行时间: {elapsed:.2f}秒")

    # Console summary plus saved charts.
    result.summary()
    out_dir = "./output/cpu_backtest"
    os.makedirs(out_dir, exist_ok=True)
    result.plot(output_dir=out_dir, save=True)

    return result, elapsed


@memory_safety(threshold=0.85)
def run_gpu_backtest():
    """Run the demo backtest with GPU acceleration when available.

    Returns:
        tuple: (backtest result object, elapsed wall-clock seconds).
    """
    t0 = time.time()

    logging.info("===== 使用GPU运行回测 =====")

    # Report which device will be used; warn when falling back to CPU.
    if is_gpu_available():
        device = get_device()
        logging.info(f"检测到可用设备: {device}")
    else:
        logging.warning("系统上没有检测到可用的GPU，将使用CPU进行计算")

    # Default engine auto-selects the GPU when one is present.
    engine = QEngine()

    # Basic backtest configuration (same as the CPU baseline).
    engine.set_cash(1000000)       # initial capital
    engine.set_commission(0.0003)  # commission rate

    # GPU-specific tuning knobs.
    engine.set_mixed_precision(True)    # mixed-precision compute
    engine.set_gpu_batch_size(1024)     # batch size for GPU kernels
    engine.enable_benchmark_mode(True)  # collect performance statistics

    # Identical data and strategy to the CPU run for a fair comparison.
    engine.add_data(symbol='000001', start_date='2020-01-01', end_date='2022-12-31')
    engine.add_strategy(DualMovingAverageStrategy, short_window=20, long_window=60)

    result = engine.run()

    elapsed = time.time() - t0
    logging.info(f"GPU回测运行时间: {elapsed:.2f}秒")

    # Summary, GPU performance stats, and saved charts.
    result.summary()
    result.print_performance_stats()

    out_dir = "./output/gpu_backtest"
    os.makedirs(out_dir, exist_ok=True)
    result.plot(output_dir=out_dir, save=True, show_gpu_stats=True)

    return result, elapsed


@memory_safety(threshold=0.85)
def plot_performance_comparison(cpu_result, gpu_result, cpu_time, gpu_time):
    """Render a two-panel CPU-vs-GPU comparison chart and save it as PNG.

    Args:
        cpu_result / gpu_result: backtest results exposing a `.metrics` dict.
        cpu_time / gpu_time: elapsed seconds for each run.

    Returns:
        str: path of the saved chart image.
    """
    metrics_to_compare = [
        '年化收益率', '最大回撤', '夏普比率', '交易次数'
    ]
    # Ratio-valued metrics that are shown as percentages on the chart.
    percent_metrics = ('年化收益率', '最大回撤')

    cpu_metrics = [cpu_result.metrics.get(name, 0) for name in metrics_to_compare]
    gpu_metrics = [gpu_result.metrics.get(name, 0) for name in metrics_to_compare]

    fig, (time_ax, metric_ax) = plt.subplots(2, 1, figsize=(12, 10))

    # Panel 1: wall-clock time per mode, annotated with the speedup
    # (0 when the GPU time is unusable, avoiding division by zero).
    speedup = cpu_time / gpu_time if gpu_time > 0 else 0

    time_ax.bar(['CPU模式', 'GPU加速模式'], [cpu_time, gpu_time], color=['blue', 'green'])
    time_ax.set_title(f'运行时间对比 (加速比: {speedup:.2f}x)')
    time_ax.set_ylabel('运行时间 (秒)')
    time_ax.grid(axis='y', linestyle='--', alpha=0.7)

    for pos, seconds in enumerate((cpu_time, gpu_time)):
        time_ax.text(pos, seconds + 0.1, f"{seconds:.2f}s", ha='center')

    # Panel 2: grouped bars for the backtest metrics.
    x = np.arange(len(metrics_to_compare))
    width = 0.35

    # Scale ratio metrics to percentages for display; pass others through.
    cpu_display = [value * 100 if name in percent_metrics else value
                   for name, value in zip(metrics_to_compare, cpu_metrics)]
    gpu_display = [value * 100 if name in percent_metrics else value
                   for name, value in zip(metrics_to_compare, gpu_metrics)]

    cpu_bars = metric_ax.bar(x - width / 2, cpu_display, width, label='CPU模式', color='blue')
    gpu_bars = metric_ax.bar(x + width / 2, gpu_display, width, label='GPU加速模式', color='green')

    metric_ax.set_title('回测指标对比')
    metric_ax.set_xticks(x)
    metric_ax.set_xticklabels(metrics_to_compare)
    metric_ax.legend()
    metric_ax.grid(axis='y', linestyle='--', alpha=0.7)

    # Value labels: sub-1 values get extra precision so they stay readable.
    def annotate(bars):
        for bar in bars:
            height = bar.get_height()
            if height >= 1:
                metric_ax.text(bar.get_x() + bar.get_width() / 2., height + 0.1,
                               f"{height:.2f}", ha='center', va='bottom')
            else:
                metric_ax.text(bar.get_x() + bar.get_width() / 2., height + 0.01,
                               f"{height:.4f}", ha='center', va='bottom')

    annotate(cpu_bars)
    annotate(gpu_bars)

    plt.tight_layout()

    # Save with a timestamped filename so repeated runs don't overwrite.
    os.makedirs("./output", exist_ok=True)
    output_path = f'./output/qtorch_gpu_comparison_{datetime.now().strftime("%Y%m%d_%H%M%S")}.png'
    plt.savefig(output_path)
    plt.close()

    logging.info(f"性能对比图表已保存至: {output_path}")
    return output_path


@memory_safety(threshold=0.85)
def run_multi_assets_gpu_backtest():
    """GPU-accelerated backtest demo spanning multiple asset types.

    Returns:
        The backtest result object.
    """
    t0 = time.time()

    logging.info("===== 多资产GPU加速回测演示 =====")

    engine = QEngine()
    engine.set_cash(1000000)  # initial capital

    # Asset 1: a stock (symbol 000001, Ping An Bank).
    engine.add_data(symbol='000001', start_date='2020-01-01',
                    end_date='2022-12-31', asset_type='stock')
    engine.set_commission(0.0003, asset_type='stock')

    # Asset 2: SSE 50 index (000016) used here as a stand-in for futures.
    engine.add_data(symbol='000016', start_date='2020-01-01',
                    end_date='2022-12-31', asset_type='futures')
    engine.set_commission(0.00005, asset_type='futures')
    engine.set_margin(0.08, asset_type='futures')
    engine.set_contract_multiplier(300, asset_type='futures')

    # Same 20/60 dual moving-average strategy as the single-asset demos.
    engine.add_strategy(DualMovingAverageStrategy, short_window=20, long_window=60)

    # Collect performance statistics during the run.
    engine.enable_benchmark_mode(True)

    result = engine.run()

    elapsed = time.time() - t0
    logging.info(f"多资产回测完成，耗时: {elapsed:.2f}秒")

    # Summary, GPU performance stats, and saved charts.
    result.summary()
    result.print_performance_stats()

    out_dir = "./output/multi_asset_gpu"
    os.makedirs(out_dir, exist_ok=True)
    result.plot(output_dir=out_dir, save=True, show_gpu_stats=True)

    logging.info(f"多资产回测报告已保存至: {out_dir}")

    return result


@memory_safety(threshold=0.85)
def main():
    """Demo driver: CPU baseline, GPU run, comparison chart, multi-asset demo."""
    logging.info("===== QTorch GPU加速演示 =====")

    # Run the same backtest in both modes and time them.
    cpu_result, cpu_time = run_cpu_backtest()
    gpu_result, gpu_time = run_gpu_backtest()

    # Side-by-side chart of runtime and backtest metrics.
    comparison_chart = plot_performance_comparison(
        cpu_result, gpu_result, cpu_time, gpu_time
    )

    # Speedup is 0 when the GPU time is unusable (avoids division by zero).
    speedup = cpu_time / gpu_time if gpu_time > 0 else 0
    logging.info("\n===== 性能对比 =====")
    logging.info(f"CPU模式运行时间: {cpu_time:.2f}秒")
    logging.info(f"GPU加速模式运行时间: {gpu_time:.2f}秒")
    logging.info(f"加速比: {speedup:.2f}x")

    # Multi-asset GPU demo; it logs and saves its own reports.
    run_multi_assets_gpu_backtest()

    logging.info("\n===== 演示完成 =====")
    logging.info(f"性能对比图表: {comparison_chart}")
    logging.info("CPU回测报告: ./output/cpu_backtest/")
    logging.info("GPU回测报告: ./output/gpu_backtest/")
    logging.info("多资产GPU回测报告: ./output/multi_asset_gpu/")


def run_memory_stress_test():
    """GPU memory stress test: exercises the memory-protection machinery.

    Allocates ~100MB tensors in a loop on the active GPU until the
    MemoryMonitor reports usage at or above its critical threshold, then
    triggers the monitor's cleanup path and logs how much memory was
    reclaimed. Returns early (with a warning) when no GPU is available.
    """
    # Imported locally so the rest of the module works without these extras.
    import torch
    from qtorch.utils.device_utils import MemoryMonitor, get_device, is_gpu_available
    
    logging.info("===== 显存压力测试 =====")
    
    # Nothing to stress without a GPU.
    if not is_gpu_available():
        logging.warning("系统上没有检测到可用的GPU，无法进行显存压力测试")
        return
    
    device = get_device()
    # Any device not reporting as CUDA is treated as Apple MPS here.
    device_name = "cuda" if str(device).startswith("cuda") else "mps"
    
    # Monitor with warning at 70% and critical at 85% of device memory.
    monitor = MemoryMonitor(device_name=device_name, warning_threshold=0.7, critical_threshold=0.85)
    monitor.start_monitoring()
    
    # Keeps the allocated tensors alive until the cleanup phase.
    tensors = []
    
    try:
        # Baseline usage before any allocations.
        # NOTE(review): assumes get_memory_usage() returns 'current' in bytes
        # and 'current_percent' as a 0-1 fraction — confirm against MemoryMonitor.
        initial_usage = monitor.get_memory_usage()
        logging.info(f"初始显存使用: {initial_usage['current'] / 1024**2:.1f}MB ({initial_usage['current_percent']:.1%})")
        
        # Allocate in fixed 100MB steps, bounded by max_iterations.
        allocation_size = 100 * 1024 * 1024  # 100MB
        max_iterations = 20
        
        logging.info(f"开始分配显存，每次 {allocation_size / 1024**2:.1f}MB")
        
        for i in range(max_iterations):
            try:
                # 100MB worth of float32 elements (4 bytes each).
                tensor = torch.zeros(allocation_size // 4, dtype=torch.float32, device=device)
                tensors.append(tensor)
                
                # Report usage after this allocation.
                usage = monitor.get_memory_usage()
                logging.info(f"第 {i+1} 次分配后显存使用: {usage['current'] / 1024**2:.1f}MB ({usage['current_percent']:.1%})")
                
                # Stop once the critical threshold is reached.
                if usage['current_percent'] >= monitor.critical_threshold:
                    logging.info("达到临界阈值，停止分配")
                    break
                
                # Touch the tensor to simulate real compute on it.
                _ = tensor + 1.0
                
            except Exception as e:
                # Allocation may fail before the threshold check trips
                # (e.g. OOM) — log and end the allocation loop.
                logging.error(f"显存分配失败: {str(e)}")
                break
        
        # Trigger the cleanup path directly.
        # HACK: calls a private MemoryMonitor method; consider a public API.
        logging.info("测试显存自动清理...")
        monitor._handle_critical_memory()
        
        # Usage after cleanup.
        final_usage = monitor.get_memory_usage()
        logging.info(f"清理后显存使用: {final_usage['current'] / 1024**2:.1f}MB ({final_usage['current_percent']:.1%})")
        
        # Recovery rate: fraction of the above-baseline peak that was freed.
        # Only computed when usage did not return to baseline.
        # NOTE(review): divides by (peak_memory - initial); zero if the peak
        # never rose above baseline — confirm this can't happen here.
        if initial_usage['current'] < final_usage['current']:
            recovery_rate = (monitor.peak_memory - final_usage['current']) / (monitor.peak_memory - initial_usage['current'])
            logging.info(f"显存回收率: {recovery_rate:.1%}")
        
    finally:
        # Always stop the monitor and release everything we allocated.
        monitor.stop_monitoring()
        tensors.clear()
        if device_name == "cuda":
            torch.cuda.empty_cache()
        elif device_name == "mps" and hasattr(torch.mps, 'empty_cache'):
            # empty_cache only exists on newer torch.mps builds.
            torch.mps.empty_cache()
        gc.collect()
    
    logging.info("显存压力测试完成")


@memory_safety(threshold=0.85)
def run_all_tests():
    """Run every demo: both backtests, the comparison chart, the
    multi-asset demo, and the GPU memory stress test."""
    # Time the same backtest in both modes.
    cpu_result, cpu_time = run_cpu_backtest()
    gpu_result, gpu_time = run_gpu_backtest()

    # Side-by-side chart of runtime and backtest metrics.
    comparison_chart = plot_performance_comparison(
        cpu_result, gpu_result, cpu_time, gpu_time
    )

    # Multi-asset demo logs and saves its own reports; result unused here.
    run_multi_assets_gpu_backtest()

    # Exercise the memory-protection machinery.
    run_memory_stress_test()

    # Speedup is 0 when the GPU time is unusable (avoids division by zero).
    speedup = cpu_time / gpu_time if gpu_time > 0 else 0
    logging.info("\n===== 性能对比 =====")
    logging.info(f"CPU模式运行时间: {cpu_time:.2f}秒")
    logging.info(f"GPU加速模式运行时间: {gpu_time:.2f}秒")
    logging.info(f"加速比: {speedup:.2f}x")

    logging.info("\n===== 测试完成 =====")
    logging.info(f"性能对比图表: {comparison_chart}")
    logging.info("CPU回测报告: ./output/cpu_backtest/")
    logging.info("GPU回测报告: ./output/gpu_backtest/")
    logging.info("多资产GPU回测报告: ./output/multi_asset_gpu/")

if __name__ == "__main__":
    # 可以选择运行所有测试或单独运行main函数
    run_all_tests()
    # 或者只运行原来的main函数
    # main()