#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
QTorch量化交易框架 - 机器学习和因子分析演示
展示如何使用QML和QFactor模块进行量化策略研究
"""

import os
import logging
import argparse
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime

from qtorch.ml.qml import QML, QMLFeatureExtractor, QMLModel, QTorchMLStrategy
from qtorch.factor.qfactor import QFactor, Factor, MultiFactorModel
from qtorch.data.aksharestockerfetcher import AKShareStockFetcher
from qtorch.core.qengine import QEngine

# Configure root logging: INFO level, timestamped records to stderr.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[logging.StreamHandler()]
)

def load_data(symbol='000001', start_date='2020-01-01', end_date='2022-12-31'):
    """Fetch daily market data for one symbol via AKShare, using a local cache.

    Args:
        symbol: stock code to fetch.
        start_date: inclusive start of the date range (YYYY-MM-DD).
        end_date: inclusive end of the date range (YYYY-MM-DD).

    Returns:
        DataFrame of market data indexed by trading date.

    Raises:
        ValueError: when the fetcher returns nothing for the symbol.
    """
    fetcher = AKShareStockFetcher(cache_dir='./data_cache')
    frame = fetcher.get_market_data(symbol, start_date, end_date)

    # Treat both a None result and an empty frame as a failed fetch.
    if frame is None or frame.empty:
        raise ValueError(f"获取 {symbol} 数据失败")

    logging.info(f"成功加载 {symbol} 数据，共 {len(frame)} 个交易日")
    return frame

def ml_strategy_demo(data, train_ratio=0.7, model_name='random_forest', mode='train'):
    """Train/evaluate an ML classifier on technical features and backtest it.

    The classifier predicts whether the 5-day-ahead return is positive; the
    resulting signals are backtested both with a simple vectorised pass and
    through QEngine. Plots and the model file are written under ./output/ml.

    Args:
        data: OHLCV DataFrame with a 'close' column, indexed by trading date.
        train_ratio: fraction of the sample used for training (rest is test).
        model_name: model identifier passed to QML.create_model.
        mode: 'train' forces retraining; any other value loads the saved
            model when one exists (training still runs if no file is found).

    Returns:
        dict with keys 'model', 'strategy', 'backtest_data' and 'metrics'.
    """
    logging.info("开始机器学习策略演示...")

    os.makedirs('./output/ml', exist_ok=True)

    qml = QML()

    # Feature configuration shared between training and the live strategy,
    # so both sides see identical inputs.
    features_config = {
        'sma': {'periods': [5, 10, 20, 50]},
        'rsi': {'period': 14},
        'macd': {},
        'volatility': {'period': 20},
        'bollinger': {'period': 20, 'std_dev': 2.0},
        'lagged_features': {'columns': ['close'], 'lags': [1, 2, 3, 5]},
        'time_features': {}
    }

    feature_extractor = QMLFeatureExtractor()
    features = feature_extractor.extract_features(data, features_config)

    # Binary target: 1 when the 5-day-ahead return is positive, else 0.
    # The series is named so the summary log below does not print 'None'.
    future_returns = data['close'].pct_change(5).shift(-5)
    target = pd.Series(np.where(future_returns > 0, 1, 0), index=data.index,
                       name='up_5d')

    # Chronological split — no shuffling, this is time-series data.
    split_idx = int(len(features) * train_ratio)
    train_features = features.iloc[:split_idx]
    train_target = target.iloc[:split_idx]
    test_features = features.iloc[split_idx:]
    test_target = target.iloc[split_idx:]

    logging.info(f"特征数量: {features.shape[1]}，目标变量: {target.name}，训练集: {len(train_features)}，测试集: {len(test_features)}")

    model_path = './output/ml/ml_strategy_model.pkl'

    if mode == 'train' or not os.path.exists(model_path):
        logging.info(f"训练 {model_name} 模型...")

        # Scale the training sample and hold out 20% for validation.
        preprocessor = qml.preprocessor
        prep_data = preprocessor.prepare_data(train_features, train_target, test_size=0.2)

        model = qml.create_model(model_type='classifier', model_name=model_name)
        model.fit(prep_data['X_train_scaled'], prep_data['y_train'])

        train_metrics = model.evaluate(prep_data['X_train_scaled'], prep_data['y_train'])
        val_metrics = model.evaluate(prep_data['X_test_scaled'], prep_data['y_test'])

        logging.info(f"训练集评估指标: {train_metrics}")
        logging.info(f"验证集评估指标: {val_metrics}")

        model.save(model_path)
        logging.info(f"模型已保存至: {model_path}")

        fig = qml.visualize_results(model, prep_data['X_test_scaled'], prep_data['y_test'])
        plt.savefig('./output/ml/model_evaluation.png')
        plt.close(fig)
    else:
        logging.info(f"加载预训练模型: {model_path}")
        model = QMLModel.load(model_path)

    # Strategy wrapper that re-extracts the same features and queries the
    # saved model for trading signals.
    ml_strategy = QTorchMLStrategy(model_path=model_path, features_config=features_config)

    signals = ml_strategy.generate_signals(data.iloc[split_idx:])

    # Vectorised backtest; positions are lagged one day so a signal generated
    # today earns tomorrow's return.
    test_data = data.iloc[split_idx:].copy()
    test_data['signal'] = signals
    test_data['position'] = test_data['signal'].shift(1).fillna(0)
    test_data['returns'] = test_data['close'].pct_change()
    test_data['strategy_returns'] = test_data['position'] * test_data['returns']

    test_data['cum_returns'] = (1 + test_data['returns']).cumprod()
    test_data['cum_strategy_returns'] = (1 + test_data['strategy_returns']).cumprod()

    # Equity curves: strategy vs buy & hold.
    plt.figure(figsize=(12, 6))
    plt.plot(test_data.index, test_data['cum_returns'], label='Buy & Hold')
    plt.plot(test_data.index, test_data['cum_strategy_returns'], label='ML策略')
    plt.title('机器学习策略回测结果')
    plt.xlabel('日期')
    plt.ylabel('累积收益')
    plt.legend()
    plt.grid(True)
    plt.savefig('./output/ml/ml_strategy_backtest.png')
    plt.close()

    # Performance metrics (252 trading days per year assumed).
    total_return = test_data['cum_strategy_returns'].iloc[-1] - 1
    annual_return = (1 + total_return) ** (252 / len(test_data)) - 1
    daily_returns = test_data['strategy_returns']
    volatility = daily_returns.std() * np.sqrt(252)
    sharpe_ratio = annual_return / volatility if volatility > 0 else 0
    # Win rate over days with an open position; guard the no-trade case,
    # which previously raised ZeroDivisionError.
    traded = daily_returns[daily_returns != 0]
    win_rate = (traded > 0).mean() if len(traded) else 0.0

    # Maximum drawdown measured against the running peak of the equity curve.
    cum_returns = test_data['cum_strategy_returns']
    running_max = cum_returns.cummax()
    drawdown = (cum_returns - running_max) / running_max
    max_drawdown = drawdown.min()

    logging.info(f"策略总收益: {total_return:.2%}")
    logging.info(f"策略年化收益: {annual_return:.2%}")
    logging.info(f"策略波动率: {volatility:.2%}")
    logging.info(f"策略夏普比率: {sharpe_ratio:.2f}")
    logging.info(f"策略胜率: {win_rate:.2%}")
    logging.info(f"策略最大回撤: {max_drawdown:.2%}")

    # Replay the same signals through the QEngine backtester for comparison.
    engine = QEngine()
    engine.add_data(symbol=data.index.name, start_date=test_data.index[0].strftime('%Y-%m-%d'), end_date=test_data.index[-1].strftime('%Y-%m-%d'))

    from qtorch.core.qstrategy import QStrategy

    class PredefinedSignalStrategy(QStrategy):
        """Replays a precomputed signal sequence, then goes flat (returns 0)."""

        def __init__(self, predefined_signals):
            super().__init__()
            self.signals = predefined_signals
            self.current_idx = 0

        def next(self):
            if self.current_idx < len(self.signals):
                signal = self.signals[self.current_idx]
                self.current_idx += 1
                return signal
            return 0

    engine.add_strategy(PredefinedSignalStrategy, predefined_signals=signals)

    result = engine.run()
    result.plot(output_dir='./output/ml')

    return {
        'model': model,
        'strategy': ml_strategy,
        'backtest_data': test_data,
        'metrics': {
            'total_return': total_return,
            'annual_return': annual_return,
            'volatility': volatility,
            'sharpe_ratio': sharpe_ratio,
            'win_rate': win_rate,
            'max_drawdown': max_drawdown
        }
    }

def factor_analysis_demo(data):
    """Run the factor-research demo: single-factor IC/backtest, cross-factor
    comparisons and an equal-weight multi-factor model.

    NOTE(review): this adds a 'returns' column to the caller's DataFrame in
    place — confirm downstream callers tolerate the mutation.

    Args:
        data: OHLCV DataFrame with a 'close' column, indexed by trading date.

    Returns:
        dict with 'factors', 'model' and 'backtest_result' (the multi-factor
        model's backtest output).
    """
    logging.info("开始因子分析演示...")

    os.makedirs('./output/factor', exist_ok=True)

    qfactor = QFactor()

    # Daily returns, plus next-day forward returns used as the IC/backtest
    # target throughout.
    data['returns'] = data['close'].pct_change()
    future_returns = data['returns'].shift(-1)

    factors = qfactor.create_technical_factors(data)

    logging.info(f"创建了 {len(factors)} 个技术分析因子")

    # --- Single-factor analysis -------------------------------------------
    factor_name = "momentum_1m"
    factor = qfactor.get_factor(factor_name)

    if factor is not None and factor.data is not None:
        ic_result = factor.calc_ic(future_returns)
        logging.info(f"因子 {factor_name} 的IC值: {ic_result}")

        # Called for its side effect of populating factor.stats; the return
        # value of this single-factor backtest was never used.
        factor.backtest(future_returns)

        fig = factor.plot(future_returns)
        plt.savefig(f'./output/factor/{factor_name}_analysis.png')
        plt.close(fig)

        if 'annual_long_short' in factor.stats:
            logging.info(f"因子 {factor_name} 多空组合年化收益率: {factor.stats['annual_long_short']:.2%}")
        if 'sharpe_long_short' in factor.stats:
            logging.info(f"因子 {factor_name} 多空组合夏普比率: {factor.stats['sharpe_long_short']:.2f}")

    # --- Cross-factor comparisons -----------------------------------------
    # Kept for any side effects inside the library; the returned matrix was
    # unused, so it is no longer bound to a name.
    qfactor.correlation_matrix()

    fig = qfactor.library.plot_correlation_matrix()
    plt.savefig('./output/factor/factor_correlation.png')
    plt.close(fig)

    try:
        fig = qfactor.library.plot_ic_comparison(future_returns)
        plt.savefig('./output/factor/factor_ic_comparison.png')
        plt.close(fig)
    except Exception as e:
        logging.error(f"绘制IC比较图表失败: {str(e)}")

    try:
        fig = qfactor.library.plot_returns_comparison(future_returns)
        plt.savefig('./output/factor/factor_returns_comparison.png')
        plt.close(fig)
    except Exception as e:
        logging.error(f"绘制收益率比较图表失败: {str(e)}")

    # --- Multi-factor model ------------------------------------------------
    model = qfactor.create_model("技术分析多因子模型")

    top_factors = ["momentum_1m", "rsi_14", "ma_cross", "volume_ratio"]

    # Distinct loop names so the single-factor `factor_name`/`factor` above
    # are not clobbered.
    for name in top_factors:
        candidate = qfactor.get_factor(name)
        if candidate is not None and candidate.data is not None:
            model.add_factor(candidate)

    # Equal-weight combination; called for its effect on the model state —
    # the combined factor itself was never referenced.
    model.combine_factors(method='equal_weight')

    backtest_result = model.backtest(future_returns)

    fig = model.plot(future_returns)
    plt.savefig('./output/factor/multi_factor_model.png')
    plt.close(fig)

    return {
        'factors': factors,
        'model': model,
        'backtest_result': backtest_result
    }

def pytorch_ml_demo(data, epochs=50):
    """Train a PyTorch regression net on technical features and backtest it.

    Predicts the 5-day forward return, converts predictions into long/short
    signals (±1% thresholds) and writes plots under ./output/pytorch.

    Args:
        data: OHLCV DataFrame with a 'close' column, indexed by trading date.
        epochs: maximum number of training epochs.

    Returns:
        dict with 'model', 'history', 'metrics' and 'backtest_data', or None
        (with a warning) when PyTorch is not installed.
    """
    # Probe PyTorch availability; this demo is optional.
    try:
        import torch
        import torch.nn as nn
    except ImportError:
        logging.warning("PyTorch未安装，跳过PyTorch演示")
        return None

    logging.info("开始PyTorch神经网络模型演示...")

    os.makedirs('./output/pytorch', exist_ok=True)

    qml = QML()

    # A smaller feature set than the ML demo.
    features_config = {
        'sma': {'periods': [5, 10, 20, 50]},
        'rsi': {'period': 14},
        'macd': {},
        'volatility': {'period': 20}
    }

    feature_extractor = QMLFeatureExtractor()
    features = feature_extractor.extract_features(data, features_config)

    # Regression target: 5-day-ahead return.
    target = data['close'].pct_change(5).shift(-5)

    # Drop rows with any missing feature or target value.
    mask = ~(features.isna().any(axis=1) | target.isna())
    features = features[mask]
    target = target[mask]

    # Standardise features. NOTE(review): fitting the scaler on the full
    # sample leaks test-set statistics into training — acceptable for a demo,
    # but fit on the training slice only for real research.
    from sklearn.preprocessing import StandardScaler
    scaler = StandardScaler()
    features_scaled = pd.DataFrame(
        scaler.fit_transform(features),
        index=features.index,
        columns=features.columns
    )

    # Chronological 80/20 train/test split.
    train_size = int(0.8 * len(features_scaled))
    X_train = features_scaled.iloc[:train_size]
    y_train = target.iloc[:train_size]
    X_test = features_scaled.iloc[train_size:]
    y_test = target.iloc[train_size:]

    from qtorch.ml.qml import QTorchPyTorchModel

    input_dim = X_train.shape[1]
    hidden_dims = [64, 32]
    output_dim = 1  # single-output regression

    model = QTorchPyTorchModel(
        input_dim=input_dim,
        hidden_dims=hidden_dims,
        output_dim=output_dim,
        dropout=0.2,
        learning_rate=0.001
    )

    history = model.fit(
        X_train, y_train,
        batch_size=32,
        epochs=epochs,
        validation_split=0.2,
        early_stopping=True,
        patience=10,
        verbose=1
    )

    # Training/validation loss curves.
    plt.figure(figsize=(10, 6))
    plt.plot(history['train_loss'], label='训练损失')
    if 'val_loss' in history:
        plt.plot(history['val_loss'], label='验证损失')
    plt.title('模型训练损失')
    plt.xlabel('Epoch')
    plt.ylabel('损失')
    plt.legend()
    plt.grid(True)
    plt.savefig('./output/pytorch/training_loss.png')
    plt.close()

    # Flatten predictions to 1-D: torch regressors commonly return an (n, 1)
    # array, and a 2-D boolean mask would break the signal assignment below
    # (presumed shape — verify against QTorchPyTorchModel.predict).
    y_pred = np.asarray(model.predict(X_test)).ravel()

    from sklearn.metrics import mean_squared_error, r2_score
    mse = mean_squared_error(y_test, y_pred)
    r2 = r2_score(y_test, y_pred)

    logging.info(f"测试集MSE: {mse:.6f}")
    logging.info(f"测试集R²: {r2:.4f}")

    # Predicted-vs-actual scatter with the identity reference line.
    plt.figure(figsize=(10, 6))
    plt.scatter(y_test, y_pred, alpha=0.5)
    plt.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'r--')
    plt.title('预测值 vs 实际值')
    plt.xlabel('实际收益率')
    plt.ylabel('预测收益率')
    plt.grid(True)
    plt.savefig('./output/pytorch/prediction_scatter.png')
    plt.close()

    # Trade rule: long above +1% predicted return, short below -1%, else flat.
    signals = np.zeros(len(X_test))
    signals[y_pred > 0.01] = 1
    signals[y_pred < -0.01] = -1

    # Vectorised backtest; positions are lagged one day so a signal generated
    # today earns tomorrow's return.
    test_data = data.loc[X_test.index].copy()
    test_data['signal'] = signals
    test_data['position'] = test_data['signal'].shift(1).fillna(0)
    test_data['returns'] = test_data['close'].pct_change()
    test_data['strategy_returns'] = test_data['position'] * test_data['returns']

    test_data['cum_returns'] = (1 + test_data['returns']).cumprod()
    test_data['cum_strategy_returns'] = (1 + test_data['strategy_returns']).cumprod()

    # Equity curves: strategy vs buy & hold.
    plt.figure(figsize=(12, 6))
    plt.plot(test_data.index, test_data['cum_returns'], label='Buy & Hold')
    plt.plot(test_data.index, test_data['cum_strategy_returns'], label='PyTorch策略')
    plt.title('PyTorch神经网络策略回测结果')
    plt.xlabel('日期')
    plt.ylabel('累积收益')
    plt.legend()
    plt.grid(True)
    plt.savefig('./output/pytorch/pytorch_strategy_backtest.png')
    plt.close()

    return {
        'model': model,
        'history': history,
        'metrics': {
            'mse': mse,
            'r2': r2
        },
        'backtest_data': test_data
    }

def main():
    """Command-line entry point: parse arguments, load data, dispatch demos."""
    parser = argparse.ArgumentParser(description='QTorch量化交易框架 - 机器学习和因子分析演示')
    parser.add_argument('--mode', type=str, default='ml',
                        choices=['ml', 'factor', 'pytorch', 'all'],
                        help='演示模式')
    parser.add_argument('--symbol', type=str, default='000001',
                        help='股票代码')
    parser.add_argument('--start', type=str, default='2018-01-01',
                        help='回测开始日期')
    parser.add_argument('--end', type=str, default='2022-12-31',
                        help='回测结束日期')
    parser.add_argument('--train', action='store_true',
                        help='是否重新训练模型')
    parser.add_argument('--epochs', type=int, default=50,
                        help='训练轮数(PyTorch模式)')

    args = parser.parse_args()

    os.makedirs('./output', exist_ok=True)

    data = load_data(args.symbol, args.start, args.end)

    # Stash the symbol on the index so downstream code can recover it.
    data.index.name = args.symbol

    run_all = args.mode == 'all'
    try:
        if run_all or args.mode == 'ml':
            ml_strategy_demo(data, mode='train' if args.train else 'test')

        if run_all or args.mode == 'factor':
            factor_analysis_demo(data)

        if run_all or args.mode == 'pytorch':
            pytorch_ml_demo(data, epochs=args.epochs)

        logging.info("所有演示任务已完成！结果保存在: ./output")

    except Exception as e:
        logging.error(f"演示过程中出错: {str(e)}", exc_info=True)
        raise

# Script entry point.
if __name__ == '__main__':
    main()