"""
基于强化学习的量化交易策略
策略逻辑：
1. 使用PPO算法训练智能体
2. 状态空间包含价格、技术指标和仓位信息
3. 动作空间包括做多、做空、平仓
4. 奖励函数基于风险调整后的收益
"""

import numpy as np
import torch
import torch.nn as nn
from stable_baselines3 import PPO
# DummyVecEnv is exported from common.vec_env, not common.envs.
from stable_baselines3.common.vec_env import DummyVecEnv

class TradingEnvironment:
    """Trading environment over a fixed price/feature series.

    Actions: 0 = go long (position +10), 1 = go short (position -10),
    2 = close the position (position 0).  The reward for a step is the
    mark-to-market P&L normalized by the capital at risk.

    NOTE(review): this class does not subclass gym.Env and declares no
    observation_space/action_space, so wrapping it in an SB3 DummyVecEnv
    will likely fail at runtime -- verify against the stable-baselines3
    environment interface.
    """

    def __init__(self, prices, features, initial_balance=100000):
        self.prices = prices
        self.features = features
        # Remember the starting capital so reset() can restore it.
        self.initial_balance = initial_balance
        self.balance = initial_balance
        self.position = 0
        self.current_step = 0
        self.max_steps = len(prices) - 1

    def reset(self):
        """Reset balance/position/step and return the initial observation."""
        # Bug fix: previously reset to a hard-coded 100000, silently
        # ignoring the initial_balance passed to __init__.
        self.balance = self.initial_balance
        self.position = 0
        self.current_step = 0
        return self._get_observation()

    def _get_observation(self):
        """Current observation: feature vector plus the normalized position."""
        return np.concatenate([
            self.features[self.current_step],
            [self.position / 10]  # scale position to roughly [-1, 1]
        ])

    def step(self, action):
        """Apply `action`, advance one bar; return (obs, reward, done, info)."""
        done = self.current_step >= self.max_steps
        if done:
            # Episode already over: no trade, zero reward.
            return self._get_observation(), 0, True, {}

        # Execute the trade at the current price.
        price = self.prices[self.current_step]
        if action == 0:  # long
            self.position = 10
        elif action == 1:  # short
            self.position = -10
        else:  # close / flat
            self.position = 0

        # Advance to the next bar.
        self.current_step += 1
        new_price = self.prices[self.current_step]

        # Mark-to-market P&L for this step.
        pnl = self.position * (new_price - price)
        self.balance += pnl

        # Risk-adjusted reward: P&L per unit of capital at risk.
        # The 1e-5 guard avoids division by zero when flat (pnl is then 0).
        reward = pnl / (abs(self.position) * price + 1e-5)

        return self._get_observation(), reward, self.current_step >= self.max_steps, {}

class RLPolicy(nn.Module):
    """Two-hidden-layer MLP (64 units each, ReLU) used as a policy network."""

    def __init__(self, input_dim, output_dim):
        super().__init__()
        hidden = 64
        layers = [
            nn.Linear(input_dim, hidden),
            nn.ReLU(),
            nn.Linear(hidden, hidden),
            nn.ReLU(),
            nn.Linear(hidden, output_dim),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        """Map a batch of observations to output features/logits."""
        return self.net(x)

class RLTradingStrategy:
    """PPO-based trading strategy for a single ticker."""

    def __init__(self, ticker):
        self.ticker = ticker  # traded symbol, e.g. 'AAPL'
        self.model = None  # trained PPO model; None until train() is called

    def train(self, prices, features):
        """Train the PPO agent on the given price/feature history.

        NOTE(review): stable-baselines3 expects features_extractor_class
        to be a BaseFeaturesExtractor subclass constructed with an
        observation_space argument; RLPolicy takes (input_dim, output_dim)
        instead, so this policy_kwargs wiring will raise when the model is
        built -- verify against the SB3 custom-policy documentation.
        """
        env = DummyVecEnv([lambda: TradingEnvironment(prices, features)])

        policy_kwargs = dict(
            features_extractor_class=RLPolicy,
            # +1 accounts for the normalized-position element appended to
            # the feature vector in TradingEnvironment._get_observation.
            features_extractor_kwargs=dict(input_dim=features.shape[1] + 1, output_dim=64),
        )

        self.model = PPO("MlpPolicy", env, policy_kwargs=policy_kwargs, verbose=1)
        self.model.learn(total_timesteps=10000)

    def predict(self, observation):
        """Return the trade action (0=long, 1=short, 2=flat) for an observation.

        Falls back to 2 (flat) when no model has been trained yet.
        """
        if self.model is None:
            return 2  # default: stay flat
        action, _state = self.model.predict(observation, deterministic=True)
        # Bug fix: model.predict returns a numpy array; cast to a plain int
        # so the return type is consistent with the untrained fallback.
        return int(action)

if __name__ == '__main__':
    # Example usage on synthetic data.
    strategy = RLTradingStrategy('AAPL')

    # Simulated market: a seeded random-walk price series plus 5 random
    # feature columns (RNG call order matters for reproducibility).
    np.random.seed(42)
    n_bars, n_features = 1000, 5
    prices = np.cumsum(np.random.randn(n_bars)) + 100
    features = np.random.randn(n_bars, n_features)

    # Fit the PPO agent on the simulated history.
    strategy.train(prices, features)

    # Predict an action for the first bar with a flat (zero) position.
    test_obs = np.concatenate([features[0], [0]])
    action = strategy.predict(test_obs)
    print(f"预测交易动作: {['做多', '做空', '平仓'][action]}")