"""
深度强化学习组合优化策略
策略逻辑：
1. 使用A2C算法优化多资产组合权重
2. 状态空间包含各资产历史收益率、波动率和相关性
3. 动作空间为资产配置权重(0-1)
4. 奖励函数基于夏普比率
"""

import numpy as np
import pandas as pd
from stable_baselines3 import A2C
from stable_baselines3.common.vec_env import DummyVecEnv

class PortfolioEnv:
    """Portfolio-allocation environment for RL training.

    State  : concatenation of per-asset mean returns, standard deviations,
             and the upper triangle (including the diagonal) of the
             correlation matrix over the trailing ``lookback`` window —
             i.e. ``2*n + n*(n+1)/2`` values for ``n`` assets.
    Action : raw per-asset weights; clipped to [0, 1] and renormalized.
    Reward : annualized Sharpe ratio of the portfolio over the lookback
             window ending at (and including) the current step.

    NOTE(review): this class does not subclass ``gym.Env`` and defines no
    ``observation_space`` / ``action_space``, so wrapping it in SB3's
    ``DummyVecEnv`` will fail at runtime — confirm against the SB3 env API.
    """

    def __init__(self, returns_data, lookback: int = 60):
        # returns_data: 2-D array of per-period asset returns, shape
        # (T, n_assets). Assumed to be a numpy ndarray — step() uses
        # integer row indexing and `@` matrix products.
        self.returns = returns_data
        self.lookback = lookback
        # Start at the first step that has a full lookback window behind it.
        self.current_step = lookback
        self.n_assets = returns_data.shape[1]

    def reset(self):
        """Rewind to the first step with a full lookback window and return the state."""
        self.current_step = self.lookback
        return self._get_state()

    def _get_state(self):
        """Build the state vector from the trailing lookback window."""
        window = self.returns[self.current_step - self.lookback:self.current_step]
        means = window.mean(axis=0)
        stds = window.std(axis=0)
        corr = np.corrcoef(window.T)
        # triu_indices includes the diagonal: n*(n+1)/2 correlation entries.
        return np.concatenate([
            means, stds, corr[np.triu_indices(self.n_assets)]
        ])

    def step(self, action):
        """Apply the weight action; return (next_state, reward, done, info).

        Fixes vs. original:
        - Guard the normalization: clipping could zero every weight, making
          ``weights / weights.sum()`` divide by zero. Fall back to equal
          weights in that case.
        - The original computed the current step's portfolio return but never
          used it; the reward window now ends at (and includes) the current
          step so the newest observation affects the reward.
        """
        weights = np.clip(action, 0, 1)
        total = weights.sum()
        if total > 0:
            weights = weights / total  # normalize to sum to 1
        else:
            weights = np.full(self.n_assets, 1.0 / self.n_assets)

        # Annualized Sharpe ratio over the window ending at the current step.
        window = self.returns[self.current_step - self.lookback + 1:self.current_step + 1]
        portfolio_returns = window @ weights
        sharpe = portfolio_returns.mean() / (portfolio_returns.std() + 1e-6) * np.sqrt(252)

        # Advance; done when the next step would run past the data.
        self.current_step += 1
        done = self.current_step >= len(self.returns) - 1

        return self._get_state(), sharpe, done, {}

class DRLPortfolioStrategy:
    """A2C-based portfolio allocator with an equal-weight fallback before training."""

    def __init__(self, asset_names):
        # asset_names: ordered list of asset identifiers; its length fixes
        # the weight-vector dimension used by the fallback path.
        self.asset_names = asset_names
        self.model = None  # set by train()

    def train(self, returns_data, total_timesteps: int = 10000):
        """Train the A2C model on historical returns.

        ``total_timesteps`` generalizes the previously hard-coded 10000
        (the default preserves the original behavior).
        """
        env = DummyVecEnv([lambda: PortfolioEnv(returns_data)])
        self.model = A2C("MlpPolicy", env, verbose=1)
        self.model.learn(total_timesteps=total_timesteps)

    def predict_weights(self, state):
        """Return portfolio weights that sum to 1.

        Fix vs. original: clipping alone left the predicted weights
        unnormalized, inconsistent with PortfolioEnv.step() which
        renormalizes to sum to 1. Normalize here too, with an
        equal-weight fallback when the clipped prediction is all zeros
        (or when no model has been trained).
        """
        n = len(self.asset_names)
        if self.model is None:
            return np.ones(n) / n  # untrained: equal weights
        raw, _ = self.model.predict(state, deterministic=True)
        weights = np.clip(raw, 0, 1)
        total = weights.sum()
        if total > 0:
            return weights / total
        return np.ones(n) / n

if __name__ == '__main__':
    # Example usage with simulated data.
    assets = ['SPY', 'GLD', 'TLT', 'QQQ']
    strategy = DRLPortfolioStrategy(assets)

    # Simulated daily returns: 1000 periods, ~1% volatility.
    np.random.seed(42)
    returns = np.random.randn(1000, len(assets)) * 0.01

    # Train the model.
    strategy.train(returns)

    # Fix vs. original: PortfolioEnv's state vector is
    #   means (n) + stds (n) + upper triangle of the correlation matrix
    #   INCLUDING the diagonal (n*(n+1)/2)
    # and is NOT scaled by the lookback length. The original sized the
    # test state as 60*(2n + n(n-1)/2) = 840, which does not match the
    # observation dimension the policy was trained on (18 for n=4).
    n = len(assets)
    state_dim = 2 * n + n * (n + 1) // 2
    test_state = np.random.randn(state_dim)
    weights = strategy.predict_weights(test_state)
    print("最优资产权重:", dict(zip(assets, weights)))