"""
基于强化学习的期权定价策略
策略特点：
1. 使用PPO算法优化期权定价模型
2. 动态调整定价参数适应市场变化
3. 支持美式和欧式期权定价
"""

import numpy as np
import torch
from scipy.stats import norm
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import DummyVecEnv

class OptionPricingEnv:
    """RL environment for European call option pricing.

    State: [spot, strike, time-to-expiry (years), volatility, risk-free rate].
    Action: [price-bias coefficient, volatility-adjustment coefficient],
    each expected in roughly [-1, 1].
    Reward: negative absolute pricing error versus a simulated market quote.

    NOTE(review): this class does not subclass gym.Env and defines no
    action_space/observation_space, so SB3's DummyVecEnv/PPO will not accept
    it as-is — confirm against the training pipeline.
    """

    def __init__(self, underlying_prices, risk_free=0.02, max_steps=100):
        """
        underlying_prices: historical prices of the underlying asset
        risk_free: annualized risk-free rate
        max_steps: maximum number of steps per episode
        """
        self.prices = np.asarray(underlying_prices, dtype=float)
        if len(self.prices) < 2:
            raise ValueError("underlying_prices must contain at least 2 prices")
        self.risk_free = risk_free
        # Clamp the horizon to the price path so step() can never index past
        # the end (original could IndexError when max_steps > len(prices)).
        self.max_steps = min(max_steps, len(self.prices))
        self.current_step = 0
        self.current_price = self.prices[0]
        # Draw the strike ONCE per episode so the observed state is
        # consistent (original re-randomized it on every _get_state() call,
        # so step() priced one strike and returned a state with another).
        self.strike_price = self.current_price * np.random.uniform(0.9, 1.1)
        # State space: [spot, strike, remaining time, volatility, risk-free rate]
        self.state_dim = 5

    def reset(self):
        """Reset the environment and return the initial observation."""
        self.current_step = 0
        self.current_price = self.prices[0]
        self.strike_price = self.current_price * np.random.uniform(0.9, 1.1)
        return self._get_state()

    def _get_state(self):
        """Build the current observation vector."""
        remaining_time = (len(self.prices) - self.current_step) / 252.0  # annualized
        if self.current_step > 0:
            volatility = np.std(self.prices[:self.current_step + 1])
        else:
            volatility = 0.2  # no history yet: fall back to a typical vol

        return np.array([
            self.current_price,
            self.strike_price,
            remaining_time,
            volatility,
            self.risk_free,
        ])

    @staticmethod
    def _bs_call(S, K, T, sigma, r):
        """Black-Scholes European call price.

        Degenerate inputs (T <= 0 or sigma <= 0, e.g. a constant early price
        history makes np.std return 0) fall back to intrinsic value instead
        of dividing by zero.
        """
        if T <= 0 or sigma <= 0:
            return max(S - K, 0.0)
        d1 = (np.log(S / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
        d2 = d1 - sigma * np.sqrt(T)
        return S * norm.cdf(d1) - K * np.exp(-r * T) * norm.cdf(d2)

    def step(self, action):
        """
        action: [price-bias coefficient, volatility-adjustment coefficient]
        Returns (next_state, reward, done, info).
        """
        # Decode the action, capping the adjustment magnitudes.
        price_bias = action[0] * 0.1        # at most +/-10% price adjustment
        vol_adjust = 1.0 + action[1] * 0.05  # at most +/-5% vol adjustment

        # Price off the CURRENT state exactly once (original called
        # _get_state() here, which re-drew the strike).
        S, K, T, sigma, r = self._get_state()
        call_price = self._bs_call(S, K, T, sigma, r)

        # Apply the policy's adjustments.
        adjusted_price = call_price * (1 + price_bias)
        adjusted_vol = sigma * vol_adjust  # informational only, as in original

        # Advance along the price path (index is safe: max_steps is clamped).
        self.current_step += 1
        self.current_price = self.prices[self.current_step]

        # Reward: negative absolute error vs. a noisy simulated market quote.
        market_price = call_price * np.random.uniform(0.95, 1.05)
        price_error = abs(adjusted_price - market_price)
        reward = -price_error  # minimize pricing error

        done = self.current_step >= self.max_steps - 1

        return self._get_state(), reward, done, {
            'theoretical_price': call_price,
            'adjusted_price': adjusted_price,
            'market_price': market_price
        }

class RLOptionPricing:
    """PPO-based option pricing strategy.

    Prices with plain Black-Scholes until train() has produced a model;
    afterwards the policy's first action scales the BS price by up to +/-10%.
    """

    def __init__(self):
        # Trained PPO model; None until train() has been called.
        self.model = None

    @staticmethod
    def _black_scholes_call(S, K, T, sigma, r):
        """Black-Scholes European call price for [spot, strike, years, vol, rate].

        Shared helper (the original duplicated this formula in both pricing
        branches). Degenerate T/sigma fall back to intrinsic value instead
        of dividing by zero.
        """
        if T <= 0 or sigma <= 0:
            return max(S - K, 0.0)
        d1 = (np.log(S / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
        d2 = d1 - sigma * np.sqrt(T)
        return S * norm.cdf(d1) - K * np.exp(-r * T) * norm.cdf(d2)

    def train(self, underlying_prices, total_timesteps=50000):
        """Train the pricing policy with PPO on the given price path."""
        env = DummyVecEnv([lambda: OptionPricingEnv(underlying_prices)])
        self.model = PPO("MlpPolicy", env, verbose=1)
        self.model.learn(total_timesteps=total_timesteps)

    def price_option(self, state):
        """Price a call option for state = [S, K, T, sigma, r].

        Returns the raw Black-Scholes price when no model is trained,
        otherwise the BS price scaled by the policy's bias action.
        """
        S, K, T, sigma, r = state
        call_price = self._black_scholes_call(S, K, T, sigma, r)
        if self.model is None:
            return call_price

        action, _ = self.model.predict(state, deterministic=True)
        return call_price * (1 + action[0] * 0.1)

if __name__ == '__main__':
    # Example usage: simulate a price path, train the policy on it, then
    # compare the RL-adjusted price against the plain Black-Scholes price.
    np.random.seed(42)
    prices = np.cumprod(1 + np.random.randn(1000)*0.01) * 100  # simulated price path (multiplicative random walk)
    
    strategy = RLOptionPricing()
    strategy.train(prices)
    
    # Price a test option.
    test_state = [100, 105, 0.5, 0.2, 0.02]  # [spot, strike, time to expiry (years), volatility, risk-free rate]
    print("RL定价:", strategy.price_option(test_state))
    
    # Baseline: price the same option directly with the Black-Scholes
    # closed-form call formula for comparison.
    S, K, T, sigma, r = test_state
    d1 = (np.log(S/K) + (r + 0.5*sigma**2)*T) / (sigma*np.sqrt(T))
    d2 = d1 - sigma*np.sqrt(T)
    bs_price = S * norm.cdf(d1) - K*np.exp(-r*T)*norm.cdf(d2)
    print("BS模型定价:", bs_price)