"""
基于强化学习的期权做市策略
策略特点：
1. 使用PPO算法优化报价策略
2. 考虑波动率曲面和希腊值风险
3. 实现动态价差调整
"""

import torch
import torch.nn as nn
import numpy as np
from torch.distributions import Normal
import gym
from gym import spaces

class OptionMarketEnv(gym.Env):
    """Toy option market-making environment.

    The observation is a flat vector with 8 features per
    (strike, expiry) contract (order-book levels plus greeks); the
    action is a bid/ask quote offset pair per contract.
    """

    def __init__(self, num_strikes=5, num_expiries=3):
        super().__init__()
        self.num_strikes = num_strikes
        self.num_expiries = num_expiries
        n_contracts = num_strikes * num_expiries

        # State: 8 features for every contract on the surface.
        self.observation_space = spaces.Box(
            low=-np.inf, high=np.inf,
            shape=(n_contracts * 8,))

        # Action: one bid offset and one ask offset per contract,
        # bounded to +/- 10% around the reference price.
        self.action_space = spaces.Box(
            low=-0.1, high=0.1,
            shape=(n_contracts * 2,))

    def reset(self):
        """Draw a fresh random market state; return a copy of it."""
        dim = self.num_strikes * self.num_expiries * 8
        self.state = np.random.randn(dim)
        return self.state.copy()

    def step(self, action):
        """Apply one quoting action and advance the simulated market.

        Returns the usual gym 4-tuple ``(obs, reward, done, info)``.
        Reward is simulated PnL minus a penalty proportional to how
        aggressive the quotes are. ``done`` is always ``False``.
        """
        pnl = np.random.randn() * 0.01
        risk_penalty = np.abs(action).mean() * 0.001
        reward = pnl - risk_penalty

        # AR(1)-style evolution: decay the old state, inject noise.
        dim = self.num_strikes * self.num_expiries * 8
        self.state = self.state * 0.9 + np.random.randn(dim) * 0.1

        return self.state.copy(), reward, False, {}

class PolicyNetwork(nn.Module):
    """Gaussian policy: maps a state vector to a Normal over actions.

    The final linear layer emits ``2 * output_dim`` values, split into
    the mean and the log-std of a diagonal Gaussian action distribution.
    """

    def __init__(self, input_dim, output_dim):
        super().__init__()
        # Bug fix: forward() previously referenced a bare `output_dim`
        # name that was never stored, raising NameError on first call.
        self.output_dim = output_dim
        self.net = nn.Sequential(
            nn.Linear(input_dim, 256),
            nn.ReLU(),
            nn.Linear(256, 256),
            nn.ReLU(),
            nn.Linear(256, output_dim * 2),  # concatenated [mean, log_std]
        )

    def forward(self, x):
        """Return a ``Normal`` over actions for state(s) ``x``.

        ``x`` may be a single state vector or a batch; the leading
        dimensions are preserved in the distribution's parameters.
        """
        params = self.net(x)
        mean = params[..., :self.output_dim]
        # Clamp the log-std before exponentiating for numerical
        # stability; exp() guarantees a strictly positive std.
        log_std = params[..., self.output_dim:].clamp(-20.0, 2.0)
        return Normal(mean, log_std.exp())

class OptionMarketMaker:
    """PPO-style quoting agent trained on ``OptionMarketEnv``."""

    def __init__(self):
        self.env = OptionMarketEnv()
        self.policy = PolicyNetwork(
            self.env.observation_space.shape[0],
            self.env.action_space.shape[0])
        self.optimizer = torch.optim.Adam(
            self.policy.parameters(), lr=0.0003)

    def train(self, episodes=1000, batch_size=64, max_steps=256):
        """Run rollouts and update the policy every ``batch_size`` steps.

        Args:
            episodes: number of rollouts to perform.
            batch_size: transitions collected between policy updates.
            max_steps: hard cap on episode length. Bug fix: the toy
                environment never sets ``done=True``, so the original
                ``while not done`` loop never terminated.
        """
        for _ in range(episodes):
            states, actions, rewards = [], [], []
            state = self.env.reset()

            for _ in range(max_steps):
                # Sample an action from the current stochastic policy;
                # no gradient is needed during data collection.
                with torch.no_grad():
                    dist = self.policy(torch.FloatTensor(state))
                    action = dist.sample().numpy()

                next_state, reward, done, _ = self.env.step(action)

                states.append(state)
                actions.append(action)
                rewards.append(reward)
                state = next_state

                if len(states) >= batch_size:
                    self._update_policy(states, actions, rewards)
                    states, actions, rewards = [], [], []

                if done:
                    break

    def _update_policy(self, states, actions, rewards):
        """One clipped-surrogate PPO update on a batch of transitions."""
        states = torch.FloatTensor(np.array(states))
        actions = torch.FloatTensor(np.array(actions))
        rewards = torch.FloatTensor(np.array(rewards))

        # Normalized per-step rewards stand in for advantages.
        advantages = (rewards - rewards.mean()) / (rewards.std() + 1e-8)

        # Bug fix: the old log-probs must be detached from the graph.
        # Previously old and new dists shared the same live graph, so
        # ratio = exp(lp - lp) was the constant 1 with ZERO gradient and
        # training had no effect at all.
        with torch.no_grad():
            old_log_prob = self.policy(states).log_prob(actions).sum(-1)

        # Bug fix: sum per-dimension log-probs over the action axis to
        # get the joint log-prob of the diagonal Gaussian.
        new_log_prob = self.policy(states).log_prob(actions).sum(-1)
        ratio = (new_log_prob - old_log_prob).exp()

        # PPO clipped surrogate objective.
        clip_epsilon = 0.2
        policy_loss = -torch.min(
            ratio * advantages,
            torch.clamp(ratio, 1 - clip_epsilon, 1 + clip_epsilon) * advantages,
        ).mean()

        self.optimizer.zero_grad()
        policy_loss.backward()
        self.optimizer.step()

    def get_quotes(self, market_state):
        """Return deterministic (mean) quote offsets for ``market_state``."""
        with torch.no_grad():
            dist = self.policy(torch.FloatTensor(market_state))
            return dist.mean.numpy()

if __name__ == '__main__':
    # Example usage: train a market maker, then ask it for quotes.
    mm = OptionMarketMaker()
    mm.train(episodes=1000)

    # Query quote offsets for a randomly drawn market state
    # (5 strikes x 3 expiries x 8 features per contract).
    sample_state = np.random.randn(5 * 3 * 8)
    sample_quotes = mm.get_quotes(sample_state)
    print("最优报价调整量:", sample_quotes[:5])  # show the first 5 contracts