"""
基于生成对抗网络的市场模拟策略
策略特点：
1. 使用WGAN-GP生成逼真的市场数据
2. 支持多资产联合分布建模
3. 实现条件生成和异常检测
"""

import torch
import torch.nn as nn
import numpy as np
from torch.autograd import grad

class Generator(nn.Module):
    """Conditional generator: maps (noise, condition) pairs to synthetic samples."""

    def __init__(self, latent_dim=64, output_dim=10, condition_dim=5):
        super().__init__()
        widths = [latent_dim + condition_dim, 128, 256, 512]
        layers = []
        for fan_in, fan_out in zip(widths[:-1], widths[1:]):
            layers.append(nn.Linear(fan_in, fan_out))
            layers.append(nn.LeakyReLU(0.2))
        layers.append(nn.Linear(widths[-1], output_dim))
        layers.append(nn.Tanh())  # squash outputs to [-1, 1]
        self.net = nn.Sequential(*layers)

    def forward(self, z, c):
        """z: noise batch, c: condition batch; returns a generated batch."""
        joined = torch.cat([z, c], dim=1)
        return self.net(joined)

class Discriminator(nn.Module):
    """Conditional critic: scores (sample, condition) pairs with a single real value."""

    def __init__(self, input_dim=10, condition_dim=5):
        super().__init__()
        widths = [input_dim + condition_dim, 512, 256, 128]
        layers = []
        for fan_in, fan_out in zip(widths[:-1], widths[1:]):
            layers.append(nn.Linear(fan_in, fan_out))
            layers.append(nn.LeakyReLU(0.2))
        # No final activation: WGAN critics output an unbounded score.
        layers.append(nn.Linear(widths[-1], 1))
        self.net = nn.Sequential(*layers)

    def forward(self, x, c):
        """x: sample batch, c: condition batch; returns a (batch, 1) score tensor."""
        joined = torch.cat([x, c], dim=1)
        return self.net(joined)

class MarketGAN:
    """Conditional WGAN-GP that models the joint distribution of asset returns."""

    def __init__(self, asset_names, lookback=20, latent_dim=64, condition_dim=5):
        """
        asset_names: list of asset names; one output dimension per asset.
        lookback: history window length (stored for downstream use).
        latent_dim: size of the generator's noise input (was hard-coded to 64).
        condition_dim: size of the market-state condition vector.
        """
        self.asset_names = asset_names
        self.num_assets = len(asset_names)
        self.lookback = lookback
        self.latent_dim = latent_dim
        # Bug fix: original referenced bare `num_assets`, raising NameError
        # on construction; it must be the instance attribute.
        self.generator = Generator(latent_dim=latent_dim,
                                   output_dim=self.num_assets,
                                   condition_dim=condition_dim)
        self.discriminator = Discriminator(input_dim=self.num_assets,
                                           condition_dim=condition_dim)
        self.optimizer_G = torch.optim.Adam(self.generator.parameters(), lr=0.0001)
        self.optimizer_D = torch.optim.Adam(self.discriminator.parameters(), lr=0.0001)

    def _compute_gradient_penalty(self, real_samples, fake_samples, conditions):
        """WGAN-GP penalty: push ||grad_x D(x_interp)||_2 toward 1 on random interpolates."""
        alpha = torch.rand(real_samples.size(0), 1, device=real_samples.device)
        interpolates = (alpha * real_samples + (1 - alpha) * fake_samples).requires_grad_(True)
        d_interpolates = self.discriminator(interpolates, conditions)

        gradients = grad(outputs=d_interpolates, inputs=interpolates,
                         grad_outputs=torch.ones_like(d_interpolates),
                         create_graph=True, retain_graph=True)[0]

        gradients = gradients.view(gradients.size(0), -1)
        return ((gradients.norm(2, dim=1) - 1) ** 2).mean()

    def train(self, dataloader, epochs=100, n_critic=5, lambda_gp=10):
        """Adversarial training: n_critic critic updates per generator update.

        dataloader yields (real_returns, conditions) batch pairs.
        """
        for epoch in range(epochs):
            for i, (real_returns, conditions) in enumerate(dataloader):
                # ---- critic (discriminator) update ----
                self.optimizer_D.zero_grad()

                z = torch.randn(real_returns.size(0), self.latent_dim)
                fake_returns = self.generator(z, conditions)

                real_validity = self.discriminator(real_returns, conditions)
                # detach() so the generator gets no gradient from the critic step
                fake_validity = self.discriminator(fake_returns.detach(), conditions)
                gradient_penalty = self._compute_gradient_penalty(
                    real_returns.detach(), fake_returns.detach(), conditions.detach())

                # Wasserstein critic loss + gradient penalty
                d_loss = (-torch.mean(real_validity) + torch.mean(fake_validity)
                          + lambda_gp * gradient_penalty)
                d_loss.backward()
                self.optimizer_D.step()

                # ---- generator update every n_critic batches ----
                if i % n_critic == 0:
                    self.optimizer_G.zero_grad()

                    z = torch.randn(real_returns.size(0), self.latent_dim)
                    fake_returns = self.generator(z, conditions)

                    g_loss = -torch.mean(self.discriminator(fake_returns, conditions))
                    g_loss.backward()
                    self.optimizer_G.step()

    def generate_samples(self, condition, num_samples=100):
        """Generate num_samples synthetic return vectors for one condition vector.

        condition: 1-D array-like of length condition_dim.
        Returns a (num_samples, num_assets) numpy array.
        """
        z = torch.randn(num_samples, self.latent_dim)
        c = torch.as_tensor(condition, dtype=torch.float32).reshape(1, -1).repeat(num_samples, 1)
        with torch.no_grad():
            samples = self.generator(z, c).numpy()
        return samples

    def detect_anomalies(self, real_data, condition, threshold=2.5):
        """Flag real samples scoring more than `threshold` stds below the fake-score mean.

        real_data: (N, num_assets) array-like of observed returns.
        condition: 1-D condition vector shared by all rows.
        Returns a (N, 1) boolean tensor; True marks an anomaly.
        """
        real = torch.as_tensor(real_data, dtype=torch.float32)
        # Bug fix: original passed the raw 1-D condition and a numpy array from
        # generate_samples() straight into the critic, which expects batched
        # float tensors; also wrap scoring in no_grad since this is inference.
        c = torch.as_tensor(condition, dtype=torch.float32).reshape(1, -1).repeat(len(real), 1)
        with torch.no_grad():
            real_scores = self.discriminator(real, c)
            fake = torch.from_numpy(self.generate_samples(condition, len(real))).float()
            fake_scores = self.discriminator(fake, c)

        mean_score = fake_scores.mean()
        std_score = fake_scores.std()

        return real_scores < (mean_score - threshold * std_score)

if __name__ == '__main__':
    # Demo: build the GAN, train it on fabricated data, then sample
    # from it and screen held-out observations for anomalies.
    assets = ['SPY', 'QQQ', 'GLD', 'TLT', 'IWM']
    gan = MarketGAN(assets)

    # Fabricated training data: small daily returns plus a
    # 5-dimensional market-state condition for each row.
    np.random.seed(42)
    num_rows = 1000
    returns = np.random.randn(num_rows, len(assets)) * 0.01
    conditions = np.random.randn(num_rows, 5)

    # Wrap the arrays in a batched, shuffled loader.
    dataset = torch.utils.data.TensorDataset(
        torch.FloatTensor(returns),
        torch.FloatTensor(conditions))
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)

    # Adversarial training.
    gan.train(dataloader)

    # Draw synthetic samples under a fresh random market state.
    test_condition = np.random.randn(5)
    samples = gan.generate_samples(test_condition)
    print("生成样本:", samples[:5])

    # Score unseen observations for anomalies under the same state.
    test_data = torch.FloatTensor(np.random.randn(10, len(assets)) * 0.01)
    anomalies = gan.detect_anomalies(test_data, test_condition)
    print("异常点:", anomalies)