import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import random
from collections import deque
import copy

# ===================== 神经网络定义 =====================
class Actor(nn.Module):
    """策略网络：输入状态，输出各品种仓位调整动作 (-1到1)"""
    def __init__(self, state_dim, action_dim, hidden_dim=256):
        super(Actor, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(state_dim, hidden_dim),
            nn.ReLU(),
            nn.LayerNorm(hidden_dim),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, action_dim),
            nn.Tanh()  # 输出在[-1,1]范围
        )
    
    def forward(self, state):
        return self.net(state)

class Critic(nn.Module):
    """价值网络：输入状态和所有智能体的动作，输出Q值"""
    def __init__(self, state_dim, action_dim, num_agents, hidden_dim=512):
        super(Critic, self).__init__()
        total_actions = action_dim * num_agents
        self.net = nn.Sequential(
            nn.Linear(state_dim + total_actions, hidden_dim),
            nn.ReLU(),
            nn.LayerNorm(hidden_dim),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1)
        )
    
    def forward(self, state, actions):
        # 拼接所有智能体动作
        actions = actions.view(actions.size(0), -1)
        x = torch.cat([state, actions], dim=1)
        return self.net(x)

# ===================== 智能体定义 =====================
class DQMADDPGAgent:
    """Dual-Q MADDPG agent: one actor plus twin critics with target networks.

    Uses the TD3-style minimum over two target critics to curb Q-value
    overestimation, Ornstein-Uhlenbeck exploration noise, and a dynamically
    adjusted risk-aversion coefficient (risk_lambda).
    """

    def __init__(self, config):
        self.config = config
        self.state_dim = config['state_dim']
        self.action_dim = config['action_dim']
        self.num_agents = config['num_agents']

        # Actor / twin critics and their target copies.
        self.actor = Actor(self.state_dim, self.action_dim)
        self.actor_target = copy.deepcopy(self.actor)
        self.critic1 = Critic(self.state_dim, self.action_dim, self.num_agents)
        self.critic2 = Critic(self.state_dim, self.action_dim, self.num_agents)
        self.critic1_target = copy.deepcopy(self.critic1)
        self.critic2_target = copy.deepcopy(self.critic2)

        # Optimizers (both critics share the same learning rate).
        self.actor_optim = optim.Adam(self.actor.parameters(), lr=config['actor_lr'])
        self.critic1_optim = optim.Adam(self.critic1.parameters(), lr=config['critic_lr'])
        self.critic2_optim = optim.Adam(self.critic2.parameters(), lr=config['critic_lr'])

        # Replay buffer of (state, action, reward, next_state, done) tuples.
        self.replay_buffer = deque(maxlen=config['buffer_size'])

        # Ornstein-Uhlenbeck exploration noise process.
        self.noise = OUNoise(self.action_dim, sigma=config['noise_sigma'])

        # Risk-aversion coefficient, adapted by update_risk_lambda().
        self.risk_lambda = config['initial_risk_lambda']

    def act(self, state, explore=True):
        """Return an action for `state`, optionally with exploration noise.

        The result is clipped to [-1, 1] so that added noise cannot push
        the action outside the actor's Tanh output range.
        """
        state = torch.FloatTensor(state).unsqueeze(0)
        with torch.no_grad():
            action = self.actor(state).squeeze(0).numpy()

        if explore:
            noise = self.noise.sample()
            action = np.clip(action + noise, -1, 1)
        return action

    def update_risk_lambda(self, volatility, volatility_percentile):
        """Adapt the risk coefficient: more risk-averse in volatile regimes.

        When current volatility exceeds the reference percentile the
        coefficient is multiplied by 1.5 (capped at 0.7); otherwise it
        decays by 0.9 (floored at 0.3).
        """
        if volatility > volatility_percentile:
            self.risk_lambda = min(self.risk_lambda * 1.5, 0.7)
        else:
            self.risk_lambda = max(self.risk_lambda * 0.9, 0.3)

    def update(self, agents):
        """One gradient step on both critics and the actor (Dual-Q update).

        Returns (critic1_loss, critic2_loss, actor_loss) as floats, or
        None while the replay buffer holds fewer than one batch.
        """
        if len(self.replay_buffer) < self.config['batch_size']:
            return

        # Sample a batch and convert to tensors (np.array first: converting
        # a list of ndarrays directly is slow and warns in modern torch).
        batch = random.sample(self.replay_buffer, self.config['batch_size'])
        states, actions, rewards, next_states, dones = zip(*batch)

        states = torch.FloatTensor(np.array(states))
        actions = torch.FloatTensor(np.array(actions))
        rewards = torch.FloatTensor(rewards).unsqueeze(1)
        next_states = torch.FloatTensor(np.array(next_states))
        dones = torch.FloatTensor(dones).unsqueeze(1)

        # ========== Critic update ==========
        with torch.no_grad():
            # Next actions from every agent's target actor.
            # NOTE(review): this slices next_states per agent, but the
            # training loop stores per-agent state slices in each buffer —
            # verify the joint-state layout against train_dq_maddpg.
            next_actions = []
            for i, agent in enumerate(agents):
                next_actions.append(agent.actor_target(next_states[:, i*self.state_dim:(i+1)*self.state_dim]))
            next_actions = torch.cat(next_actions, dim=1)

            # Dual-Q target: min over the two target critics (TD3-style).
            target_Q1 = self.critic1_target(next_states, next_actions)
            target_Q2 = self.critic2_target(next_states, next_actions)
            target_Q = rewards + self.config['gamma'] * (1 - dones) * torch.min(target_Q1, target_Q2)

        # Current Q estimates.
        current_Q1 = self.critic1(states, actions)
        current_Q2 = self.critic2(states, actions)

        # Critic regression losses against the shared target.
        critic1_loss = nn.MSELoss()(current_Q1, target_Q)
        critic2_loss = nn.MSELoss()(current_Q2, target_Q)

        self.critic1_optim.zero_grad()
        critic1_loss.backward()
        self.critic1_optim.step()

        self.critic2_optim.zero_grad()
        critic2_loss.backward()
        self.critic2_optim.step()

        # ========== Actor update ==========
        # Freeze critic parameters so the policy step only updates the actor.
        for param in self.critic1.parameters():
            param.requires_grad = False
        for param in self.critic2.parameters():
            param.requires_grad = False

        # BUG FIX: the policy gradient must flow through the actor, so the
        # actor's freshly recomputed actions are fed to the critic. The
        # original passed the replayed `actions` tensor, which carries no
        # gradient, so actor_loss.backward() could not train the policy.
        actor_loss = -self.critic1(states, self.actor(states)).mean()

        self.actor_optim.zero_grad()
        actor_loss.backward()
        self.actor_optim.step()

        # Unfreeze critic parameters for the next critic update.
        for param in self.critic1.parameters():
            param.requires_grad = True
        for param in self.critic2.parameters():
            param.requires_grad = True

        # ========== Target-network soft update ==========
        self.soft_update(self.actor, self.actor_target, self.config['tau'])
        self.soft_update(self.critic1, self.critic1_target, self.config['tau'])
        self.soft_update(self.critic2, self.critic2_target, self.config['tau'])

        return critic1_loss.item(), critic2_loss.item(), actor_loss.item()

    def soft_update(self, local_model, target_model, tau):
        """Polyak averaging: target <- tau * local + (1 - tau) * target."""
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)

# ===================== 噪声生成器 =====================
class OUNoise:
    """Ornstein-Uhlenbeck noise process for temporally correlated exploration.

    Each step applies dx = theta * (mu - x) + sigma * N(0, 1), so the
    state mean-reverts toward `mu` while staying correlated over time.
    """

    def __init__(self, action_dim, mu=0, theta=0.15, sigma=0.2):
        self.mu = mu * np.ones(action_dim)  # long-run mean, one entry per action dim
        self.theta = theta                  # mean-reversion speed
        self.sigma = sigma                  # noise scale
        self.reset()

    def reset(self):
        """Reset the internal state back to the long-run mean."""
        self.state = self.mu.copy()

    def sample(self):
        """Advance the process one step and return a fresh noise vector.

        BUG FIX: the original returned the internal `self.state` array
        itself; the in-place `+=` on the next call then mutated every
        previously returned sample. A copy is returned instead.
        """
        dx = self.theta * (self.mu - self.state) + self.sigma * np.random.randn(len(self.state))
        self.state += dx
        return self.state.copy()

# ===================== 期货交易环境 =====================
class FuturesTradingEnv:
    """Minimal futures trading environment.

    `data` is an array shaped [time_step, asset, feature]; feature column 0
    is assumed to be the price. Fills are immediate and transaction costs
    are ignored.
    """

    def __init__(self, data, num_assets, initial_balance=1000000, risk_lambda=0.5):
        self.data = data
        self.num_assets = num_assets
        self.initial_balance = initial_balance
        # Risk-aversion weight used in the reward. BUG FIX: the original
        # reward read self.risk_lambda without ever defining it, raising
        # AttributeError on the first step().
        self.risk_lambda = risk_lambda
        self.reset()

    def reset(self):
        """Reset account state and return the initial observation."""
        self.current_step = 0
        self.balance = self.initial_balance
        self.positions = np.zeros(self.num_assets)
        self.portfolio_value = [self.initial_balance]
        self.done = False
        return self._get_state()

    def _get_state(self):
        """Observation = flattened market features + [balance, positions...]."""
        market_state = self.data[self.current_step].flatten()
        account_state = np.array([self.balance] + list(self.positions))
        return np.concatenate([market_state, account_state])

    def step(self, actions):
        """Apply position adjustments and advance one bar.

        Args:
            actions: iterable of per-asset adjustments in [-1, 1].

        Returns:
            (next_state, reward, done, info_dict).

        Raises:
            Exception: when called after the episode has ended.
        """
        if self.done:
            raise Exception("环境已结束，请重置")

        # Portfolio value before this step, for the return calculation.
        prev_value = self.portfolio_value[-1]

        # Execute trades at the current bar's price (feature column 0),
        # assuming immediate fills and no transaction costs.
        prices = self.data[self.current_step, :, 0]
        for i, action in enumerate(actions):
            position_change = action * 10  # scale [-1, 1] action to contracts
            self.positions[i] += position_change
            self.balance -= position_change * prices[i]

        # Mark the portfolio to market.
        current_value = self.balance + np.sum(self.positions * prices)
        self.portfolio_value.append(current_value)

        # Risk-aware reward: step return scaled by recent variability,
        # minus a drawdown penalty weighted by risk_lambda.
        # BUG FIX: the drawdown expression below was missing a closing
        # parenthesis in the original (SyntaxError).
        returns = (current_value - prev_value) / prev_value
        # NOTE(review): std is taken over raw portfolio values, not returns,
        # so this "Sharpe ratio" is scale-dependent — confirm intent.
        sharpe_ratio = returns / (np.std(self.portfolio_value[-20:]) + 1e-5)
        peak = max(self.portfolio_value)
        drawdown = max(0, (peak - current_value) / peak)
        reward = sharpe_ratio - self.risk_lambda * drawdown

        # Advance time; the episode ends one bar before the data runs out.
        self.current_step += 1
        self.done = (self.current_step >= len(self.data) - 1)

        next_state = self._get_state()
        return next_state, reward, self.done, {}

# ===================== 风险控制模块 =====================
class RiskControlModule:
    """Pre-trade risk checks: CVaR-based action scaling and account alerts."""

    def __init__(self, max_drawdown=0.2, risk_threshold=0.7):
        self.max_drawdown = max_drawdown      # tolerated CVaR / drawdown level
        self.risk_threshold = risk_threshold  # fraction of max_drawdown that triggers alerts
        self.alert_levels = {1: "短信预警", 2: "弹窗警告", 3: "人工干预"}

    def validate_actions(self, actions, portfolio_value, positions):
        """Return `actions`, scaled down when portfolio CVaR exceeds the limit."""
        cvar = self.calculate_cvar(portfolio_value)
        if cvar > self.max_drawdown:
            return self.apply_cvar_constraint(actions, positions, cvar)
        return actions

    def calculate_cvar(self, values, alpha=0.95):
        """Conditional Value-at-Risk of the value changes at level `alpha`.

        BUG FIX: the original averaged the *best* alpha fraction of
        outcomes (sorted_returns[:int(alpha * n)]); CVaR at level alpha is
        the mean loss over the worst (1 - alpha) tail. Also returns 0.0
        for fewer than two values instead of NaN.

        NOTE(review): np.diff yields absolute value changes, not
        percentage returns — confirm max_drawdown is on the same scale.
        """
        returns = np.diff(values)
        if returns.size == 0:
            return 0.0
        sorted_returns = np.sort(returns)
        # Worst (1 - alpha) fraction of outcomes, at least one sample.
        tail = max(1, int(np.ceil((1 - alpha) * sorted_returns.size)))
        return -np.mean(sorted_returns[:tail])

    def apply_cvar_constraint(self, actions, positions, cvar):
        """Uniformly shrink actions so implied risk drops back toward the cap."""
        # The scale factor is loop-invariant; hoisted out of the loop.
        risk_factor = min(1.0, self.max_drawdown / (cvar + 1e-5))
        return [action * risk_factor for action in actions]

    def check_account_risk(self, drawdown):
        """Print a tiered alert and return True when drawdown nears the cap."""
        risk_level = drawdown / self.max_drawdown
        if risk_level > self.risk_threshold:
            # Clamp into the valid alert-level keys {1, 2, 3}. BUG FIX: a
            # low risk_threshold could previously produce level 0 (KeyError).
            alert_level = min(3, max(1, int(risk_level * 3)))
            print(f"触发风控等级{alert_level}: {self.alert_levels[alert_level]}")
            return True
        return False

# ===================== 训练主循环 =====================
def train_dq_maddpg(config, data):
    """Train one DQ-MADDPG agent per instrument on `data`.

    Args:
        config: hyper-parameter dict (see the __main__ block for keys).
        data: market array shaped [time_step, asset, feature].

    Returns:
        (agents, episode_rewards): the trained agents and the cumulative
        reward of each episode.

    NOTE(review): the per-agent slicing below assumes the environment's
    global state is a concatenation of num_agents slices of length
    config['state_dim'] — verify against FuturesTradingEnv._get_state,
    whose layout is market features followed by account state.
    """
    # Environment and risk-control setup.
    env = FuturesTradingEnv(data, config['num_assets'])
    risk_control = RiskControlModule()
    
    # One agent per traded instrument.
    agents = [DQMADDPGAgent(config) for _ in range(config['num_agents'])]
    
    # Training statistics.
    episode_rewards = []
    
    for episode in range(config['episodes']):
        states = env.reset()
        episode_reward = 0
        done = False
        
        while not done:
            # Each agent picks an action from its own state slice.
            actions = []
            for i, agent in enumerate(agents):
                # Slice out this agent's portion of the global state.
                agent_state = states[i*config['state_dim']:(i+1)*config['state_dim']]
                action = agent.act(agent_state)
                actions.append(action)
            
            # Run the proposed actions through the risk-control module.
            actions = risk_control.validate_actions(
                actions, 
                env.portfolio_value,
                env.positions
            )
            
            # Step the environment; the scalar reward is shared by all agents.
            next_states, reward, done, _ = env.step(actions)
            episode_reward += reward
            
            # Store each agent's transition in its own replay buffer.
            for i, agent in enumerate(agents):
                agent_state = states[i*config['state_dim']:(i+1)*config['state_dim']]
                agent_action = actions[i]
                agent_next_state = next_states[i*config['state_dim']:(i+1)*config['state_dim']]
                
                agent.replay_buffer.append((
                    agent_state, 
                    agent_action, 
                    reward, 
                    agent_next_state, 
                    done
                ))
            
            # Gradient update for every agent (each sees all agents' actors).
            for agent in agents:
                agent.update(agents)
            
            # Advance the observed state.
            states = next_states
            
            # Risk monitoring: tighten risk aversion after an alert.
            current_drawdown = (max(env.portfolio_value) - env.portfolio_value[-1]) / max(env.portfolio_value)
            if risk_control.check_account_risk(current_drawdown):
                # On an alert, make every agent more risk-averse.
                for agent in agents:
                    # 0.9 stands in for the historical 90th-percentile
                    # volatility (simplified; see update_risk_lambda).
                    agent.update_risk_lambda(current_drawdown, 0.9)
        
        episode_rewards.append(episode_reward)
        print(f"Episode {episode+1}/{config['episodes']} | Reward: {episode_reward:.2f} | Portfolio: {env.portfolio_value[-1]:.2f}")
    
    return agents, episode_rewards

# ===================== 配置参数 =====================
if __name__ == "__main__":
    # Experiment configuration (tune per traded instrument).
    config = {
        'state_dim': 20,       # per-instrument state size (OHLC + indicators + account)
        'action_dim': 1,        # per-instrument action size (position adjustment)
        'num_agents': 3,        # number of agents (copper, crude oil, gold)
        'num_assets': 3,        # number of traded instruments
        'gamma': 0.95,          # discount factor
        'tau': 0.01,            # target-network soft-update rate
        'actor_lr': 1e-4,       # actor learning rate
        'critic_lr': 1e-3,      # critic learning rate
        'buffer_size': 1000000,  # replay buffer capacity
        'batch_size': 1024,     # training batch size
        'episodes': 500,        # number of training episodes
        'noise_sigma': 0.2,     # exploration noise scale
        'initial_risk_lambda': 0.5  # initial risk-aversion coefficient
    }

    # Load futures data shaped [time_step, asset, feature], e.g.:
    # data = load_futures_data('copper,crude,gold', '2020-01-01', '2023-12-31')
    data = None  # TODO: implement the data loader and assign the array here

    # BUG FIX: `data` was referenced without ever being defined, so the
    # script died with a bare NameError. Fail fast with an actionable
    # message instead.
    if data is None:
        raise SystemExit(
            "No futures data available: implement load_futures_data() and "
            "assign `data` before training."
        )

    # Train the agents.
    trained_agents, rewards = train_dq_maddpg(config, data)

    # Persist each agent's policy network.
    for i, agent in enumerate(trained_agents):
        torch.save(agent.actor.state_dict(), f'futures_agent_{i}.pth')