import torch
import numpy as np
from sklearn.model_selection import train_test_split
from get_quarter import train, get_quarter

class MuonOptimizer(torch.optim.Optimizer):
    """Muon-style optimizer.

    Combines momentum (an EMA of gradients) with an adaptive per-element
    learning rate derived from an EMA of squared gradient *changes* — a
    cheap second-order (curvature) proxy — plus optional decoupled weight
    decay.

    Args:
        params: iterable of parameters (or parameter groups) to optimize.
        lr: learning rate (default 0.001).
        beta1: EMA coefficient for the first moment (default 0.9).
        beta2: EMA coefficient for the squared gradient-change term
            (default 0.999).
        epsilon: term added to the denominator for numerical stability.
        weight_decay: decoupled weight-decay coefficient (default 0).

    Raises:
        ValueError: if any hyperparameter is outside its valid range.
    """
    def __init__(self, params, lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8, weight_decay=0):
        # Validate hyperparameters up front (mirrors torch.optim.Adam);
        # previously a negative lr or beta >= 1 was silently accepted.
        if lr < 0.0:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= beta1 < 1.0:
            raise ValueError(f"Invalid beta1 value: {beta1}")
        if not 0.0 <= beta2 < 1.0:
            raise ValueError(f"Invalid beta2 value: {beta2}")
        if epsilon < 0.0:
            raise ValueError(f"Invalid epsilon value: {epsilon}")
        if weight_decay < 0.0:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")
        defaults = dict(lr=lr, beta1=beta1, beta2=beta2, epsilon=epsilon, weight_decay=weight_decay)
        super().__init__(params, defaults)
    
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss.

        Returns:
            The loss returned by ``closure``, or ``None``.
        """
        loss = None
        if closure is not None:
            loss = closure()
        
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError("MuonOptimizer does not support sparse gradients")
                
                state = self.state[p]
                
                # Lazy per-parameter state initialization on the first step.
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)     # first-moment EMA
                    state['exp_avg_sq'] = torch.zeros_like(p.data)  # EMA of squared grad change
                    state['prev_grad'] = torch.zeros_like(p.data)   # gradient seen last step
                
                exp_avg, exp_avg_sq, prev_grad = state['exp_avg'], state['exp_avg_sq'], state['prev_grad']
                beta1, beta2 = group['beta1'], group['beta2']
                
                state['step'] += 1
                
                # Gradient change since the previous step (second-order proxy).
                # On the first step prev_grad is zero, so this equals grad.
                grad_change = grad - prev_grad
                state['prev_grad'] = grad.clone()
                
                # First-moment (momentum) EMA update.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                
                # Second-moment EMA of the squared gradient *change*
                # (unlike Adam, which uses the squared gradient itself).
                exp_avg_sq.mul_(beta2).addcmul_(grad_change, grad_change, value=1 - beta2)
                
                # Adam-style bias corrections for the zero-initialized EMAs.
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                
                # Adaptive per-element denominator.
                denom = (exp_avg_sq.sqrt() / np.sqrt(bias_correction2)) + group['epsilon']
                
                # Bias-corrected step size.
                step_size = group['lr'] / bias_correction1
                
                # Decoupled weight decay: shrink weights directly rather
                # than folding the penalty into the gradient.
                if group['weight_decay'] != 0:
                    p.data.add_(p.data, alpha=-group['lr'] * group['weight_decay'])
                
                # Parameter update: p -= step_size * exp_avg / denom.
                p.data.addcdiv_(exp_avg, denom, value=-step_size)
        
        return loss


class MLPModel(torch.nn.Module):
    """Two-layer MLP with hand-crafted polar/quadrant input features.

    Raw input is 2-D (x, y); ``forward`` appends radius, angle, and a
    binary quadrant indicator, so the first linear layer takes 5 features.
    """
    def __init__(self, hidden_size, output_dim):
        super(MLPModel, self).__init__()
        # Input dimension fixed at 5: 2 raw features + r + theta + quadrant.
        self.mlp_1 = torch.nn.Linear(5, hidden_size)
        self.mlp_2 = torch.nn.Linear(hidden_size, output_dim)
        self.relu = torch.nn.ReLU()
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        """Map a (batch, 2) input to sigmoid outputs of shape (batch, output_dim)."""
        # Feature engineering: polar coordinates.
        r = torch.sqrt(x[:, 0]**2 + x[:, 1]**2).unsqueeze(1)
        theta = torch.atan2(x[:, 1], x[:, 0]).unsqueeze(1)
        
        # Binary indicator for the 1st and 3rd quadrants.
        # BUG FIX: torch.atan2 returns angles in (-pi, pi], so the original
        # third-quadrant test (theta > pi) & (theta < 3*pi/2) was unreachable
        # and the feature was never set for quadrant 3.
        quadrant = torch.zeros_like(theta)
        quadrant[(theta > 0) & (theta < np.pi/2)] = 1         # 1st quadrant
        quadrant[(theta > -np.pi) & (theta < -np.pi/2)] = 1   # 3rd quadrant
        
        # Concatenate raw and engineered features.
        x_enhanced = torch.cat([x, r, theta, quadrant], dim=1)
        
        # Forward pass.
        x = self.mlp_1(x_enhanced)
        x = self.relu(x)
        x = self.mlp_2(x)
        outputs = self.sigmoid(x)
        return outputs


if __name__ == '__main__':
    # Raw inputs are 2-D; the model's internal feature engineering
    # expands them to 5 before the first linear layer.
    X, y = get_quarter()
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.33, random_state=42)

    # Hyperparameters: a small 4-unit hidden layer; Muon typically
    # tolerates a relatively high learning rate.
    hidden_size = 4
    learning_rate = 0.05
    output_dim = 1

    model = MLPModel(hidden_size, output_dim)

    # Cutting-edge Muon optimizer with tuned moment coefficients
    # and mild weight-decay regularization.
    optimizer = MuonOptimizer(
        model.parameters(),
        lr=learning_rate,
        beta1=0.95,          # retain more momentum
        beta2=0.99,          # longer second-moment memory
        epsilon=1e-7,
        weight_decay=0.001,  # light regularization
    )

    train(X_train, X_test, y_train, y_test, optimizer=optimizer, model=model)

    print("\n------------模型参数------------")
    for param_name, param_tensor in model.state_dict().items():
        print(param_name, param_tensor)
        print()