import math

import torch
from mmcv.runner import OPTIMIZERS
from torch.optim import Optimizer, SGD
from torch.optim.optimizer import _use_grad_for_differentiable
from torch.optim.sgd import sgd

from medfmc.models.stiefel_vit import PolarLinearStiefel



def project_to_tangent_space(Q, G):
    """Project an ambient gradient onto the tangent space of the Stiefel manifold.

    Computes ``G - Q @ sym(Q^T G)`` where ``sym(A) = (A + A^T) / 2``.

    Args:
        Q: Point on the manifold (presumably column-orthonormal — confirm at
            call sites).
        G: Euclidean gradient at ``Q``, same shape as ``Q``.

    Returns:
        The tangent-space component of ``G``.
    """
    # Symmetric part of Q^T G (note (Q^T G)^T = G^T Q).
    symmetric = (Q.T @ G + G.T @ Q) * 0.5
    return G - Q @ symmetric
# def project_to_gl_manifold(U, grad):
#     return grad - U @ (U.T @ grad)


# NOTE(review): a QR-based retraction back onto the Stiefel manifold was
# previously used and is kept here for reference — confirm whether the
# identity below is intentional:
# def retraction(U):
#     Q, _ = torch.linalg.qr(U)
#     return Q


def retraction(U):
    """Retraction map applied after an update.

    Currently the identity: ``U`` is returned unchanged, i.e. no
    re-orthogonalization onto the manifold is performed.
    """
    return U


@OPTIMIZERS.register_module()
class Stiefel_OPT(SGD):
    """SGD subclass registered with mmcv for Stiefel-manifold training.

    NOTE(review): despite the name, ``step`` currently performs a stock SGD
    update via torch's functional ``sgd`` kernel; the module-level helpers
    ``project_to_tangent_space`` and ``retraction`` are never called here —
    confirm whether that is intended.
    """

    def __init__(self, params, lr=6e-4, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False, *, maximize: bool = False, foreach=None,
                 differentiable: bool = False):
        # Call the parent (SGD) constructor; all hyper-parameters are
        # forwarded unchanged.
        super().__init__(params, lr=lr, momentum=momentum, dampening=dampening,
                         weight_decay=weight_decay, nesterov=nesterov, 
                         maximize=maximize, foreach=foreach, differentiable=differentiable)

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Performs a single optimization step for Stiefel manifold.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.

        Returns:
            The loss produced by ``closure``, or ``None`` when no closure
            is given.
        """
        loss = None
        if closure is not None:
            # The decorator may have disabled grad; re-enable it so the
            # closure's forward/backward pass can build a graph.
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            d_p_list = []
            momentum_buffer_list = []

            # Collect params that have gradients, their grads, and their
            # momentum buffers; returns True if any gradient is sparse.
            has_sparse_grad = self._init_group(group, params_with_grad, d_p_list, momentum_buffer_list)

            # Delegate the actual in-place parameter update to torch's
            # functional SGD implementation.
            sgd(params_with_grad,
                d_p_list,
                momentum_buffer_list,
                weight_decay=group['weight_decay'],
                momentum=group['momentum'],
                lr=group['lr'],
                dampening=group['dampening'],
                nesterov=group['nesterov'],
                maximize=group['maximize'],
                has_sparse_grad=has_sparse_grad,
                foreach=group['foreach'])

            # Persist the (possibly newly created) momentum buffers back
            # into the optimizer state for the next step.
            for p, momentum_buffer in zip(params_with_grad, momentum_buffer_list):
                # Update momentum_buffers in state
                state = self.state[p]
                state['momentum_buffer'] = momentum_buffer
        return loss
