import math

import torch
from mmcv.runner import OPTIMIZERS
from torch.optim import Optimizer, SGD
from torch.optim.optimizer import _use_grad_for_differentiable
from torch.optim.sgd import sgd

from medfmc.models.stiefel_vit import PolarLinearStiefel



# Polar decomposition function (kept commented out for reference):
# def polar_decomposition(weight):
#     U_s, S, Vh = torch.linalg.svd(weight, full_matrices=False)
#     U = U_s @ Vh
#     P = Vh.T @ torch.diag(S) @ Vh
#     return U, P

# Projection onto the tangent space of the Stiefel manifold
def project_to_gl_manifold(U, grad):
    """Project ``grad`` onto the tangent space of the manifold at ``U``.

    Removes the component of ``grad`` lying in the column space of ``U``,
    i.e. computes ``grad - U (U^T grad)``.

    Args:
        U: Current point on the manifold, shape (n, m).
        grad: Euclidean gradient at ``U``, same shape as ``U``.

    Returns:
        The tangent-space component of ``grad``.
    """
    # Coefficients of grad expressed along the columns of U.
    along_U = U.T @ grad
    # Subtract the in-column-space part, keeping the orthogonal remainder.
    return grad - U @ along_U


# Projection onto the tangent space of the general linear group manifold (commented out):
# def project_to_gl_manifold(weight, grad):
#     """
#     将梯度 grad 投影到一般线性群流形 GL(n, R) 的切空间。
#     weight: 当前权重矩阵，形状为 (768, 3072)
#     grad: 梯度矩阵，形状为 (768, 3072)
#     """
#     # 分块数量
#     num_chunks = grad.shape[1] // weight.shape[0]
    
#     # 将 weight 和 grad 按列分块
#     grad_chunks = torch.chunk(grad, num_chunks, dim=1)
#     weight_chunks = torch.chunk(weight, num_chunks, dim=1)
    
#     # 初始化存储投影梯度的列表
#     projected_grads = []
    
#     for grad_chunk, weight_chunk in zip(grad_chunks, weight_chunks):
#         # 计算对称部分
#         sym_grad = 0.5 * (grad_chunk + grad_chunk.T)
        
#         # 投影到切空间
#         projected_grad = weight_chunk @ sym_grad
#         projected_grads.append(projected_grad)
    
#     # 合并投影结果
#     return torch.cat(projected_grads, dim=1)


# Retraction of a matrix back onto the Stiefel manifold (commented-out alternatives below):
# def retraction(U):
#     # 将 U 分成三个 768x768 的子矩阵
#     U1, U2, U3, U4 = torch.split(U, 768, dim=1)
    
#     # 分别计算 QR 分解并提取正交矩阵 Q
#     Q1, _ = torch.linalg.qr(U1)
#     Q2, _ = torch.linalg.qr(U2)
#     Q3, _ = torch.linalg.qr(U3)
#     Q4, _ = torch.linalg.qr(U4)
    
#     # 将三个正交矩阵拼接起来
#     Q = torch.cat((Q1, Q2, Q3, Q4), dim=1)
    
#     return Q
# def retraction(weight, block_size=768):
#     """
#     将矩阵按块收缩到一般线性群流形 GL(n, R)。
    
#     参数:
#         weight: 需要收缩的矩阵，大小为 (n, m)。
#         block_size: 每个分块的大小，默认为 768。
    
#     返回:
#         收缩后的矩阵，大小与输入相同。
#     """
#     n, m = weight.shape
    
#     if m % block_size != 0:
#         raise ValueError("Matrix width must be divisible by block_size.")
    
#     num_blocks = m // block_size
#     retracted_blocks = []
    
#     for i in range(num_blocks):
#         weight_block = weight[:, i * block_size:(i + 1) * block_size]
        
#         # 奇异值分解 (SVD)
#         u, s, v = torch.svd(weight_block)
        
#         # 重新构造矩阵
#         retracted_block = u @ torch.diag_embed(torch.exp(torch.log(s))) @ v.T
#         retracted_blocks.append(retracted_block)
    
#     # 拼接所有分块
#     retracted_weight = torch.cat(retracted_blocks, dim=1)
#     return retracted_weight

def retraction(U):
    """Identity retraction: return ``U`` unchanged.

    Deliberate no-op placeholder for a true retraction back onto the
    manifold (QR- and SVD-based variants are kept commented out above).
    """
    return U


@OPTIMIZERS.register_module()
class Stiefel_OPT(SGD):
    """SGD variant with an extra manifold-constrained update for one weight.

    Runs the standard (possibly momentum/Nesterov) SGD update on all
    parameters, then applies an additional tangent-space projected update
    plus retraction to any parameter whose shape is exactly (768, 3072)
    (intended for the ``PolarLinearStiefel`` layer's weight).

    NOTE(review): the shape-matched parameter therefore receives TWO
    updates per step — the standard SGD update from ``sgd(...)`` and the
    projected update below. Confirm this double update is intentional.
    NOTE(review): the hard-coded (768, 3072) shape presumably matches a
    ViT MLP weight; verify against the model definition.
    """

    def __init__(self, params, lr=6e-4, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False, *, maximize: bool = False, foreach=None,
                 differentiable: bool = False):
        # Call the parent class constructor; all standard SGD
        # hyper-parameter validation/handling is delegated to SGD.
        super().__init__(params, lr=lr, momentum=momentum, dampening=dampening,
                         weight_decay=weight_decay, nesterov=nesterov, 
                         maximize=maximize, foreach=foreach, differentiable=differentiable)

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Performs a single optimization step for Stiefel manifold.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.

        Returns:
            The loss returned by ``closure``, or ``None`` if no closure
            was given.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            d_p_list = []
            momentum_buffer_list = []

            # Collect params/grads/momentum buffers for this group
            # (torch-internal helper; its signature is version-specific).
            has_sparse_grad = self._init_group(group, params_with_grad, d_p_list, momentum_buffer_list)

            # Standard (functional) SGD update applied to ALL parameters.
            sgd(params_with_grad,
                d_p_list,
                momentum_buffer_list,
                weight_decay=group['weight_decay'],
                momentum=group['momentum'],
                lr=group['lr'],
                dampening=group['dampening'],
                nesterov=group['nesterov'],
                maximize=group['maximize'],
                has_sparse_grad=has_sparse_grad,
                foreach=group['foreach'])

            # Update each parameter based on its gradient
            for p, momentum_buffer in zip(params_with_grad, momentum_buffer_list):
                if p.shape==(768,3072):  # detect the target 'U' parameter by shape
                        # This parameter belongs to a PolarLinearStiefel-style layer.
                        grad_U = p.grad
                        tangent_grad = project_to_gl_manifold(p.data, grad_U)  # project onto the tangent space
                        # tangent_grad = project_to_tangent(p.data, grad_U)  # project onto the tangent space
                        
                        # Apply the projected gradient step to U.
                        # p.data -= group['lr'] * tangent_grad  # update U
                        p.data = p.data - group['lr'] * tangent_grad  # update U
                        p.data = retraction(p.data)  # retract onto the Stiefel manifold (currently a no-op)

                # Update momentum_buffers in state
                state = self.state[p]
                state['momentum_buffer'] = momentum_buffer

        return loss
