
from torchvision.transforms.functional import gaussian_blur
import torch
from torch import nn

def gaussian_blur_edge(image: torch.Tensor,
                       edge: torch.Tensor,
                       kernel_size: int = 15,
                       sigma: float = 5
                       ) -> torch.Tensor:
  """Blur an image only where the edge mask is active.

  Args:
      image: Input image tensor; assumed (..., C, H, W) as expected by
             torchvision's ``gaussian_blur`` — TODO confirm with callers.
      edge: Blending mask in [0, 1] that broadcasts against ``image``;
            1 = fully blurred, 0 = left untouched.
      kernel_size: Side length of the Gaussian kernel.
      sigma: Standard deviation of the Gaussian.

  Returns:
      torch.Tensor: ``image`` with the masked regions Gaussian-blurred.
  """
  # Blur the whole image once, then linearly blend with the original;
  # the mask selects where the blurred version takes over.
  # (The previous version also allocated an unused all-ones kernel here —
  # dead code, removed.)
  blurred = gaussian_blur(image, kernel_size, sigma)
  return image * (1 - edge) + blurred * edge


def gaussian_blur_masked(image: torch.Tensor,
                         mask: torch.Tensor, 
                         kernel_size: int=15,
                         sigma: int=5
                         ) -> torch.Tensor:
  """Blur the boundary band of a masked region in the image.

  Max-pooling the mask acts as morphological dilation; max-pooling the
  negated mask equals minus the erosion. Their sum (dilation - erosion)
  isolates the band around the mask boundary, which is then blurred.
  """
  mask = mask.to(image)  # match the image's device and dtype
  half = int((kernel_size - 1) // 2)
  pool = nn.MaxPool2d(kernel_size=kernel_size, stride=1, padding=half)
  dilated = pool(mask)          # grows the mask region outward
  neg_eroded = pool(-mask)      # equals -(eroded mask)
  edge = dilated + neg_eroded   # dilation - erosion = boundary band
  return gaussian_blur_edge(image, edge, kernel_size, sigma)


def smooth_features(features: torch.Tensor, # N x K
                    kernel_weight=(1, 4, 6, 4, 1),
                    ) -> torch.Tensor:
  """Smooth an N x K feature tensor along the N axis with a 1-D kernel.

  Args:
      features (torch.Tensor): Input feature tensor of shape (N, K).
      kernel_weight (sequence, optional): 1-D smoothing kernel weights;
          normalized to sum to 1 before use. Defaults to (1, 4, 6, 4, 1)
          (binomial weights). NOTE: a tuple default replaces the previous
          mutable-list default (Python anti-pattern); lists still work.

  Returns:
      torch.Tensor: Smoothed feature tensor with the same shape (N, K).
  """
  import torch.nn.functional as F
  device = features.device
  dtype = features.dtype

  # 1. Normalize the kernel so smoothing preserves the overall level.
  kernel = torch.tensor(kernel_weight, dtype=dtype, device=device)
  kernel = kernel / kernel.sum()
  kernel_size = len(kernel_weight)

  # 2. Symmetric padding; assumes odd kernel_size so output length == N.
  padding = (kernel_size - 1) // 2

  # 3. (N, K) -> (K, 1, N): each feature dim becomes a conv1d batch entry.
  features_reshaped = features.permute(1, 0).unsqueeze(1)

  # 4. conv1d weight layout is (out_channels, in_channels, kernel_size).
  kernel_reshaped = kernel.view(1, 1, kernel_size)

  # 5. Replicate-pad so border values are not pulled toward zero,
  #    then convolve with padding=0.
  features_padded = F.pad(features_reshaped, (padding, padding), mode='replicate')
  smoothed_features = F.conv1d(features_padded, kernel_reshaped, stride=1, padding=0)

  # 6. (K, 1, N) -> (K, N) -> (N, K)
  return smoothed_features.squeeze(1).permute(1, 0)
  
  
def create_ema_kernel(alpha, kernel_size, symmetry):
    """Build an EMA-style 1-D kernel shaped for Conv2d.

    Args:
        alpha (float): EMA smoothing factor (e.g. 0.9).
        kernel_size (int): Length of the kernel.
        symmetry (bool): If True, the weights decay on both sides of the
            center; otherwise only the left (causal) side is filled.

    Returns:
        torch.Tensor: Kernel of shape (1, 1, kernel_size, 1). The center
        tap is 1 - alpha and each step away from it decays by alpha.
        Not normalized here.
    """
    mid = kernel_size // 2
    weights = torch.zeros(kernel_size)
    weights[mid] = 1 - alpha  # peak weight at the center tap
    # Left tail: geometric decay by alpha per step away from the center.
    idx = mid - 1
    while idx >= 0:
        weights[idx] = alpha * weights[idx + 1]
        idx -= 1
    if symmetry:
        # Mirror the decay onto the right tail.
        idx = mid + 1
        while idx < kernel_size:
            weights[idx] = alpha * weights[idx - 1]
            idx += 1
    return weights.view(1, 1, kernel_size, 1)

def smooth_tensor_ema(input_tensor, alpha, kernel_size=None, symmetry=True):
    """Smooth a (B, N, K) tensor along the N axis with an EMA-style kernel.

    Args:
        input_tensor (torch.Tensor): Input tensor of shape (B, N, K).
        alpha (float): EMA smoothing factor in (0, 1); larger = smoother.
        kernel_size (int, optional): Kernel length. When None, derived
            from alpha as roughly twice the EMA effective window, with a
            minimum of 3.
        symmetry (bool, optional): Whether the kernel decays on both
            sides of the center. Defaults to True.

    Returns:
        torch.Tensor: Smoothed tensor of shape (B, N, K). Borders are
        zero-padded, so values near the ends are damped.
    """
    if kernel_size is None:  # was `== None` — use identity check for None
        # Effective EMA window is ~1/(1-alpha); keep at least length 3.
        kernel_size = max(int(1 / (1 - alpha)) * 2 + 1, 3)

    # Build the EMA kernel and normalize it so the weights sum to 1.
    ema_kernel = create_ema_kernel(alpha, kernel_size, symmetry)
    ema_kernel = ema_kernel / ema_kernel.sum()

    # A (kernel_size, 1) Conv2d kernel convolves along N only.
    # `.to(input_tensor)` matches both device AND dtype — the previous
    # `.to(input_tensor.device)` left float32 weights, crashing on
    # e.g. float64 inputs.
    conv2d = nn.Conv2d(in_channels=1, out_channels=1,
                       kernel_size=(kernel_size, 1),
                       padding=(kernel_size // 2, 0),
                       bias=False).to(input_tensor)

    # Load the EMA weights into the conv layer.
    with torch.no_grad():
        conv2d.weight.copy_(ema_kernel)

    # (B, N, K) -> (B, 1, N, K): single channel for Conv2d.
    reshaped_tensor = input_tensor.unsqueeze(1)

    smoothed_tensor = conv2d(reshaped_tensor)

    # (B, 1, N, K) -> (B, N, K)
    return smoothed_tensor.squeeze(1)

def accum_conv(input_tensor, coefvec):
    """Apply a one-sided accumulation kernel along the N axis.

    Args:
        input_tensor (torch.Tensor): Tensor of shape (B, N, K).
        coefvec (sequence): Coefficients; coefvec[0] weights the current
            row, coefvec[i] weights the row i steps earlier.

    Returns:
        torch.Tensor: Tensor of shape (B, N, K). Borders are zero-padded
        and the kernel is NOT normalized.
    """
    taps = len(coefvec)
    kernel_size = 2 * taps + 1
    center = kernel_size // 2
    # Fill only the center and the taps before it; the right half stays 0.
    kernel = torch.zeros(kernel_size)
    for offset, coef in enumerate(coefvec):
        kernel[center - offset] = coef
    conv = nn.Conv2d(in_channels=1, out_channels=1,
                     kernel_size=(kernel_size, 1),
                     padding=(center, 0),
                     bias=False).to(input_tensor.device)
    with torch.no_grad():
        conv.weight.copy_(kernel.view(1, 1, -1, 1))
    out = conv(input_tensor.unsqueeze(1))  # (B, N, K) -> (B, 1, N, K)
    return out.squeeze(1)                  # back to (B, N, K)