# ultralytics/nn/modules/ema.py
# import torch.nn as nn
# import torch
# NOTE(review): this entire module is commented out (dead code) — either re-enable it or delete the file.
# class EMA(nn.Module):                      # NOTE(review): labeled "Efficient Multi-scale Attention", but the body below is plain single-scale global self-attention (one HW x HW map, no multi-scale branches) — confirm the name matches the intent
#     def __init__(self, c: int, r: int = 8):
#         super().__init__()
#         self.qkv = nn.Conv2d(c, c // r * 3, 1, bias=False)
#         self.proj = nn.Conv2d(c // r, c, 1, bias=False)
#         self.softmax = nn.Softmax(dim=-1)

#     def forward(self, x):
#         b, c, h, w = x.shape
#         q, k, v = self.qkv(x).chunk(3, 1)   # (B,C/r,H,W) ×3
#         q = q.flatten(2).transpose(1, 2)    # (B,HW,C/r)
#         k = k.flatten(2)                    # (B,C/r,HW)
#         attn = self.softmax(q @ k)          # (B,HW,HW); NOTE(review): no 1/sqrt(d) scaling before softmax — verify this is intended if re-enabled
#         v = v.flatten(2).transpose(1, 2)    # (B,HW,C/r)
#         out = (attn @ v).transpose(1, 2).reshape(b, -1, h, w)
#         return x + self.proj(out)           # residual
