import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import einsum
from einops import rearrange, repeat
import math

class MultiHeadAttention(nn.Module):
    """Multi-head self-attention (Vaswani et al., 2017).

    Args:
        dim: model (embedding) dimension; must be divisible by ``heads``.
        heads: number of attention heads.
        dropout: dropout probability applied after the output projection.
    """
    def __init__(self, dim=512, heads=8, dropout=0.1):
        super(MultiHeadAttention, self).__init__()
        if dim % heads != 0:
            raise ValueError(f"dim ({dim}) must be divisible by heads ({heads})")
        self.heads = heads
        # Bug fix: scale by the per-head dimension, not the full model
        # dimension, per standard scaled dot-product attention.
        self.scale = (dim // heads) ** -0.5
        self.to_qkv = nn.Linear(dim, dim * 3, bias=False)
        self.to_out = nn.Sequential(
            nn.Linear(dim, dim),
            nn.Dropout(dropout)
        )

    def forward(self, x, mask=None):
        """Attend over the sequence.

        Args:
            x: input of shape [B, N, dim].
            mask: optional key-padding mask of shape [B, N]; positions equal
                to 0 are masked out.

        Returns:
            Tensor of shape [B, N, dim].
        """
        b, n, _ = x.shape
        h = self.heads
        # Project to q, k, v and split into heads: each [B, H, N, D].
        qkv = self.to_qkv(x).chunk(3, dim=-1)
        q, k, v = (t.view(b, n, h, -1).transpose(1, 2) for t in qkv)

        # Scaled dot-product attention scores: [B, H, N, N].
        dots = torch.matmul(q, k.transpose(-2, -1)) * self.scale
        if mask is not None:
            # Broadcast the padding mask over heads and query positions.
            mask = mask[:, None, None, :]
            dots = dots.masked_fill(mask == 0, -1e9)

        attn = dots.softmax(dim=-1)
        out = torch.matmul(attn, v)                   # [B, H, N, D]
        out = out.transpose(1, 2).reshape(b, n, -1)   # [B, N, dim]
        return self.to_out(out)


class PositionalEncoding(nn.Module):
    """Fixed sinusoidal positional encoding (Vaswani et al., 2017).

    Precomputes a [max_len, dim] table — sin on even columns, cos on odd
    columns — and adds its first ``seq_len`` rows to the input, so any
    sequence length up to ``max_len`` is supported.
    """
    def __init__(self, dim, max_len=100):
        super(PositionalEncoding, self).__init__()
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Geometric progression of inverse frequencies, one per sin/cos pair.
        inv_freq = torch.exp(torch.arange(0, dim, 2).float() * (-math.log(10000.0)) / dim)
        angles = positions * inv_freq
        table = torch.zeros(max_len, dim)
        table[:, 0::2] = torch.sin(angles)
        table[:, 1::2] = torch.cos(angles)
        # Buffer (not Parameter): follows .to()/.cuda() but is never trained.
        self.register_buffer('pe', table)

    def forward(self, x):
        """Add positional encodings to x of shape [B, seq_len, dim]."""
        seq_len = x.size(1)
        return x + self.pe[:seq_len]

class TransformerBlock(nn.Module):
    """Post-norm Transformer encoder block.

    Self-attention and a position-wise feed-forward network, each wrapped in
    a residual connection followed by LayerNorm, with dropout applied to the
    block's output.
    """
    def __init__(self, dim=512, heads=8, dropout=0.1):
        super(TransformerBlock, self).__init__()
        self.attn = MultiHeadAttention(dim, heads, dropout)
        self.norm1 = nn.LayerNorm(dim)
        # Feed-forward with the conventional 4x hidden expansion.
        self.ff = nn.Sequential(
            nn.Linear(dim, dim * 4),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(dim * 4, dim)
        )

        self.norm2 = nn.LayerNorm(dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask=None):
        """x: [B, N, dim]; mask is forwarded to the attention layer."""
        attended = self.attn(x, mask)
        x = self.norm1(x + attended)
        expanded = self.ff(x)
        x = self.norm2(x + expanded)
        return self.dropout(x)

# PARSeq 模型
class PARSeq(nn.Module):
    """Simplified PARSeq-style text recognizer.

    A small CNN collapses the image height into a width-wise feature sequence,
    a Transformer encoder refines it, and two heads decode: a parallel head
    (one prediction per position) and a greedy autoregressive head.

    Args:
        num_classes: size of the output vocabulary.
        dim: model dimension.
        depth: number of Transformer encoder blocks.
        heads: attention heads per block.
        max_seq_len: number of autoregressive decoding steps.
    """
    def __init__(self, num_classes, dim=512, depth=6, heads=8, max_seq_len=25):
        super(PARSeq, self).__init__()
        # Bug fix: forward() previously read `max_seq_len` as a bare global.
        self.max_seq_len = max_seq_len

        # Image feature extraction; AdaptiveAvgPool2d collapses height -> 1
        # so the width axis becomes the sequence axis.
        self.cnn = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(3, stride=2, padding=1),
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.MaxPool2d(3, stride=2, padding=1),
            nn.Conv2d(128, dim, kernel_size=3, stride=1, padding=1),
            nn.AdaptiveAvgPool2d((1, None))         # [B, C, 1, W]
        )

        # Sequence processing.
        # NOTE(review): max_seq_len also caps the positional table, so the CNN
        # output width W (roughly input_width / 8) must not exceed it — confirm
        # against the expected input image sizes.
        self.pos_enc = PositionalEncoding(dim, max_seq_len)
        self.transformer = nn.ModuleList([
            TransformerBlock(dim, heads) for _ in range(depth)
        ])

        # Parallel decoding head
        self.parallel_head = nn.Linear(dim, num_classes)

        # Autoregressive decoding head
        self.autoregressive_head = nn.Linear(dim, num_classes)
        # Bug fix: raw logits cannot be fed back as dim-sized hidden states;
        # embed the predicted token id back to the model dimension instead.
        self.token_embed = nn.Embedding(num_classes, dim)

        # Weight initialization
        self._init_weights()

    def _init_weights(self):
        # Bug fix: was defined without `self`, raising TypeError on init.
        # Xavier-init every weight matrix; leave biases/1-D params at default.
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_normal_(p)

    def forward(self, x, targets=None):
        """Run both decoding modes.

        Args:
            x: input images, [B, 3, H, W].
            targets: currently unused; presumably intended for teacher
                forcing during training — TODO confirm and wire up.

        Returns:
            Tuple of (parallel_logits [B, W', num_classes],
            autoregressive_logits [B, max_seq_len, num_classes]).
        """
        # CNN feature extraction
        x = self.cnn(x)                             # [B, C, 1, W]
        x = x.squeeze(2).permute(0, 2, 1)           # [B, W, C]

        # Add positional encoding
        x = self.pos_enc(x)

        # Transformer encoder stack
        for block in self.transformer:
            x = block(x)

        # Parallel (non-autoregressive) decoding: one prediction per position.
        parallel_logits = self.parallel_head(x)

        # Greedy autoregressive decoding.
        # NOTE(review): the last encoder block is reused as the step function
        # here — confirm this weight sharing is intended rather than a
        # dedicated decoder block.
        autoregressive_logits = []
        hidden = x
        for _ in range(self.max_seq_len):
            hidden = self.transformer[-1](hidden)
            logits = self.autoregressive_head(hidden[:, -1, :])  # [B, num_classes]
            autoregressive_logits.append(logits)
            # Bug fix: embed the greedy prediction ([B] token ids -> [B, 1, dim])
            # before appending; concatenating logits directly mismatches the
            # hidden dimension whenever num_classes != dim.
            next_step = self.token_embed(logits.argmax(dim=-1)).unsqueeze(1)
            hidden = torch.cat([hidden, next_step], dim=1)
        autoregressive_logits = torch.stack(autoregressive_logits, dim=1)
        return parallel_logits, autoregressive_logits