import torch
import torch.nn as nn
class CrossAttention(nn.Module):
    """Cross-attention module: ``input1`` attends to ``input2``.

    ``input1`` is projected to queries and ``input2`` is projected to keys
    and values; standard scaled dot-product attention is then applied, so the
    output has the same leading dimensions as ``input1`` and feature size
    ``dim``.

    Args:
        dim: Size of the last dimension of ``input1``; also the attention
            and output feature size.
        lenth: Size of the last dimension of ``input2``. (Parameter name is
            likely a typo for "length" but is kept as-is to preserve the
            public interface for keyword callers.)
    """

    def __init__(self, dim, lenth):
        super().__init__()  # zero-arg super: Python 3 idiom
        self.query_layer = nn.Linear(dim, dim)
        self.key_layer = nn.Linear(lenth, dim)
        self.value_layer = nn.Linear(lenth, dim)
        # 1/sqrt(d_k) score scaling, per "Attention Is All You Need".
        self.scale = dim ** 0.5

    def forward(self, input1, input2):
        """Attend ``input1`` over ``input2``.

        Args:
            input1: Query-source tensor; last dimension must be ``dim``.
            input2: Key/value-source tensor; last dimension must be ``lenth``.
                NOTE(review): leading (batch) dims must be broadcast-compatible
                with ``input1``'s for the matmul — confirm against callers.

        Returns:
            Tensor with ``input1``'s leading dimensions and last dimension
            ``dim`` — each query position is a softmax-weighted mix of the
            projected ``input2`` values.
        """
        query = self.query_layer(input1)
        key = self.key_layer(input2)
        value = self.value_layer(input2)
        # Scaled dot-product scores: (..., q_len, kv_len) after transposing
        # the key's last two dims; softmax normalizes over the kv axis.
        attention_scores = torch.matmul(query, key.transpose(-2, -1)) / self.scale
        attention_weights = torch.softmax(attention_scores, dim=-1)
        attended_output = torch.matmul(attention_weights, value)
        return attended_output