
from torch.utils.data import Dataset, DataLoader
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math

class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product attention.

    Projects queries/keys/values into ``n_heads`` subspaces of size
    ``d_model // n_heads``, attends within each head, then recombines the
    heads with a final linear projection.
    """

    def __init__(self, d_model, n_heads):
        super(MultiHeadAttention, self).__init__()
        self.d_model = d_model
        self.n_heads = n_heads
        self.d_k = d_model // n_heads

        self.q_linear = nn.Linear(d_model, d_model)
        self.v_linear = nn.Linear(d_model, d_model)
        self.k_linear = nn.Linear(d_model, d_model)
        self.fc = nn.Linear(d_model, d_model)
        # Registered as a submodule so model.eval() disables it.  The
        # original constructed nn.Dropout inside forward(), and a freshly
        # built module defaults to training mode, so dropout was applied
        # even during evaluation.
        self.attn_dropout = nn.Dropout(p=0.1)

    def forward(self, q, k, v, mask=None):
        """Compute multi-head attention.

        Args:
            q, k, v: tensors of shape (batch, seq_len, d_model).
            mask: optional (batch, len_q, len_k); positions where
                mask == 0 are blocked from attention.

        Returns:
            (output, attn): output is (batch, len_q, d_model); attn is
            (batch * n_heads, len_q, len_k) with the head index varying
            slowest (head-major), matching the internal flat layout.
        """
        bs, len_q, _ = q.size()
        bs, len_k, _ = k.size()
        bs, len_v, _ = v.size()

        q = self.q_linear(q).view(bs, len_q, self.n_heads, self.d_k)
        k = self.k_linear(k).view(bs, len_k, self.n_heads, self.d_k)
        v = self.v_linear(v).view(bs, len_v, self.n_heads, self.d_k)

        # Flatten to (n_heads * bs, seq, d_k); flat index = head * bs + batch
        # (head-major).
        q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, self.d_k)
        k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, self.d_k)
        v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, self.d_k)

        scores = torch.matmul(q, k.transpose(1, 2)) / math.sqrt(self.d_k)

        if mask is not None:
            # Tile the mask head-major to match `scores` above.  The
            # original tiled it batch-major (unsqueeze(1).repeat(1, n_heads,
            # 1, 1).view(-1, ...)), which applied the wrong sample's mask
            # whenever bs > 1 and the mask differed across the batch.
            mask = mask.repeat(self.n_heads, 1, 1)
            scores = scores.masked_fill(mask == 0, -1e9)

        attn = F.softmax(scores, dim=-1)
        attn = self.attn_dropout(attn)
        context = torch.matmul(attn, v)
        context = context.view(self.n_heads, bs, len_q, self.d_k).permute(1, 2, 0, 3).contiguous().view(bs, len_q, -1)
        output = self.fc(context)
        return output, attn


class FeedForward(nn.Module):
    """Position-wise feed-forward block: Linear -> ReLU -> Dropout -> Linear."""

    def __init__(self, d_model, d_ff):
        super(FeedForward, self).__init__()
        self.d_model = d_model
        self.d_ff = d_ff

        self.fc1 = nn.Linear(d_model, d_ff)
        self.fc2 = nn.Linear(d_ff, d_model)
        # Registered as a submodule so model.eval() disables it.  The
        # original constructed nn.Dropout inside forward(), which defaults
        # to training mode and therefore applied dropout even at eval time.
        self.dropout = nn.Dropout(p=0.1)

    def forward(self, x):
        """Apply the two-layer MLP; input and output shape (..., d_model)."""
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)
        return x



class PositionalEncoding(torch.nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., 2017).

    pe[pos, 2i]   = sin(pos / 10000^(2i / d_model))
    pe[pos, 2i+1] = cos(pos / 10000^(2i / d_model))
    """

    def __init__(self, d_model, max_seq_len=512):
        super(PositionalEncoding, self).__init__()

        # Vectorized construction of the encoding table.  The original
        # double loop had two defects: it doubled the exponent (used 2*i
        # although i already stepped by 2, and gave the cos of a pair a
        # different frequency than its sin), and it crashed with an
        # IndexError for odd d_model (pe[pos, i + 1] out of range).
        pe = torch.zeros(max_seq_len, d_model)
        position = torch.arange(0, max_seq_len, dtype=torch.float32).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, d_model, 2, dtype=torch.float32)
            * (-math.log(10000.0) / d_model)
        )
        pe[:, 0::2] = torch.sin(position * div_term)
        # For odd d_model there is one fewer cosine column than sine column.
        pe[:, 1::2] = torch.cos(position * div_term[: d_model // 2])

        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)

    def forward(self, x):
        """Add positional encodings to x of shape (batch, seq_len, d_model)."""
        x = x + self.pe[:, :x.size(1)]
        return x


class TransformerClassifier(nn.Module):
    """Post-norm Transformer encoder over sequences of scalars.

    Each timestep (a single float) is projected to d_model, combined with
    a learned positional embedding, passed through n_layers of
    self-attention and feed-forward sublayers (residual + LayerNorm),
    mean-pooled over time, and mapped to one logit per sequence.
    """

    def __init__(self, d_model=64, n_layers=4, n_heads=4, dropout=0.1, d_ff=128):
        super().__init__()
        self.n_layers = n_layers
        self.d_model = d_model
        self.n_heads = n_heads
        self.d_ff = d_ff
        self.dropout = dropout

        self.input_layer = nn.Linear(1, d_model)
        # Learned positions; sequences longer than 1000 are unsupported.
        self.pos_embedding = nn.Embedding(num_embeddings=1000, embedding_dim=d_model)
        self.layers = nn.ModuleList([nn.ModuleList([MultiHeadAttention(d_model=d_model, n_heads=n_heads),
                                                    nn.LayerNorm(normalized_shape=d_model),
                                                    nn.ModuleList([FeedForward(d_model=d_model, d_ff=d_ff),
                                                                   nn.LayerNorm(normalized_shape=d_model)])])
                                     for _ in range(n_layers)])
        self.output_layer = nn.Linear(d_model, 1)
        # Registered submodule (parameter-free, so state_dict keys are
        # unchanged) so that model.eval() disables it.  The original built
        # nn.Dropout inside forward(), which stays in training mode and
        # applied dropout even during evaluation.
        self.drop = nn.Dropout(p=dropout)

    def forward(self, x):
        """Map x of shape (batch, seq_len) to a (batch,) tensor of logits."""
        bs, seq_len = x.size()

        pos = torch.arange(start=0, end=seq_len, dtype=torch.long, device=x.device).unsqueeze(0).repeat(bs, 1)
        x = x.unsqueeze(-1)
        x = self.input_layer(x)
        x = x + self.pos_embedding(pos)

        # Causal (lower-triangular) mask, identical for every sample.
        mask = torch.tril(torch.ones(seq_len, seq_len, device=x.device)).unsqueeze(0).repeat(bs, 1, 1)

        for attn, attn_norm, (ff, ff_norm) in self.layers:
            # Self-attention sublayer with residual connection.
            x_res = x
            x, _ = attn(x, x, x, mask)
            x = self.drop(x)
            x = attn_norm(x + x_res)

            # Feed-forward sublayer with residual connection.
            x_res = x
            x = ff(x)
            x = self.drop(x)
            x = ff_norm(x + x_res)

        x = self.output_layer(x.mean(dim=1))
        # squeeze(-1), not squeeze(): a bare squeeze() collapses a batch of
        # size 1 to a 0-d tensor, breaking shape-sensitive loss functions.
        return x.squeeze(-1)


class MyDataset(Dataset):
    """Dataset over a list of (feature_sequence, label) pairs.

    Each item is returned as a pair of float32 tensors.
    """

    def __init__(self, data):
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        features, label = self.data[index]
        return (
            torch.tensor(features, dtype=torch.float32),
            torch.tensor(label, dtype=torch.float32),
        )

if __name__ == "__main__":
    # Toy dataset: increasing sequences are labeled 0, decreasing ones 1.
    data = [
        ([1.0, 2.0, 3.0, 4.0, 5.0], 0),
        ([5.0, 4.0, 3.0, 2.0, 1.0], 1),
        ([2.0, 4.0, 6.0, 8.0, 10.0], 0),
        ([10.0, 8.0, 6.0, 4.0, 2.0], 1)
    ]

    # Build the dataset and data loader.
    dataset = MyDataset(data)
    loader = DataLoader(dataset, batch_size=32, shuffle=True)

    # Model, optimizer, and loss.  The criterion is constructed once here;
    # the original rebuilt nn.BCEWithLogitsLoss on every step.
    model = TransformerClassifier()
    optimizer = torch.optim.Adam(model.parameters())
    criterion = nn.BCEWithLogitsLoss()

    # Train the model.
    model.train()
    for epoch in range(10):
        total_loss = 0.0
        n_batches = 0
        for x, y in loader:
            optimizer.zero_grad()
            y_pred = model(x)
            loss = criterion(y_pred, y)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
            n_batches += 1
        # Report the mean loss over the epoch; the original printed only the
        # last batch's loss and raised NameError if the loader was empty.
        print(f"Epoch {epoch + 1}, Loss: {total_loss / max(n_batches, 1):.4f}")
