import torch
import torch.nn as nn
import torch.optim as optim
from torch.nn import Transformer
from torch.utils.data import DataLoader, Dataset
import math
import torch.nn.functional as F

# Dataset wrapper class
class MyDataset(Dataset):
    """Expose an in-memory list of samples through the torch Dataset protocol."""

    def __init__(self, data):
        # Keep a reference to the caller-provided sample list; no copying.
        self.data = data

    def __len__(self):
        # Number of samples available.
        return len(self.data)

    def __getitem__(self, idx):
        # Return the idx-th sample unchanged (DataLoader handles batching).
        return self.data[idx]

# Generator (G): Transformer encoder-decoder mapping a source token sequence
# to per-position logits over the target vocabulary.
class Generator(nn.Module):
    """Seq2seq Transformer generator.

    Fixes over the naive version:
      * adds sinusoidal positional encoding — without it, nn.Transformer is
        permutation-invariant over token positions;
      * applies a causal target mask so decoder position i cannot attend to
        target positions > i (otherwise teacher-forced training lets the
        decoder simply copy future tokens).

    forward(src, tgt): (batch, src_len) and (batch, tgt_len) LongTensors of
    token ids -> (batch, tgt_len, tgt_vocab) unnormalized logits.
    """

    def __init__(self, src_vocab, tgt_vocab, d_model=512, nhead=8, num_encoder_layers=6, num_decoder_layers=6,
                 dim_feedforward=2048, dropout=0.1, max_len=5000):
        super(Generator, self).__init__()
        self.d_model = d_model
        self.transformer = Transformer(d_model, nhead, num_encoder_layers, num_decoder_layers, dim_feedforward, dropout)
        self.src_embedding = nn.Embedding(src_vocab, d_model)
        self.tgt_embedding = nn.Embedding(tgt_vocab, d_model)
        self.linear = nn.Linear(d_model, tgt_vocab)
        self.emb_dropout = nn.Dropout(dropout)
        # Precompute the standard sinusoidal positional-encoding table
        # (max_len, d_model); assumes d_model is even, as in the defaults.
        position = torch.arange(max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2, dtype=torch.float) * (-math.log(10000.0) / d_model))
        pe = torch.zeros(max_len, d_model)
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        # Buffer (not a parameter): moves with .to(device), excluded from optimizers.
        self.register_buffer('pos_encoding', pe)

    def forward(self, src, tgt):
        # Scale embeddings by sqrt(d_model) (Vaswani et al., 2017) and add positions.
        src = self.src_embedding(src) * math.sqrt(self.d_model)
        tgt = self.tgt_embedding(tgt) * math.sqrt(self.d_model)
        src = self.emb_dropout(src + self.pos_encoding[:src.size(1)])
        tgt = self.emb_dropout(tgt + self.pos_encoding[:tgt.size(1)])
        # nn.Transformer (batch_first=False) expects (seq, batch, d_model).
        src = src.transpose(0, 1)
        tgt = tgt.transpose(0, 1)
        # Causal mask: -inf above the diagonal blocks attention to future positions.
        tgt_mask = self.transformer.generate_square_subsequent_mask(tgt.size(0)).to(tgt.device)
        output = self.transformer(src, tgt, tgt_mask=tgt_mask)
        # Back to (batch, seq, d_model), then project to vocabulary logits.
        return self.linear(output.transpose(0, 1))

# Discriminator (D): a 1-D CNN that scores a sequence of feature vectors as real/fake.
class Discriminator(nn.Module):
    """CNN discriminator over d_model-dimensional sequence features.

    Input: (batch, seq_len, d_model) float tensor.
    Output: (batch, 1) sigmoid probabilities of being "real".
    `vocab_size` is accepted but unused here; it is kept only so existing
    callers keep working.
    """

    def __init__(self, vocab_size, d_model=512):
        super(Discriminator, self).__init__()
        # Two same-padding convolutions along the sequence axis, d_model channels each.
        self.conv1 = nn.Conv1d(d_model, d_model, kernel_size=3, padding=1)
        self.conv2 = nn.Conv1d(d_model, d_model, kernel_size=3, padding=1)
        # Classifier head applied after pooling over time.
        self.fc1 = nn.Linear(d_model, d_model)
        self.fc2 = nn.Linear(d_model, 1)

    def forward(self, x):
        # Conv1d wants channels first: (batch, seq, d_model) -> (batch, d_model, seq).
        feats = x.transpose(1, 2)
        for conv in (self.conv1, self.conv2):
            feats = F.relu(conv(feats))
        pooled = feats.mean(dim=2)          # average-pool over the sequence axis
        hidden = F.relu(self.fc1(pooled))
        return torch.sigmoid(self.fc2(hidden))

# Data preparation: a toy parallel corpus of random token-id pairs.
src_vocab_size = 10000
tgt_vocab_size = 10000
# 1000 (source, target) pairs; each side is a length-10 LongTensor of token ids.
data = [(torch.randint(0, src_vocab_size, (10,)), torch.randint(0, tgt_vocab_size, (10,))) for _ in range(1000)]
dataset = MyDataset(data)
data_loader = DataLoader(dataset, batch_size=32, shuffle=True)

# Model initialization
generator = Generator(src_vocab_size, tgt_vocab_size)
# NOTE(review): Discriminator never uses its vocab_size argument — its convs
# are built on d_model=512 channels regardless; confirm intended input space.
discriminator = Discriminator(tgt_vocab_size)

# Loss function and optimizers: one binary real/fake loss shared by G and D.
criterion = nn.BCELoss()
optimizer_G = optim.Adam(generator.parameters(), lr=0.0001)
optimizer_D = optim.Adam(discriminator.parameters(), lr=0.0001)

# Training loop: alternating GAN updates (D step, then G step) per batch.
# NOTE(review): the original loop fed raw token ids — and vocab-sized logits —
# straight into the CNN discriminator, whose Conv1d layers expect
# (batch, seq, d_model) float inputs; both calls crashed on shape mismatch.
# The discriminator now scores sequences in embedding space: real targets are
# embedded with the generator's target embedding table, and generator logits
# are mapped to a differentiable "soft embedding" (softmax-weighted average of
# embedding vectors), which keeps the fake path differentiable for the G step.
num_epochs = 10
for epoch in range(num_epochs):
    for src, tgt in data_loader:
        batch_size = src.size(0)  # last batch may be smaller than 32
        real_labels = torch.ones(batch_size, 1)   # label 1 = real
        fake_labels = torch.zeros(batch_size, 1)  # label 0 = fake

        # ---- Train discriminator D ----
        optimizer_D.zero_grad()
        # Real targets in embedding space; detached so D's update cannot
        # reach into the generator's embedding table.
        real_dense = generator.tgt_embedding(tgt).detach()
        d_loss_real = criterion(discriminator(real_dense), real_labels)

        # No grad needed through G during the D step.
        with torch.no_grad():
            gen_logits = generator(src, tgt)  # (batch, seq, tgt_vocab)
        fake_dense = (F.softmax(gen_logits, dim=-1) @ generator.tgt_embedding.weight).detach()
        d_loss_fake = criterion(discriminator(fake_dense), fake_labels)

        d_loss = d_loss_real + d_loss_fake  # total discriminator loss
        d_loss.backward()
        optimizer_D.step()

        # ---- Train generator G ----
        optimizer_G.zero_grad()
        gen_logits = generator(src, tgt)
        fake_dense = F.softmax(gen_logits, dim=-1) @ generator.tgt_embedding.weight
        # G wants D to label its output as real.
        g_loss = criterion(discriminator(fake_dense), real_labels)
        g_loss.backward()
        optimizer_G.step()

    # Report the losses from the last batch of the epoch.
    print(f'Epoch [{epoch + 1}/{num_epochs}], d_loss: {d_loss.item()}, g_loss: {g_loss.item()}')
