import math
import random


# Generate a random "Chinese" sentence from a small fixed word pool.
def generate_chinese_sentence():
    """Return a random sentence of 1-10 words drawn from a fixed pool, joined without spaces."""
    word_pool = ['我', '你', '他', '她', '它', '是', '不是', '很', '不', '喜欢', '讨厌', '吃', '喝', '玩', '乐']
    word_count = random.randint(1, 10)
    # One random word per position; generator keeps the RNG call order of a plain loop.
    return ''.join(random.choice(word_pool) for _ in range(word_count))


# Generate a random "English" sentence from a small fixed word pool.
def generate_english_sentence():
    """Return a random sentence of 1-10 words drawn from a fixed pool, joined by spaces."""
    word_pool = ['I', 'you', 'he', 'she', 'it', 'am', 'is', 'are', 'like', 'dislike', 'eat', 'drink', 'play', 'fun']
    word_count = random.randint(1, 10)
    # One random word per position; generator keeps the RNG call order of a plain loop.
    return ' '.join(random.choice(word_pool) for _ in range(word_count))


# Build synthetic (Chinese, English) sentence pairs for training.
# NOTE: source and target are sampled independently, so the pairs are
# not real translations of each other.
train_data = [(generate_chinese_sentence(), generate_english_sentence()) for _ in range(10000)]

# Held-out pairs, generated the same way.
test_data = [(generate_chinese_sentence(), generate_english_sentence()) for _ in range(1000)]

import torch
import torch.nn as nn
import torch.nn.functional as F


# Seq2seq Transformer: embeddings + positional encoding + encoder/decoder stacks.
class TransformerModel(nn.Module):
    """Encoder-decoder Transformer for character-id sequence translation.

    Expects batch-first integer tensors: ``src`` of shape (batch, src_len) and
    ``tgt`` of shape (batch, tgt_len); returns logits of shape
    (batch, tgt_len, output_vocab_size).
    """

    def __init__(self, input_vocab_size, output_vocab_size, d_model, nhead, num_encoder_layers, num_decoder_layers,
                 dim_feedforward, dropout):
        super(TransformerModel, self).__init__()
        self.d_model = d_model
        self.nhead = nhead
        self.num_encoder_layers = num_encoder_layers
        self.num_decoder_layers = num_decoder_layers

        # Separate embedding tables for the source and target vocabularies.
        self.embedding_src = nn.Embedding(input_vocab_size, d_model)
        self.embedding_tgt = nn.Embedding(output_vocab_size, d_model)
        self.pos_encoder = PositionalEncoding(d_model, dropout)

        encoder_layer = nn.TransformerEncoderLayer(d_model=d_model, nhead=nhead, dim_feedforward=dim_feedforward,
                                                   dropout=dropout)
        encoder_norm = nn.LayerNorm(d_model)
        self.encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_encoder_layers, norm=encoder_norm)

        decoder_layer = nn.TransformerDecoderLayer(d_model=d_model, nhead=nhead, dim_feedforward=dim_feedforward,
                                                   dropout=dropout)
        decoder_norm = nn.LayerNorm(d_model)
        self.decoder = nn.TransformerDecoder(decoder_layer, num_layers=num_decoder_layers, norm=decoder_norm)

        # Projects decoder states back to target-vocabulary logits.
        self.fc = nn.Linear(d_model, output_vocab_size)

        self.init_weights()

    def init_weights(self):
        """Initialize embeddings and output projection uniformly in [-0.1, 0.1]."""
        initrange = 0.1
        self.embedding_src.weight.data.uniform_(-initrange, initrange)
        self.embedding_tgt.weight.data.uniform_(-initrange, initrange)
        self.fc.bias.data.zero_()
        self.fc.weight.data.uniform_(-initrange, initrange)

    def forward(self, src, tgt, tgt_mask=None):
        """Run a full encode/decode pass.

        Args:
            src: LongTensor (batch, src_len) of source token ids.
            tgt: LongTensor (batch, tgt_len) of target token ids.
            tgt_mask: optional (tgt_len, tgt_len) additive attention mask.
                When None, a causal (subsequent-position) mask is generated so
                each target position can only attend to earlier positions.

        Returns:
            FloatTensor (batch, tgt_len, output_vocab_size) of logits.

        BUG FIX: the encoder/decoder layers here use the seq-first default,
        but the data pipeline pads batch-first tensors. The original forward
        fed (batch, seq, d_model) straight in, so attention mixed examples
        across the batch dimension and the positional encoding ran along the
        batch axis. We now transpose to (seq, batch, d_model) around the
        Transformer and transpose back before returning. A causal target mask
        is also applied (the original never masked future positions).
        """
        # (batch, src_len) -> (src_len, batch, d_model), scaled per the paper.
        src = self.embedding_src(src).transpose(0, 1) * math.sqrt(self.d_model)
        src = self.pos_encoder(src)
        memory = self.encoder(src)

        tgt = self.embedding_tgt(tgt).transpose(0, 1) * math.sqrt(self.d_model)
        tgt = self.pos_encoder(tgt)
        if tgt_mask is None:
            # Causal mask: -inf above the diagonal blocks attention to future tokens.
            size = tgt.size(0)
            tgt_mask = torch.triu(torch.full((size, size), float('-inf'), device=tgt.device), diagonal=1)
        output = self.decoder(tgt, memory, tgt_mask=tgt_mask)
        output = self.fc(output)

        # Back to batch-first logits: (batch, tgt_len, vocab).
        return output.transpose(0, 1)


import math


# Sinusoidal positional encoding (fixed, non-learned), applied additively.
class PositionalEncoding(nn.Module):
    """Add fixed sin/cos position signals to a (seq, batch, d_model) tensor."""

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        # Precompute the (max_len, d_model) table once at construction time.
        table = torch.zeros(max_len, d_model)
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        freqs = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        angles = positions * freqs
        table[:, 0::2] = torch.sin(angles)  # even feature indices
        table[:, 1::2] = torch.cos(angles)  # odd feature indices
        # Shape (max_len, 1, d_model) so it broadcasts over the batch dimension.
        self.register_buffer('pe', table.unsqueeze(1))

    def forward(self, x):
        """Add position signals for the first x.size(0) steps, then apply dropout."""
        return self.dropout(x + self.pe[:x.size(0), :])


# Batch collation: maps characters to ids via the module-level char2idx table
# (characters missing from the table fall back to id 0, the <pad> id).
def collate_fn(batch):
    """Turn [(src_str, tgt_str), ...] into padded id tensors plus per-sequence lengths."""
    def encode(text):
        return torch.LongTensor([char2idx.get(ch, 0) for ch in text])

    src_seqs = [encode(pair[0]) for pair in batch]
    tgt_seqs = [encode(pair[1]) for pair in batch]
    src_lens = torch.LongTensor([len(seq) for seq in src_seqs])
    tgt_lens = torch.LongTensor([len(seq) for seq in tgt_seqs])
    padded_src = nn.utils.rnn.pad_sequence(src_seqs, batch_first=True, padding_value=0)
    padded_tgt = nn.utils.rnn.pad_sequence(tgt_seqs, batch_first=True, padding_value=0)
    return padded_src, padded_tgt, src_lens, tgt_lens


# One training epoch with teacher forcing.
def train(model, optimizer, criterion, train_data, batch_size, device):
    """Train for one epoch over train_data; return the mean per-batch loss.

    Uses teacher forcing: the decoder input is the target shifted right
    (tgt[:, :-1]) and the loss target is the target shifted left (tgt[:, 1:]).
    """
    model.train()
    total_loss = 0
    data_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
    for src, tgt, src_lens, tgt_lens in data_loader:
        src = src.to(device)
        tgt = tgt.to(device)
        tgt_input = tgt[:, :-1]
        tgt_output = tgt[:, 1:]
        # BUG FIX: the original built a causal mask here (instantiating a whole
        # nn.Transformer per batch) but never passed it to the model, so it was
        # dead code and pure overhead; removed. NOTE(review): the model's own
        # forward should apply a causal tgt mask — confirm before relying on
        # these loss numbers.
        optimizer.zero_grad()
        output = model(src, tgt_input)
        # Flatten to (batch*seq, vocab) vs (batch*seq,) for cross-entropy.
        loss = criterion(output.view(-1, output.size(-1)), tgt_output.view(-1))
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    return total_loss / len(data_loader)


# Evaluation pass (no gradients, no parameter updates).
def evaluate(model, criterion, test_data, batch_size, device):
    """Compute the mean per-batch loss over test_data in eval mode."""
    model.eval()
    total_loss = 0
    data_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=False, collate_fn=collate_fn)
    with torch.no_grad():
        for src, tgt, src_lens, tgt_lens in data_loader:
            src = src.to(device)
            tgt = tgt.to(device)
            tgt_input = tgt[:, :-1]
            tgt_output = tgt[:, 1:]
            # BUG FIX: as in train(), the original computed a causal mask here
            # (instantiating an nn.Transformer per batch) but never used it;
            # the dead code has been removed.
            output = model(src, tgt_input)
            loss = criterion(output.view(-1, output.size(-1)), tgt_output.view(-1))
            total_loss += loss.item()
    return total_loss / len(data_loader)


# Map Chinese and English characters (plus special tokens) to contiguous ids.
# BUG FIX: the original did `char2idx[c] = len(char2idx)` unconditionally, so a
# repeated character ('是'/'不' in the Chinese string, every letter of the
# English word string) was REASSIGNED an id equal to the current dict size
# without growing the dict. That made ids collide and produced ids >= the
# vocabulary size — an out-of-range index for nn.Embedding at runtime.
# Only assign an id the first time a character is seen.
char2idx = {'<pad>': 0, '<sos>': 1, '<eos>': 2}
for c in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ':
    if c not in char2idx:
        char2idx[c] = len(char2idx)
for c in '我你他她它是不是很不喜欢讨厌吃喝玩乐':
    if c not in char2idx:
        char2idx[c] = len(char2idx)
for c in 'Iyouhesheitamisarelikedislikeeatdrinkplayfun':
    if c not in char2idx:
        char2idx[c] = len(char2idx)
# Inverse mapping for decoding model output ids back to text.
idx2char = {i: c for c, i in char2idx.items()}

# Hyperparameters (toy-scale settings for the synthetic data).
input_vocab_size = len(char2idx)  # source side uses the shared character table
output_vocab_size = len(char2idx)  # target side uses the same table
d_model = 256  # embedding / model width
nhead = 8  # attention heads; must divide d_model
num_encoder_layers = 6
num_decoder_layers = 6
dim_feedforward = 512  # inner width of the position-wise feed-forward layers
dropout = 0.1
batch_size = 32
lr = 0.001  # Adam learning rate
num_epochs = 10

# Build the model, optimizer, and loss; pad id 0 is excluded from the loss.
model = TransformerModel(input_vocab_size, output_vocab_size, d_model, nhead, num_encoder_layers, num_decoder_layers,
                         dim_feedforward, dropout)
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
criterion = nn.CrossEntropyLoss(ignore_index=0)

# Pick the best available device.
# BUG FIX: the original fell back to 'mps' unconditionally whenever CUDA was
# absent, which crashes on machines without Apple-silicon MPS support; probe
# MPS availability and fall through to CPU instead.
if torch.cuda.is_available():
    device = torch.device('cuda')
elif torch.backends.mps.is_available():
    device = torch.device('mps')
else:
    device = torch.device('cpu')
model.to(device)

# Train and evaluate for the configured number of epochs.
for epoch in range(num_epochs):
    train_loss = train(model, optimizer, criterion, train_data, batch_size, device)
    test_loss = evaluate(model, criterion, test_data, batch_size, device)
    print(f'Epoch [{epoch + 1}/{num_epochs}], Train Loss: {train_loss:.4f}, Test Loss: {test_loss:.4f}')

# Greedy-decoding demo on a single source sentence.
model.eval()
src = '我喜欢吃饭'
# Characters missing from the vocabulary map to id 0 (<pad>).
src = torch.LongTensor([char2idx.get(char, 0) for char in src]).unsqueeze(0).to(device)
tgt_input = torch.LongTensor([[char2idx['<sos>']]]).to(device)
with torch.no_grad():
    # Extend the target one token at a time, up to 10 tokens, stopping at <eos>.
    for i in range(10):
        output = model(src, tgt_input)
        # argmax of softmax == argmax of logits; kept for parity with the original.
        output = F.softmax(output, dim=-1)
        output = output.argmax(dim=-1)
        if output[0][-1] == char2idx['<eos>']:
            break
        tgt_input = torch.cat([tgt_input, output[:, -1:]], dim=-1)
# BUG FIX: tensors have no .mps() method (the original crashed here with
# AttributeError); move to CPU and convert to plain Python ints instead.
# This also works when the model ran on CUDA or MPS.
output = tgt_input[0][1:].cpu().tolist()
output = ''.join([idx2char.get(idx, '') for idx in output])
print(output)

# Script entry-point marker. NOTE(review): all of the heavy work above runs at
# import time; consider moving it under this guard (or into a main()).
if __name__ == '__main__':
    print("========")