import torch
import torch.nn.functional as F
import numpy as np
import torch.nn as nn
import torch.utils.data as Data

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Vocabulary: 'S' = start-of-sequence, 'E' = end-of-sequence, '?' = padding,
# followed by the lowercase alphabet. Build forward and reverse index maps.
letter = [c for c in 'SE?abcdefghijklmnopqrstuvwxyz']
letter2idx = {n: i for i, n in enumerate(letter)}  # char -> index
idx2letter = {i: n for i, n in enumerate(letter)}  # index -> char
print(letter)
print(letter2idx)
print(idx2letter)

# Training pairs (source word -> target word).
seq_data = [['man', 'woman'], ['black', 'white'], ['king', 'queen'], ['girl', 'body'], ['up', 'down'], ['high', 'low']]
# Longest word in the corpus; every word is padded with '?' to this length.
n_step = max([max(len(i), len(j)) for i, j in seq_data])

num_layers = 2             # stacked LSTM layers
n_hidden = 128             # LSTM hidden size
n_class = len(letter2idx)  # vocabulary size (one-hot dimension)
batch_size = 3


def make_data(seq_data):
    """Convert [source, target] word pairs into model-ready tensors.

    Args:
        seq_data: list of [source, target] word pairs; each word is at most
            n_step characters from the vocabulary.

    Returns:
        Tuple of float tensors:
        - encoder inputs, one-hot, shape (batch, n_step, n_class)
        - decoder inputs ('S' + target), one-hot, shape (batch, n_step + 1, n_class)
        - decoder targets (target + 'E'), class indices, shape (batch, n_step + 1)
    """
    encode_input_all, decode_input_all, decode_output_all = [], [], []
    for src, tgt in seq_data:
        # Pad both words to n_step with '?' — on local copies, so the
        # caller's list is NOT mutated (the original padded in place).
        src = src + '?' * (n_step - len(src))
        tgt = tgt + '?' * (n_step - len(tgt))

        encode_input = [letter2idx[ch] for ch in src]
        decode_input = [letter2idx[ch] for ch in ('S' + tgt)]   # decoder input starts with 'S'
        decode_output = [letter2idx[ch] for ch in (tgt + 'E')]  # decoder target ends with 'E'

        encode_input_all.append(np.eye(n_class)[encode_input])  # one-hot rows
        decode_input_all.append(np.eye(n_class)[decode_input])
        decode_output_all.append(decode_output)

    # Stack with numpy first: constructing a tensor from a list of ndarrays
    # is much slower than converting a single contiguous array.
    return (torch.Tensor(np.stack(encode_input_all)),
            torch.Tensor(np.stack(decode_input_all)),
            torch.Tensor(decode_output_all))


# Materialize the full (six-pair) training set once; batching happens below.
encode_input_all, decode_input_all, decode_output_all = make_data(seq_data)


# print(decode_output_all)
# print(decode_output_all.shape)

class MyDataSet(Data.Dataset):
    """Dataset of aligned (encoder input, decoder input, decoder target) triples."""

    def __init__(self, encode_input_all, decode_input_all, decode_output_all):
        self.encode_input_all = encode_input_all
        self.decode_input_all = decode_input_all
        self.decode_output_all = decode_output_all

    def __len__(self):
        # Fixed: the original had a stray `s` statement after this return
        # (dead code that would be a NameError if ever executed).
        return len(self.encode_input_all)

    def __getitem__(self, idx):
        return self.encode_input_all[idx], self.decode_input_all[idx], self.decode_output_all[idx]


loader = Data.DataLoader(MyDataSet(encode_input_all, decode_input_all, decode_output_all), batch_size, shuffle=True)

'''
x: input sequence, shape (batch_size, seq_length, input_size).
h0: initial hidden state, shape (num_layers, batch_size, hidden_size).
c0: initial cell state, shape (num_layers, batch_size, hidden_size).
'''


class Encoder(nn.Module):
    """Multi-layer LSTM encoder over one-hot character sequences."""

    def __init__(self, input_size, hidden_size, num_layers=1):
        super(Encoder, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)

    def forward(self, x, h0, c0):
        """Run the LSTM and return (per-step outputs, final hidden, final cell)."""
        outputs, final_state = self.lstm(x, (h0, c0))
        h_n, c_n = final_state
        return outputs, h_n, c_n


class Attention(nn.Module):
    """Bahdanau-style additive attention: score(s, h) = v . tanh(W1 s + W2 h)."""

    def __init__(self, hidden_size):
        super(Attention, self).__init__()
        self.hidden_size = hidden_size
        self.W1 = nn.Linear(hidden_size, hidden_size)
        self.W2 = nn.Linear(hidden_size, hidden_size)
        self.V = nn.Parameter(torch.rand(hidden_size))

    def forward(self, encoder_outputs, decoder_hidden):
        # encoder_outputs: (batch, src_len, hidden)
        # decoder_hidden:  (num_layers, batch, hidden) — only the top layer is used.
        query = decoder_hidden[-1].unsqueeze(1)  # (batch, 1, hidden)
        # Additive energy, broadcast across source positions.
        energy = torch.tanh(self.W1(encoder_outputs) + self.W2(query))  # (batch, src_len, hidden)
        # Dot each position's energy with v: (batch, src_len, hidden) x (batch, hidden, 1).
        v_col = self.V.view(1, -1, 1).expand(energy.size(0), -1, -1)
        scores = torch.bmm(energy, v_col).squeeze(2)  # (batch, src_len)
        attention_weights = F.softmax(scores, dim=1).unsqueeze(1)  # (batch, 1, src_len)
        context_vector = torch.bmm(attention_weights, encoder_outputs)  # (batch, 1, hidden)
        return context_vector, attention_weights


class Decoder(nn.Module):
    """LSTM decoder that consumes [token one-hot ; attention context] per step."""

    def __init__(self, input_size, hidden_size, num_layers=1):
        super(Decoder, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # LSTM input is the token concatenated with the context vector.
        self.lstm = nn.LSTM(input_size + hidden_size, hidden_size, num_layers, batch_first=True)
        self.attention = Attention(hidden_size)

    def forward(self, input, hidden, cell, encoder_outputs):
        """Attend over the encoder outputs once, then decode the whole sequence."""
        context, attn_weights = self.attention(encoder_outputs, hidden)
        # Broadcast the single context vector across every decoder time step.
        context = context.expand(-1, input.size(1), -1)
        lstm_input = torch.cat((input, context), dim=2)
        output, (hidden, cell) = self.lstm(lstm_input, (hidden, cell))
        return output, hidden, cell, attn_weights


class Seq2Seq(nn.Module):
    """Encoder-decoder with attention; projects decoder outputs to letter logits."""

    def __init__(self):
        super(Seq2Seq, self).__init__()
        self.encoder = Encoder(input_size=n_class, hidden_size=n_hidden, num_layers=num_layers)
        self.decoder = Decoder(input_size=n_class, hidden_size=n_hidden, num_layers=num_layers)
        self.fc = nn.Linear(n_hidden, n_class)

    def forward(self, encode_input, decode_input, h0, c0):
        """Encode the source, decode with attention, return per-step logits."""
        encoder_outputs, h, c = self.encoder(encode_input, h0, c0)
        decoder_outputs, _, _, attention_weights = self.decoder(decode_input, h, c, encoder_outputs)
        logits = self.fc(decoder_outputs)
        return logits, attention_weights


model = Seq2Seq().to(device)
criterion = nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

for epoch in range(300):
    model.train()

    # Six training pairs with batch_size = 3 -> two mini-batches per epoch.
    for encode_input_batch, decode_input_batch, decode_output_batch in loader:
        # Fresh zero initial LSTM state for every batch; sized to the actual
        # batch dimension so a smaller final batch would also work.
        hidden = torch.zeros(num_layers, encode_input_batch.size(0), n_hidden).to(device)
        cell = torch.zeros(num_layers, encode_input_batch.size(0), n_hidden).to(device)

        encode_input_batch = encode_input_batch.to(device)
        decode_input_batch = decode_input_batch.to(device)
        decode_output_batch = decode_output_batch.to(device)

        optimizer.zero_grad()

        predict, _ = model(encode_input_batch, decode_input_batch, hidden, cell)

        # CrossEntropyLoss expects (N, C) logits and (N,) class indices,
        # hence the flatten and the float->long cast on the targets.
        loss = criterion(predict.view(-1, predict.size(2)), decode_output_batch.view(-1).long())
        loss.backward()
        optimizer.step()

        if (epoch + 1) % 100 == 0:
            print("Epoch:", "%04d" % (epoch + 1), 'Cost = ', "{:.6f}".format(loss))


def translate(word):
    """Greedy-decode the trained model's translation of a single source word.

    Args:
        word: source word (lowercase letters; at most n_step characters).

    Returns:
        The predicted target word, truncated at the first 'E' token (if any)
        and with padding ('?') stripped.
    """
    # Empty target: the decoder input becomes 'S' + padding.
    encode_input, decode_input, _ = make_data([[word, '']])
    encode_input, decode_input = encode_input.to(device), decode_input.to(device)

    hidden = torch.zeros(num_layers, 1, n_hidden).to(device)
    cell = torch.zeros(num_layers, 1, n_hidden).to(device)

    # Inference only — no gradients needed (the original tracked them).
    with torch.no_grad():
        output, _ = model(encode_input, decode_input, hidden, cell)

    predict = output.data.max(2, keepdim=True)[1]  # argmax over the class dimension

    decoded = [letter[i] for i in predict.view(-1)]

    # Keep everything before the first end token. If the model never emits
    # 'E', keep the whole sequence instead of raising ValueError as before.
    if 'E' in decoded:
        decoded = decoded[:decoded.index('E')]
    return ''.join(decoded).replace('?', '')


# Sanity checks: the first three words are from the training set; the last
# three probe generalization to unseen inputs.
print(translate("man"))
print(translate("black"))
print(translate("king"))
print(translate("mans"))
print(translate("waters"))
print(translate("m"))
