import numpy as np
import torch
from collections import Counter

max_epochs = 100
learning_rate = 0.1

input = "hihell"
output = "ihello"

char_freq = dict(Counter(input + output))
# print(char_freq)

idx2char = [c for c in char_freq.keys()]
char2idx = {c:i for i, c in enumerate(idx2char)}
print(idx2char)
print(char2idx)

def char_to_onehot(c):
    """Return a one-hot numpy vector (length == vocab size) for character *c*.

    Raises KeyError if *c* is not in the vocabulary.
    """
    # Row char2idx[c] of the identity matrix is exactly the one-hot encoding.
    return np.eye(len(idx2char))[char2idx[c]]

# Sanity check: 'h' is the first vocab entry, so expect [1, 0, 0, 0, 0].
one_hot = char_to_onehot('h')
print(one_hot)

class MyModel(torch.nn.Module):
    """Single-layer vanilla RNN with a per-timestep linear readout.

    Maps a one-hot character sequence of shape [B, s, input_size] to
    per-character class scores (logits) of shape [B, s, output_size].
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(MyModel, self).__init__()
        self.rnn = torch.nn.RNN(
            input_size=input_size,
            hidden_size=hidden_size,
            batch_first=True,  # inputs/outputs are [B, s, *]
            bidirectional=False,
            num_layers=1,
        )
        # nn.Linear acts on the last dimension, so it scores every
        # timestep independently.
        self.fc = torch.nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """x: [B, s, input_size] -> logits [B, s, output_size]."""
        # output: [B, s, hidden]; final hidden state [num_layers, B, hidden]
        # is unused here.
        output, _ = self.rnn(x)
        # Linear broadcasts over leading dims — no view/reshape round-trip
        # (the original flattened to [B*s, hidden] and reshaped back).
        return self.fc(output)

# A hidden size of 2 is plenty for this 5-character toy vocabulary.
model = MyModel(len(idx2char), 2, len(idx2char))

# CrossEntropyLoss consumes raw logits; targets are class indices.
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# The training pair never changes, so build the tensors once, outside the
# epoch loop. Stacking the one-hot rows into a single ndarray first avoids
# the slow (and warning-prone) tensor-from-list-of-ndarrays path.
input_tensor = torch.tensor(
    np.array([char_to_onehot(c) for c in input]), dtype=torch.float32
).unsqueeze(0)  # [1, s, vocab]
output_tensor = torch.tensor([char2idx[c] for c in output], dtype=torch.long)  # [s]

for epoch in range(max_epochs):
    pred_output = model(input_tensor)  # [1, s, vocab]

    # BUG FIX: CrossEntropyLoss expects [N, C] logits vs [N] targets. The
    # original passed [1, s, C] vs [s], which raises a target-size error
    # (for 3-D input, dim 1 is treated as the class dim). Flatten the batch
    # and time dims before computing the loss.
    loss = criterion(pred_output.view(-1, pred_output.shape[-1]), output_tensor)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    print("epoch: {:d}, loss: {:.4f}".format(epoch, loss.item()))


print('finish')

