import numpy as np
import torch as pt
from torch.nn import functional
import sys
import os

# Reproducibility: pin both RNGs that this script uses.
pt.manual_seed(777)
np.random.seed(777)
pt.set_printoptions(edgeitems=200)

# Hyper-parameters and on-disk locations for the checkpoint/vocabulary.
VER = 'v5.0'
ALPHA = 0.01          # learning rate for Adam
N_RNN_HIDDEN = 128    # LSTM hidden units
N_EPOCHS = 200
FILE_NAME = os.path.basename(__file__)
SAVE_PATH = os.path.join('_save', FILE_NAME, VER, 'save.tmp.dat')
DICT_PATH = os.path.join('_save', FILE_NAME, VER, 'dict.tmp.dat')

# Training corpus: one sentence, modeled character by character.
sentence = ("if you want to build a ship, don't drum up people together to "
            "collect wood and don't assign them tasks and work, but rather "
            "teach them to long for the endless immensity of the sea.")

# Vocabulary persistence: reuse a previously saved char->index mapping so
# indices stay stable across runs; otherwise derive one from the corpus
# and store it for next time.
if not os.path.exists(DICT_PATH):
    idx2char = list(set(sentence))  # index -> char, e.g. ['i', 'l', 'e', ...]
    char2idx = {c: n for n, c in enumerate(idx2char)}
    os.makedirs(os.path.dirname(DICT_PATH), exist_ok=True)
    pt.save(char2idx, DICT_PATH)
else:
    char2idx = pt.load(DICT_PATH)
    # Rebuild the inverse lookup table from the stored forward mapping.
    idx2char = [0] * len(char2idx)
    for c, n in char2idx.items():
        idx2char[n] = c

print(char2idx)
seq_length = timesteps = 10       # characters per training window
num_classes = len(char2idx)       # vocabulary size
print('seq_length', seq_length)
print('num_classes', num_classes)

# Slide a window over the sentence: the target is the input shifted
# right by one character (next-character prediction at every position).
dataX = []
dataY = []
for start in range(0, len(sentence) - seq_length):
    x_str = sentence[start:start + seq_length]
    y_str = sentence[start + 1: start + seq_length + 1]
    if start < 5:  # show a few example pairs
        print(x_str, '->', y_str)

    dataX.append([char2idx[c] for c in x_str])  # chars -> indices
    dataY.append([char2idx[c] for c in y_str])  # chars -> indices

# Inputs: one-hot encode every character index. The float dtype here is
# what determines the model's working dtype.
dataX = functional.one_hot(pt.tensor(dataX), num_classes=num_classes)
dataX = dataX.view(-1, seq_length, num_classes).float()  # (samples, timesteps, features)
print('dataX', dataX.shape)  # (170, 10, 25)

# Targets: CrossEntropyLoss expects long class indices, flattened over
# batch and time — no one-hot needed.
dataY = pt.tensor(dataY).view(-1).long()
print('dataY', dataY.shape)  # (1700)


class MyLstmModel(pt.nn.Module):
    """Stacked-LSTM character model.

    Maps a batch of one-hot encoded character windows to per-timestep
    logits over the vocabulary.
    """

    def __init__(self, num_classes, seq_length, hidden_size, num_layers, **kwargs):
        """
        Args:
            num_classes: vocabulary size (input feature dim and output dim).
            seq_length: window length (stored for reference; forward infers
                shapes from its input).
            hidden_size: number of LSTM hidden units.
            num_layers: number of stacked LSTM layers.
        """
        super().__init__(**kwargs)
        self.num_classes = num_classes
        self.seq_length = seq_length
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # batch_first=True: inputs are (batch, seq, feature) instead of the
        # default (seq, batch, feature).
        self.lstm = pt.nn.LSTM(input_size=num_classes,
                               hidden_size=hidden_size,
                               num_layers=num_layers,
                               batch_first=True)
        self.fc = pt.nn.Linear(hidden_size, num_classes)

    def forward(self, input):
        """Return logits of shape (batch * seq_length, num_classes)."""
        x = input
        m = input.size(0)  # batch size
        # FIX: build the initial hidden/cell state on the same device and
        # dtype as the input. The previous pt.zeros(...) always allocated
        # on CPU with the default dtype, which breaks as soon as the model
        # runs on a GPU (identical behavior on CPU/float32).
        h0 = input.new_zeros(self.num_layers, m, self.hidden_size)
        c0 = input.new_zeros(self.num_layers, m, self.hidden_size)
        x, _ = self.lstm(x, (h0, c0))        # (batch, seq, hidden)
        x = x.reshape(-1, self.hidden_size)  # flatten time into the batch dim
        x = self.fc(x)                       # (batch * seq, num_classes)
        return x


# Loss, model and optimizer. Note: Adam is required here — plain SGD
# fails to converge on this task (per the original author's note).
criterion = pt.nn.CrossEntropyLoss()
model = MyLstmModel(num_classes=num_classes,
                    seq_length=seq_length,
                    hidden_size=N_RNN_HIDDEN,
                    num_layers=2)
optim = pt.optim.Adam(params=model.parameters(), lr=ALPHA)


def acc(ht, yt):
    """Fraction of positions where the argmax prediction matches the label.

    ht: logits of shape (N, num_classes); yt: integer labels of shape (N,).
    Returns a 0-dim double tensor in [0, 1].
    """
    predicted = ht.argmax(dim=1)
    correct = predicted == yt.long()
    return correct.double().mean()


# Train from scratch unless a checkpoint already exists on disk.
if os.path.exists(SAVE_PATH):
    model.load_state_dict(pt.load(SAVE_PATH))
    print('Loaded')
else:
    loss_history = np.zeros(N_EPOCHS)
    GROUP = int(np.ceil(N_EPOCHS / 20))  # print ~20 progress lines total
    for step in range(N_EPOCHS):
        model.train()
        optim.zero_grad()
        ht = model(dataX)            # full-batch training (small dataset)
        cost = criterion(ht, dataY)
        cost.backward()
        optim.step()
        model.eval()
        # FIX: use .item()/.detach() instead of the deprecated Tensor.data
        # attribute, which silently bypasses autograd tracking.
        costv = cost.item()
        loss_history[step] = costv
        accv = acc(ht.detach(), dataY).item()
        if step % GROUP == 0:
            print(f'#{step + 1}: cost = {costv}, acc = {accv}')
    # Print the final epoch's stats if the loop above didn't already.
    if step % GROUP != 0:
        print(f'#{step + 1}: cost = {costv}, acc = {accv}')
    os.makedirs(os.path.dirname(SAVE_PATH), exist_ok=True)
    pt.save(model.state_dict(), SAVE_PATH)
    print('Saved')

# Final evaluation: run the whole dataset through the trained model and
# print predicted vs. target windows side by side.
model.eval()
# FIX: inference needs no autograd graph; the original forward pass here
# tracked gradients for nothing.
with pt.no_grad():
    preds = model(dataX)
accv = acc(preds, dataY).item()  # .item() instead of deprecated .data.numpy()
print(f'ACC = {accv}')
preds = preds.reshape(-1, seq_length, num_classes)
preds = preds.argmax(dim=2)
dataY = dataY.reshape(-1, seq_length)
for i, pred in enumerate(preds):
    result = [idx2char[j] for j in pred.tolist()]

    # FIX: keep this in torch instead of calling np.argmax on a tensor —
    # recover each input character from its one-hot row.
    x_index = dataX[i].argmax(dim=1).tolist()
    x_str = [idx2char[j] for j in x_index]

    y_str = [idx2char[j] for j in dataY[i].tolist()]

    print('|', ''.join(x_str), '| -> |', ''.join(result), '| (h)', sep='')
    print('|', ''.join(x_str), '| -> |', ''.join(y_str), '| (tgt)', sep='')