import torch
import numpy as np
from sklearn.preprocessing import scale

# (removed leftover debug statement `print(1) #test` — it only printed "1" on every run)

# batch_size = 3
# hidden_size = 5
# embedding_dim = 6
# seq_length = 4
# num_layers = 1
# num_directions = 1
# vocab_size = 20
#
# import numpy as np
#
# input_data = np.random.uniform(0, 19, size=(batch_size, seq_length))
# input_data = torch.from_numpy(input_data).long()
# embedding_layer = torch.nn.Embedding(vocab_size, embedding_dim)
# lstm_layer = torch.nn.LSTM(input_size=embedding_dim, hidden_size=hidden_size, num_layers=num_layers,
# 						   bias=True, batch_first=False, dropout=0.5, bidirectional=False)
# lstm_input = embedding_layer(input_data)
# assert lstm_input.shape == (batch_size, seq_length, embedding_dim)
# lstm_input.transpose_(1, 0)
# assert lstm_input.shape == (seq_length, batch_size, embedding_dim)
# output, (h_n, c_n) = lstm_layer(lstm_input)
# assert output.shape == (seq_length, batch_size, hidden_size)
# assert h_n.shape == c_n.shape == (num_layers * num_directions, batch_size, hidden_size)

# data = np.array([[200, 6, 3], [100, 200, 300]])
# data_list = scale(data)
# print(data_list) # test

# numpy is already imported as `np` at the top of the file; duplicate import removed.

def mape(actual, pred):
    """Return the Mean Absolute Percentage Error (MAPE), in percent.

    Parameters
    ----------
    actual : array-like of numbers
        Ground-truth values. Must contain no zeros (MAPE divides by them).
    pred : array-like of numbers
        Predicted values, broadcastable against ``actual`` (normally the
        same length).

    Returns
    -------
    float
        ``mean(|(actual - pred) / actual|) * 100``.

    Raises
    ------
    ValueError
        If any element of ``actual`` is zero. The previous version silently
        divided by zero, emitting a NumPy RuntimeWarning and returning
        inf/nan; failing loudly is safer for callers.
    """
    actual = np.asarray(actual, dtype=float)
    pred = np.asarray(pred, dtype=float)
    if np.any(actual == 0):
        # MAPE is mathematically undefined for zero actuals — fail explicitly
        # instead of propagating inf/nan downstream.
        raise ValueError("MAPE is undefined when `actual` contains zeros")
    return float(np.mean(np.abs((actual - pred) / actual)) * 100)

# Smoke-test the MAPE implementation on a small hand-crafted example.
observed = [12, 13, 14, 15, 15, 22, 27]
forecast = [11, 13, 14, 14, 15, 16, 18]

print(mape(observed, forecast))


