import numpy as np
from autocode_torch import processInput
from torch.autograd import Variable
import torch
import paras
from torch import nn

BATCH_SIZE = 64  # training batch size; NOTE: also reused below as the LSTM hidden size
fspan = 30  # forecast span — NOTE(review): unused in this file; presumably consumed by callers, confirm
TIME_STEP = 2  # number of time steps per RNN input sequence
INPUT_SIZE = 10  # feature dimension fed to the LSTM at each time step
CLASS_NUM = 7  # number of output classes produced by the classifier head

# Best-effort load of the pre-trained autoencoder for this symbol/quote type.
# When the pickle is missing or unloadable the module still imports, but
# getone()/getone1() will fail with NameError on first use.
try:
    autocoder = torch.load('data/{}_{}_autoencoder.pkl'.format(paras.symbol, paras.qt_type))
    autocoder.eval()  # inference mode: freezes dropout / batch-norm statistics
    # presumably tells the model to return encodings only (skip the decoder
    # half) — confirm against autocode_torch.
    autocoder.need_decode = False
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
    # propagate; any load failure is deliberately ignored (best effort).
    pass


def getone(his):
    """Encode the last 100 closing values of *his* with the autoencoder.

    ``his`` is either a sequence of bar dicts carrying a ``"Close"`` key, or
    a plain sequence of numbers (in which case the trailing slice is used
    as-is).

    Returns the encoder output flattened to a Python list of floats.
    """
    try:
        one = [o["Close"] for o in his[-100:]]
    except (TypeError, KeyError):
        # Elements are not subscriptable dicts (or lack "Close"): treat the
        # slice as raw numeric values.  Narrowed from a bare `except:`, which
        # also hid unrelated bugs such as NameError.
        one = [his[-100:]]
    one = np.array(one, dtype=np.float32).reshape(1, -1)
    one = processInput(one)
    var = Variable(torch.from_numpy(one))
    var = autocoder(var)
    return var.data.numpy().reshape(-1).tolist()


def getone1(his):
    """Encode the last 100 bar closes of *his* via the autoencoder.

    Unlike :func:`getone`, the elements of *his* must be dicts with a
    ``"Close"`` key.  Returns the encoding as a flat list of floats.
    """
    closes = [bar["Close"] for bar in his[-100:]]
    features = np.array(closes, dtype=np.float32).reshape(1, -1)
    features = processInput(features)
    encoded = autocoder(Variable(torch.from_numpy(features)))
    return encoded.data.numpy().reshape(-1).tolist()


class RNN(nn.Module):
    """LSTM classifier: one LSTM layer followed by a small MLP head that
    maps the final time step's hidden state to CLASS_NUM sigmoid scores."""

    def __init__(self):
        super(RNN, self).__init__()

        # Single-layer LSTM; batch_first so inputs/outputs are shaped
        # (batch, time_step, input_size).
        # NOTE(review): hidden_size is tied to BATCH_SIZE (64) — presumably a
        # coincidence of values rather than an intended coupling; confirm.
        self.rnn = nn.LSTM(
            input_size=INPUT_SIZE,
            hidden_size=BATCH_SIZE,
            num_layers=1,
            batch_first=True,
        )

        # Classification head applied to the LSTM's last hidden output:
        # 64 -> 32 -> 16 -> 8 -> CLASS_NUM, with batch-norm, light dropout
        # and tanh between layers, sigmoid on the final scores.
        head = [
            nn.Linear(BATCH_SIZE, 32), nn.BatchNorm1d(32), nn.Dropout(0.1), nn.Tanh(),
            nn.Linear(32, 16), nn.BatchNorm1d(16), nn.Dropout(0.1), nn.Tanh(),
            nn.Linear(16, 8), nn.BatchNorm1d(8), nn.Tanh(),
            nn.Linear(8, CLASS_NUM), nn.BatchNorm1d(CLASS_NUM), nn.Sigmoid(),
        ]
        self.out = nn.Sequential(*head)

    def forward(self, x):
        """x: (batch, time_step, INPUT_SIZE) -> (batch, CLASS_NUM) scores."""
        # Passing None as the initial state means zero-initialized h0/c0.
        seq_out, _ = self.rnn(x, None)
        # Classify from the output at the last time step only.
        return self.out(seq_out[:, -1, :])
