# LSTM模型
import torch
from torch import nn
from torch.functional import F


class LSTM(nn.Module):
	"""Conditional LSTM regressor.

	A per-user feature vector is projected down to the sequence feature
	size and added onto every timestep of the input sequence before the
	sequence is run through a single-layer LSTM. Only the prediction for
	the final timestep is returned.
	"""

	def __init__(self, input_size=5, user_size=20, hidden_layer_size=100, output_size=1, device=None):
		"""
		Args:
			input_size: number of features per timestep of the sequence.
			user_size: dimensionality of the raw user feature vector.
			hidden_layer_size: LSTM hidden-state size.
			output_size: number of regression outputs per timestep.
			device: device for the initial hidden/cell state. Defaults to
				CUDA when available, else CPU. (Previously this read the
				module-level ``device`` global, which is defined *after*
				this class — a NameError if the class is instantiated
				before that line runs; the parameter removes that
				ordering dependency while staying backward compatible.)
		"""
		super().__init__()
		if device is None:
			device = 'cuda' if torch.cuda.is_available() else 'cpu'
		self.hidden_layer_size = hidden_layer_size
		# User-feature embedding layer: projects the user vector into the
		# same space as one timestep of the input sequence.
		self.fc = nn.Linear(user_size, input_size)
		self.lstm = nn.LSTM(input_size, hidden_layer_size)
		self.linear = nn.Linear(hidden_layer_size, output_size)
		# Persistent (h, c) state, shape (num_layers=1, batch=1, hidden).
		# NOTE(review): it is updated on every forward() call, so callers
		# are presumably expected to reset it between sequences — confirm
		# against the training loop.
		self.hidden_cell = (torch.zeros(1, 1, self.hidden_layer_size).to(device),
		                    torch.zeros(1, 1, self.hidden_layer_size).to(device))

	def forward(self, input_seq, user_tensor):
		"""Run one sequence through the conditional LSTM.

		Args:
			input_seq: tensor of shape (seq_len, input_size) — assumed
				from the view()/broadcast below; TODO confirm with caller.
			user_tensor: user feature tensor of shape (user_size,).

		Returns:
			Prediction for the final timestep, shape (output_size,).
		"""
		# Embed the user features and add them to every timestep
		# (broadcast over the sequence dimension).
		# torch.relu is identical to F.relu and avoids depending on the
		# non-public `from torch.functional import F` re-export.
		user_tensor = torch.relu(self.fc(user_tensor))
		input_seq = input_seq + user_tensor
		# nn.LSTM expects (seq_len, batch=1, input_size).
		lstm_out, self.hidden_cell = self.lstm(input_seq.view(len(input_seq), 1, -1), self.hidden_cell)
		predictions = self.linear(lstm_out.view(len(input_seq), -1))
		# Only the last timestep's output is of interest.
		return predictions[-1]


# Path where the trained conditional-LSTM weights are stored.
lstm_path = "./model/cond_lstm.pt"
# Run on the GPU when one is available, otherwise fall back to the CPU.
if torch.cuda.is_available():
	device = 'cuda'
else:
	device = 'cpu'


# 创建数据窗口
# Build sliding training windows
def create_inout_sequences(input_data, tw):
	"""Slice ``input_data`` into (window, next-value) training pairs.

	Args:
		input_data: indexable, sliceable sequence of observations
			(list or 1-D tensor).
		tw: training-window length.

	Returns:
		A list of ``(seq, label)`` tuples where ``seq`` is a length-``tw``
		slice and ``label`` is the single value that follows it. One extra
		pair built from the last ``tw`` values with a placeholder label
		``[0]`` is appended so the most recent window can be fed to the
		model for prediction.
	"""
	inout_seq = []
	for start in range(len(input_data) - tw):
		window = input_data[start:start + tw]
		label = input_data[start + tw:start + tw + 1]
		inout_seq.append((window, label))
	# BUG FIX: the trailing prediction window was hard-coded to the last
	# 5 values (`input_data[-5:]`); use the actual window size ``tw`` so
	# the pair matches every other window when tw != 5.
	inout_seq.append((input_data[-tw:], [0]))

	return inout_seq
