import torch
import torch.nn as nn


class LSTMModel(nn.Module):
	"""Two-branch regressor fusing an LSTM over sequences with a CNN over images.

	The sequence branch projects each time step to ``hidden_layer_size``, runs a
	stacked LSTM, and flattens the final hidden state of every layer into a
	``(batch, num_layers * hidden_layer_size)`` vector. The image branch encodes
	the image through five conv blocks plus an MLP into a vector of the same
	width. The two vectors are concatenated, fused by a linear layer, and mapped
	to ``output_size`` predictions.
	"""

	def __init__(self, input_size, hidden_layer_size, num_layers, output_size, dropout):
		"""
		Args:
			input_size: number of features per time step of the sequence input.
			hidden_layer_size: LSTM hidden size (also the input projection width).
			num_layers: number of stacked LSTM layers.
			output_size: dimension of the final prediction.
			dropout: dropout probability applied before the output layer.
		"""
		super().__init__()
		self.hidden_layer_size = hidden_layer_size

		# Sequence branch: project raw features to the LSTM input width.
		self.linear_1 = nn.Linear(input_size, hidden_layer_size)
		# NOTE(review): unused in forward() (activation is disabled there);
		# kept registered so existing state_dict checkpoints still load.
		self.relu = nn.PReLU()

		self.lstm = nn.LSTM(hidden_layer_size, hidden_size=self.hidden_layer_size, num_layers=num_layers,
							batch_first=True)
		self.dropout = nn.Dropout(dropout)
		# Maps the flattened per-layer hidden states to the final prediction.
		self.linear_2 = nn.Linear(num_layers * hidden_layer_size, output_size)

		# NOTE(review): linear_3 is never called in forward() (leftover from a
		# flat 1000-dim image-feature variant); kept for checkpoint compatibility.
		self.linear_3 = nn.Linear(1000, num_layers * hidden_layer_size)
		# Fuses the concatenated [LSTM features | CNN features] vector.
		self.linear_4 = nn.Linear(num_layers * hidden_layer_size * 2, num_layers * hidden_layer_size)

		# Only re-initializes the LSTM weights; safe to call before the CNN
		# modules below are constructed.
		self.init_weights()

		# Image branch: five conv blocks, each halving the spatial resolution.
		# Shape comments assume a (batch, 3, 128, 128) input — TODO confirm with caller.
		self.cnn = nn.Sequential(
			nn.Conv2d(3, 64, 3, 1, 1),  # [64, 128, 128]
			nn.BatchNorm2d(64),
			nn.ReLU(),
			nn.MaxPool2d(2, 2, 0),  # [64, 64, 64]

			nn.Conv2d(64, 128, 3, 1, 1),  # [128, 64, 64]
			nn.BatchNorm2d(128),
			nn.ReLU(),
			nn.MaxPool2d(2, 2, 0),  # [128, 32, 32]

			nn.Conv2d(128, 256, 3, 1, 1),  # [256, 32, 32]
			nn.BatchNorm2d(256),
			nn.ReLU(),
			nn.MaxPool2d(2, 2, 0),  # [256, 16, 16]

			nn.Conv2d(256, 512, 3, 1, 1),  # [512, 16, 16]
			nn.BatchNorm2d(512),
			nn.ReLU(),
			nn.MaxPool2d(2, 2, 0),  # [512, 8, 8]

			nn.Conv2d(512, 512, 3, 1, 1),  # [512, 8, 8]
			nn.BatchNorm2d(512),
			nn.ReLU(),
			nn.MaxPool2d(2, 2, 0),  # [512, 4, 4]
		)
		# MLP head that compresses the flattened CNN features to the same width
		# as the flattened LSTM hidden state, so both branches can be concatenated.
		self.fc = nn.Sequential(
			nn.Linear(512 * 4 * 4, 1024),
			nn.ReLU(),
			nn.Linear(1024, 512),
			nn.ReLU(),
			nn.Linear(512, num_layers * hidden_layer_size)
		)

	def init_weights(self):
		"""Initialize LSTM parameters: zero biases, Kaiming-normal input-to-hidden
		weights, and orthogonal hidden-to-hidden weights."""
		for name, param in self.lstm.named_parameters():
			if 'bias' in name:
				nn.init.constant_(param, 0.0)
			elif 'weight_ih' in name:
				nn.init.kaiming_normal_(param)
			elif 'weight_hh' in name:
				nn.init.orthogonal_(param)

	def forward(self, x, img_x):
		"""Run both branches and fuse them.

		Args:
			x: sequence input of shape (batch, seq_len, input_size).
			img_x: image input; assumed (batch, 3, 128, 128) — TODO confirm.

		Returns:
			Tensor of shape (batch, output_size).
		"""
		batchsize = x.shape[0]

		# Project features to the LSTM input width (no activation in between).
		x = self.linear_1(x)

		# Only the final hidden state per layer is used; the per-step outputs
		# and cell state are discarded.
		_, (h_n, _) = self.lstm(x)

		# h_n is (num_layers, batch, hidden); flatten to (batch, num_layers * hidden).
		x = h_n.permute(1, 0, 2).reshape(batchsize, -1)

		# Image branch: conv features, flattened, then compressed by the MLP head.
		cnn_feat = self.cnn(img_x)
		cnn_flat = cnn_feat.view(cnn_feat.size(0), -1)
		img_x = self.fc(cnn_flat)

		# Fuse the two equally-sized branch vectors.
		x = torch.cat((x, img_x), 1)
		x = self.linear_4(x)

		# Output head.
		x = self.dropout(x)
		predictions = self.linear_2(x)
		return predictions