import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import pandas as pd
from business_feature import *
import time
import matplotlib.pyplot as plt
from grid_feature import *
from torch import nn, optim


class LSTM_FC2(torch.nn.Module):
    """Plain 3-layer MLP regressor (despite the name, it contains no LSTM).

    Architecture: in_dim -> n_hidden_1 -> n_hidden_2 -> out_dim, with ReLU
    after the first two linear layers. In the experiment below it is fed the
    hand-crafted features concatenated with a pre-trained LSTM's hidden state.
    """

    def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
        super(LSTM_FC2, self).__init__()
        self.layer1 = nn.Sequential(nn.Linear(in_dim, n_hidden_1), nn.ReLU(True))
        self.layer2 = nn.Sequential(nn.Linear(n_hidden_1, n_hidden_2), nn.ReLU(True))
        self.layer3 = nn.Sequential(nn.Linear(n_hidden_2, out_dim))

    def forward(self, x_fc):
        """Run the MLP on a (batch, in_dim) tensor; returns (batch, out_dim)."""
        x_fc = self.layer1(x_fc)
        x_fc = self.layer2(x_fc)
        x_fc = self.layer3(x_fc)
        return x_fc

    @staticmethod
    def train_and_predict(x_train_fc, y_train, x_test_fc, y_test, learning_rate, num_epochs):
        """Train a fresh LSTM_FC2 on the train split and predict both splits.

        All inputs are numpy float32 arrays: x_* are (n, features), y_* are (n,).
        Returns (train_pred, test_pred, loss_changes) where predictions are
        (n, 1) numpy arrays and loss_changes is a per-epoch list of
        [train_loss, test_loss].
        """
        x_train_fc = torch.from_numpy(x_train_fc)
        y_train = torch.from_numpy(y_train)
        x_test_fc = torch.from_numpy(x_test_fc)
        y_test = torch.from_numpy(y_test)

        model = LSTM_FC2(x_train_fc.shape[1], 32, 4, 1)
        if torch.cuda.is_available():
            model = model.cuda()
            x_train_fc = x_train_fc.cuda()
            y_train = y_train.cuda()
            x_test_fc = x_test_fc.cuda()
            y_test = y_test.cuda()

        criterion = nn.MSELoss()
        optimizer = optim.Adam(model.parameters(), lr=learning_rate)
        loss_changes = []

        lstm_train_out = None
        for epoch in range(num_epochs):
            lstm_train_out = model(x_train_fc)
            # Targets are (n,); predictions are (n, 1) -> align shapes for MSE.
            loss = criterion(lstm_train_out, y_train.unsqueeze(1))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if (epoch + 1) % 100 == 0:
                print('Epoch[{}/{}], loss:{:.6f}'.format(epoch + 1, num_epochs, loss.item()))
            # Track the generalization loss each epoch; no_grad avoids building
            # a needless autograd graph for this monitoring-only forward pass.
            with torch.no_grad():
                loss_test = criterion(model(x_test_fc), y_test.unsqueeze(1))
            loss_changes.append([loss.item(), loss_test.item()])

        model.eval()
        with torch.no_grad():
            lstm_test_out = model(x_test_fc)
        lstm_test_out = lstm_test_out.detach().cpu().numpy()
        lstm_train_out = lstm_train_out.detach().cpu().numpy()

        return lstm_train_out, lstm_test_out, loss_changes


class LSTM_FC(torch.nn.Module):
    """LSTM encoder followed by a fully-connected regression head.

    The LSTM reads the lag-feature windows; its per-step hidden state is
    concatenated with the hand-crafted FC features and pushed through a
    3-layer MLP to predict a single value per sample.
    """

    def __init__(self, ln, lc, time_chunk_size, in_dim, n_hidden_1, n_hidden_2, out_dim):
        super(LSTM_FC, self).__init__()
        self.lstm = torch.nn.LSTM(input_size=time_chunk_size, hidden_size=ln, num_layers=lc)
        # The MLP input is the FC features plus the LSTM hidden state (size ln).
        # Previously this was a hard-coded 16, which silently required ln == 16.
        self.layer1 = nn.Sequential(nn.Linear(in_dim + ln, n_hidden_1), nn.ReLU(True))
        self.layer2 = nn.Sequential(nn.Linear(n_hidden_1, n_hidden_2), nn.ReLU(True))
        self.layer3 = nn.Sequential(nn.Linear(n_hidden_2, out_dim))

    def forward(self, x_lstm, x_fc):
        """x_lstm: (seq, batch, time_chunk_size); x_fc: (seq*batch, in_dim).

        Returns a (seq*batch, out_dim) tensor.
        """
        out, _ = self.lstm(x_lstm)
        s, b, h = out.shape
        out = out.view(s * b, h)
        # type_as also moves x_fc onto the LSTM output's device/dtype, so no
        # explicit .cuda() is needed (the old unconditional .cuda() broke the
        # CPU-model case on CUDA-capable machines).
        x_fc = torch.cat([out, x_fc.type_as(out)], dim=1)
        x_fc = self.layer1(x_fc)
        x_fc = self.layer2(x_fc)
        x_fc = self.layer3(x_fc)
        return x_fc

    @staticmethod
    def train_and_predict(x_train_lstm, x_train_fc, y_train, x_test_lstm, x_test_fc, y_test,
                          grid_num, time_chunk_size, learning_rate, num_epochs):
        """Train a fresh LSTM_FC model end-to-end and predict both splits.

        x_*_lstm are (seq, batch, time_chunk_size) float32 arrays, x_*_fc are
        (n, features), y_* are (n,). grid_num is unused here but kept for
        signature parity with LSTM.train_and_predict.
        Returns (train_pred, test_pred, loss_changes) with (n, 1) predictions
        and a per-epoch list of [train_loss, test_loss].
        """
        x_train_lstm = torch.from_numpy(x_train_lstm)
        x_train_fc = torch.from_numpy(x_train_fc)
        y_train = torch.from_numpy(y_train)
        x_test_lstm = torch.from_numpy(x_test_lstm)
        x_test_fc = torch.from_numpy(x_test_fc)
        y_test = torch.from_numpy(y_test)

        model = LSTM_FC(16, 12, time_chunk_size, x_train_fc.shape[1], 32, 4, 1)
        if torch.cuda.is_available():
            model = model.cuda()
            x_train_lstm = x_train_lstm.cuda()
            x_train_fc = x_train_fc.cuda()
            y_train = y_train.cuda()
            x_test_lstm = x_test_lstm.cuda()
            x_test_fc = x_test_fc.cuda()
            y_test = y_test.cuda()

        criterion = nn.MSELoss()
        optimizer = optim.Adam(model.parameters(), lr=learning_rate)
        loss_changes = []

        lstm_train_out = None
        for epoch in range(num_epochs):
            lstm_train_out = model(x_train_lstm, x_train_fc)
            # Targets are (n,); predictions are (n, 1) -> align shapes for MSE.
            loss = criterion(lstm_train_out, y_train.unsqueeze(1))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if (epoch + 1) % 100 == 0:
                print('Epoch[{}/{}], loss:{:.6f}'.format(epoch + 1, num_epochs, loss.item()))
            # Monitoring-only test forward pass: no_grad avoids building an
            # autograd graph for it.
            with torch.no_grad():
                loss_test = criterion(model(x_test_lstm, x_test_fc), y_test.unsqueeze(1))
            loss_changes.append([loss.item(), loss_test.item()])

        model.eval()
        with torch.no_grad():
            lstm_test_out = model(x_test_lstm, x_test_fc)
        lstm_test_out = lstm_test_out.detach().cpu().numpy()
        lstm_train_out = lstm_train_out.detach().cpu().numpy()

        return lstm_train_out, lstm_test_out, loss_changes


class LSTM(torch.nn.Module):
    """Pure LSTM regressor: LSTM encoder followed by one linear layer."""

    def __init__(self, ln, lc, time_chunk_size):
        super(LSTM, self).__init__()
        self.lstm = torch.nn.LSTM(input_size=time_chunk_size, hidden_size=ln, num_layers=lc)
        self.reg = nn.Linear(ln, 1)

    def forward(self, x):
        """x: (seq, batch, time_chunk_size).

        Returns (predictions, hidden): predictions is a flat (seq*batch,)
        tensor; hidden is the (seq*batch, ln) LSTM output, exposed so callers
        can reuse it as an input feature for downstream models.
        """
        out, _ = self.lstm(x)
        s, b, h = out.shape
        to_fc = out.view(s * b, h)
        out = self.reg(to_fc)
        # view(s * b) rather than view(b): the original only worked when the
        # sequence dimension was 1 (identical result in that case).
        out = out.view(s * b)
        return out, to_fc

    @staticmethod
    def train_and_predict(x_train, y_train, x_test, y_test, grid_num, time_chunk_size, learning_rate, num_epochs):
        """Train a fresh LSTM regressor and predict both splits.

        x_* are (seq, batch, time_chunk_size) float32 arrays, y_* are (n,)
        with n == seq*batch. grid_num is unused, kept for interface parity.
        Returns (train_pred, test_pred, loss_changes, to_fc_train, to_fc_test):
        flat (n,) predictions, per-epoch [train_loss, test_loss] pairs, and
        the (n, hidden) LSTM states for both splits.
        """
        x_train = torch.from_numpy(x_train)
        y_train = torch.from_numpy(y_train)
        x_test = torch.from_numpy(x_test)
        y_test = torch.from_numpy(y_test)

        model = LSTM(4, 2, time_chunk_size)
        if torch.cuda.is_available():
            model = model.cuda()
            x_train = x_train.cuda()
            y_train = y_train.cuda()
            x_test = x_test.cuda()
            y_test = y_test.cuda()

        criterion = nn.MSELoss()
        optimizer = optim.Adam(model.parameters(), lr=learning_rate)
        loss_changes = []

        lstm_train_out = None
        to_fc_train = None
        for epoch in range(num_epochs):
            lstm_train_out, to_fc_train = model(x_train)
            loss = criterion(lstm_train_out, y_train)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if (epoch + 1) % 100 == 0:
                print('Epoch[{}/{}], loss:{:.6f}'.format(epoch + 1, num_epochs, loss.item()))
            # Monitoring-only test forward pass: no autograd graph needed.
            with torch.no_grad():
                lstm_test_out, _ = model(x_test)
                loss_test = criterion(lstm_test_out, y_test)
            loss_changes.append([loss.item(), loss_test.item()])

        model.eval()
        with torch.no_grad():
            lstm_test_out, to_fc_test = model(x_test)
        lstm_test_out = lstm_test_out.detach().cpu().numpy()
        lstm_train_out = lstm_train_out.detach().cpu().numpy()
        to_fc_train = to_fc_train.detach().cpu().numpy()
        to_fc_test = to_fc_test.detach().cpu().numpy()
        return lstm_train_out, lstm_test_out, loss_changes, to_fc_train, to_fc_test


# Single-grid experiment on the Chengdu dataset: compare (1) a plain LSTM,
# (2) the end-to-end LSTM_FC model, and (3) an MLP (LSTM_FC2) fed with the
# hand-crafted features plus the trained LSTM's hidden states.
num = 100                      # NOTE(review): unused below -- presumably a grid-partition parameter; confirm
interval = 60                  # time-slot length, part of the feature-file name
time_chunk_size = 6            # number of lag columns fed to the LSTM (last1..last6)
data = pd.read_csv('./data/' + str(interval) + '_chengdu_feature_data.data')
scalar = 1397                  # de-normalization factor applied before validate()
grid_num = 1                   # single grid cell in this experiment
split_date = 21                # train on month_day <= 21, test on the rest
learning_rate = 0.01
num_epochs = 400
grid_ids = data['grid_id'].unique()

data = data[data['grid_id'] == 5861]   # keep exactly one grid cell

train = data[data.month_day <= split_date]
test = data[data.month_day > split_date]

# Targets: the (normalized) per-slot counts as flat float32 vectors.
y_train = np.array(train['counts'], dtype=np.float32).reshape(len(train))
y_test = np.array(test['counts'], dtype=np.float32).reshape(len(test))

results = []

# LSTM inputs: the six lag columns reshaped to (seq=1, batch, time_chunk_size).
x_train_lstm = np.array(train[['last1', 'last2', 'last3', 'last4', 'last5', 'last6']],
                        dtype=np.float32).reshape((1, int(len(train) / grid_num), time_chunk_size))
x_test_lstm = np.array(test[['last1', 'last2', 'last3', 'last4', 'last5', 'last6']],
                       dtype=np.float32).reshape((1, int(len(test) / grid_num), time_chunk_size))
# FC inputs: every remaining feature column except the lags and the target.
x_train_fc = np.array(train.drop(['last1', 'last2', 'last3', 'last4', 'last5', 'last6', 'counts'], axis=1), dtype=np.float32)
x_test_fc = np.array(test.drop(['last1', 'last2', 'last3', 'last4', 'last5', 'last6', 'counts'], axis=1), dtype=np.float32)

# (1) Plain LSTM; also returns its hidden states for reuse in step (3).
lstm_train_out, lstm_predict, lstm_loss_changes, to_fc_train, to_fc_test = \
    LSTM.train_and_predict(x_train_lstm, y_train, x_test_lstm, y_test,
                           grid_num, time_chunk_size, learning_rate, num_epochs)
num_epochs = 2000
# (2) Combined LSTM + FC model trained end-to-end (longer schedule).
combine_train_out, combine_predict, combine_loss_changes = \
    LSTM_FC.train_and_predict(x_train_lstm, x_train_fc, y_train, x_test_lstm, x_test_fc, y_test,
                              grid_num, time_chunk_size, learning_rate, num_epochs)

# (3) MLP on hand-crafted features stacked with the frozen LSTM states from (1).
x_train_fc2 = np.hstack([x_train_fc, to_fc_train])
x_test_fc2 = np.hstack([x_test_fc, to_fc_test])
combine_train_out2, combine_predict2, combine_loss_changes2 = \
    LSTM_FC2.train_and_predict(x_train_fc2, y_train, x_test_fc2, y_test, learning_rate, num_epochs)


# validate() comes from the star imports (business_feature / grid_feature) --
# presumably computes error metrics on the de-normalized series; TODO confirm.
results.append(validate(y_test * scalar, lstm_predict * scalar))
results.append(validate(y_test * scalar, combine_predict * scalar))
results.append(validate(y_test * scalar, combine_predict2 * scalar))


'''
# 成都数据集单格测试
num = 100
interval = 60
time_chunk_size = 6
data = pd.read_csv('./data/' + str(interval) + '_chengdu_feature_data.data')
scalar = 1397
grid_num = 1
split_date = 21
learning_rate = 0.01
num_epochs = 400
grid_ids = data['grid_id'].unique()

data = data[data['grid_id'] == 5861]

train = data[data.month_day <= split_date]
test = data[data.month_day > split_date]
y_train = np.array(train['counts'], dtype=np.float32).reshape(len(train))
y_test = np.array(test['counts'], dtype=np.float32).reshape(len(test))

results = []

x_train_lstm = np.array(train[['last1', 'last2', 'last3', 'last4', 'last5', 'last6']],
                        dtype=np.float32).reshape((grid_num, 1, int(len(train) / grid_num), time_chunk_size))
x_test_lstm = np.array(test[['last1', 'last2', 'last3', 'last4', 'last5', 'last6']],
                       dtype=np.float32).reshape((grid_num, 1, int(len(test) / grid_num), time_chunk_size))
# print(pd.DataFrame([y_test, lstm_predict]).T)


class lstm_reg(nn.Module):
    def __init__(self, input_size, hidden_size, output_size=1, num_layers=2):
        super(lstm_reg, self).__init__()

        self.rnn = nn.LSTM(input_size, hidden_size, num_layers)
        self.reg = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        x, _ = self.rnn(x)
        s, b, h = x.shape
        x = x.view(s * b, h)
        x = self.reg(x)
        x = x.view(b)
        return x


net = lstm_reg(6, 4)

criterion = nn.MSELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=1e-2)
x_train_lstm = x_train_lstm.reshape((1, x_train_lstm.shape[2], time_chunk_size))
x_test_lstm = x_test_lstm.reshape((1, x_test_lstm.shape[2], time_chunk_size))
for e in range(1000):
    var_x = torch.from_numpy(x_train_lstm)
    var_y = torch.from_numpy(y_train)

    out = net(var_x)
    loss = criterion(out, var_y)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # print('Epoch[{}/{}], loss:{:.6f}'.format(e + 1, num_epochs, loss.item()))


var_data = torch.from_numpy(x_test_lstm)
pred_test = net(var_data)
pred_test = pred_test.view(-1).data.numpy()

plt.plot([i+1 for i in range(len(y_test))], y_test*scalar)
plt.plot([i+1 for i in range(len(y_test))], pred_test*scalar)
plt.show()
'''
'''
# 厦门数据集单格测试
file_path = './data/'
num = 20
interval = 60
business = pd.read_csv(file_path + str(num)+'_'+str(interval)+'business_final.csv')
grid_num = len(business['grid_id'].unique())
poi = pd.read_csv(file_path + str(num) + 'counts_POI.csv')
weather = pd.read_csv(file_path + 'weather_data.csv')
data = pd.merge(business, poi, how='left').fillna(0)
data = pd.merge(data, weather, how='left', on=['month_day', 'hour']).fillna(0)
print(data.shape[0], data[data['counts'] == 0].shape[0])

split_date = 21
time_chunk_size = 6
learning_rate = 0.01
num_epochs = 20
grid_num = 1
business_step1 = pd.read_csv('./data/' + str(num) + '_' + str(interval) + 'business_step1.csv')
scalar = max(business_step1['counts']) - min(business_step1['counts'])
data = data[data['grid_id'] == 262]
data['counts'] = data['counts']*scalar


train = data[data.month_day <= split_date]
test = data[data.month_day > split_date]

y_train = np.array(train['counts'], dtype=np.float32).reshape(len(train))
y_test = np.array(test['counts'], dtype=np.float32).reshape(len(test))

results = []

x_train_lstm = np.array(train[['last1', 'last2', 'last3', 'last4', 'last5', 'last6']],
                        dtype=np.float32).reshape((grid_num, 1, int(len(train) / grid_num), time_chunk_size))
x_test_lstm = np.array(test[['last1', 'last2', 'last3', 'last4', 'last5', 'last6']],
                       dtype=np.float32).reshape((grid_num, 1, int(len(test) / grid_num), time_chunk_size))
lstm_train_out, lstm_predict, lstm_loss_changes = \
    LSTM.train_and_predict(x_train_lstm, y_train, x_test_lstm, y_test,
                           grid_num, time_chunk_size, learning_rate, num_epochs)
print(pd.DataFrame([y_test, lstm_predict]).T)
validate(y_test, lstm_predict)
'''


'''
'climate_cloudy',
       'climate_cloudytoovercast', 'climate_cloudytosmallrain',
       'climate_cloudytosunny', 'climate_midraintosmallrain',
       'climate_overcast', 'climate_overcasttocloudy',
       'climate_overcasttosmallrain', 'climate_overcasttosunny',
       'climate_rain', 'climate_smallrain', 'climate_sunny'

'climate_clear', 'climate_partlycloudy', 'climate_scatteredclouds',
       'climate_mostlycloudy', 'climate_lightrainshowers', 'climate_lightrain',
       'climate_unknown', 'climate_overcast', 'climate_rain',
       'climate_rainshowers', 'climate_thunderstorm',
       'climate_lightthunderstorms', 'climate_heavyrain'

word_to_ix = {'rain': 0, 'sun': 1}
embeds = nn.Embedding(1, 5)
hello_idx = torch.LongTensor([0])
hello_idx = Variable(hello_idx)
hello_embed = embeds(hello_idx)
print(hello_embed)

data = pd.read_csv('./data/60_chengdu_feature_data.data', index_col=0)
print(data[['counts', 'last1', 'last2', 'last3', 'last4', 'last5', 'last6']])
print(data.columns)
'''
'''
def embedding(col, target_dim):
    embeds = nn.Embedding(int(max(col)-min(col)+1), target_dim)
    tensor = embeds(torch.from_numpy(np.array(col - min(col))))
    return tensor.detach().numpy()


print(embedding(data['month_day'], 5))



print(data[['counts', 'last1', 'last2', 'last3', 'last4', 'last5', 'last6', 'avg_counts']])
'''
