# coding: utf-8
import torch
from torch import nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import numpy
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score

import matplotlib
# matplotlib.use('Agg')
# get_ipython().magic(u'matplotlib inline')

import datetime as dt, itertools, pandas as pd, matplotlib.pyplot as plt, numpy as np
import logging as logger

# Detect GPU availability once at import time; reused by the model code below.
use_cuda = torch.cuda.is_available()
# NOTE(review): `logger` here is the logging MODULE (imported as `logger` on
# L17), so this goes to the root logger at INFO level. With no prior logging
# configuration the default threshold is WARNING, so this message is silently
# dropped — confirm whether a basicConfig(level=INFO) call was intended.
logger.info("Is CUDA available? %s.", use_cuda)


def get_r2_numpy(x, y):
    """Return the coefficient of determination (R²) of a linear fit of y on x.

    A degree-1 least-squares line is fitted, then R² is computed as
    1 - SS_res / ((n - 1) * sample variance of y).
    """
    slope, intercept = np.polyfit(x, y, 1)
    residuals = y - (slope * x + intercept)
    ss_res = sum(residuals ** 2)
    ss_tot = (len(y) - 1) * np.var(y, ddof=1)
    return 1 - ss_res / ss_tot


class lstm(nn.Module):
    """Single-layer LSTM that reads T - 1 time steps of driving features,
    each concatenated with the target's previous value, and predicts the
    next target value.

    Args:
        input_size: number of driving (exogenous) features per time step.
        hidden_size: dimension of the LSTM hidden state.
        T: window length; the model consumes T - 1 steps of history.
        logger: logger instance kept for diagnostics (not used in forward).
    """

    def __init__(self, input_size, hidden_size, T, logger):
        super(lstm, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.T = T

        self.logger = logger

        # +1 input feature: each step is the driving vector concatenated
        # with the previous target value.
        self.lstm_layer = nn.LSTM(input_size=input_size + 1, hidden_size=hidden_size, num_layers=1)
        self.linear = nn.Linear(in_features=hidden_size, out_features=1)

    def forward(self, input_data, y_history):
        """Run the window through the LSTM one step at a time.

        Args:
            input_data: tensor of shape (batch, T - 1, input_size).
            y_history: tensor of shape (batch, T - 1) with past target values.

        Returns:
            Tensor of shape (batch, 1) with the predicted next target value.
        """
        # Zero-initialised hidden and cell states, shape 1 * batch * hidden_size.
        hidden = self.init_hidden(input_data)
        cell = self.init_hidden(input_data)
        lstm_states = (hidden, cell)

        for t in range(self.T - 1):
            # One time step: driving features plus the matching target value.
            x = torch.cat((input_data[:, t, :],
                           y_history[:, t].unsqueeze(1)), dim=1)
            # Avoid the non-contiguous weight warning under DataParallel, see
            # https://discuss.pytorch.org/t/dataparallel-issue-with-flatten-parameter/8282
            self.lstm_layer.flatten_parameters()
            _, lstm_states = self.lstm_layer(x.unsqueeze(0), lstm_states)

            hidden, cell = lstm_states
        # hidden[0] drops the num_layers dimension -> (batch, hidden_size).
        y_pred = self.linear(hidden[0])
        return y_pred

    def init_hidden(self, x):
        """Zero state on the same device/dtype as x, shape (1, batch, hidden).

        Replaces the deprecated Variable(x.data.new(...).zero_()) idiom with
        the modern, behaviourally identical new_zeros().
        """
        return x.new_zeros(1, x.size(0), self.hidden_size)


# Train the model
class da_rnn:
    """Trainer around the `lstm` module: loads and min-max-normalises a CSV
    with an 'NEE' target column, builds the model and optimiser, and exposes
    train() / predict().
    """

    def __init__(self, file_data, logger, encoder_hidden_size=20, decoder_hidden_size=32, T=10,
                 learning_rate=0.00001, batch_size=128, parallel=True, debug=False):
        # file_data: path to a CSV with an 'NEE' target column plus features.
        # T: window length; the model reads T - 1 steps per sample.
        # decoder_hidden_size / debug: kept for interface compatibility (unused).
        self.T = T
        self.logger = logger
        # Use the GPU only when one is present; the original unconditionally
        # called .cuda() and crashed on CPU-only machines.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        dat = pd.read_csv(file_data)
        # Remember the raw target range so predictions can be de-normalised.
        self.max = max(dat.NEE)
        self.min = min(dat.NEE)
        # Min-max normalise every column to [0, 1].
        dat = (dat - dat.min()) / (dat.max() - dat.min())
        self.dat = dat

        self.logger.info("Shape of data: %s.\nMissing in data: %s.", dat.shape, dat.isnull().sum().sum())

        # BUG fix: DataFrame.as_matrix() was removed from pandas; .values is
        # the equivalent ndarray view.
        self.X = dat.loc[:, [x for x in dat.columns.tolist() if x != 'NEE']].values
        self.y = np.array(dat.NEE)
        self.batch_size = batch_size

        self.lstm = lstm(input_size=self.X.shape[1], hidden_size=encoder_hidden_size, T=T,
                         logger=logger).to(self.device)

        if parallel:
            # BUG fix: the original wrapped self.encoder / self.decoder, which
            # were never defined on this class (AttributeError at runtime).
            self.lstm = nn.DataParallel(self.lstm)

        # BUG fix: itertools.ifilter is Python 2 only; the builtin filter
        # provides the same lazy predicate filtering in Python 3.
        self.lstm_optimizer = optim.Adam(
            params=filter(lambda p: p.requires_grad, self.lstm.parameters()),
            lr=learning_rate)

        # Train on the first 75% of the series; the rest is held out.
        self.train_size = int(self.X.shape[0] * 0.75)
        self.logger.info("Training size: %d.", self.train_size)

    def train(self, n_epochs=100):
        """Train for n_epochs over the training split.

        Populates self.iter_losses (per mini-batch) and self.epoch_losses.
        """
        # BUG fix: the original re-assigned n_epochs = 100 here, silently
        # ignoring the caller's argument.
        iter_per_epoch = int(np.ceil(self.train_size * 1. / self.batch_size))
        self.logger.info("Iterations per epoch: %3.3f ~ %d.",
                         self.train_size * 1. / self.batch_size, iter_per_epoch)
        self.iter_losses = np.zeros(n_epochs * iter_per_epoch)
        self.epoch_losses = np.zeros(n_epochs)
        self.loss_func = nn.MSELoss()

        for i in range(n_epochs):
            # Only the first train_size - T starting points leave room for a
            # full input window plus its target.
            perm_idx = np.random.permutation(self.train_size - self.T)
            j = 0
            # Iterate over the permuted starting points; the original looped
            # up to train_size, yielding empty mini-batches at the tail.
            while j < len(perm_idx):
                batch_idx = perm_idx[j:(j + self.batch_size)]
                X = np.zeros((len(batch_idx), self.T - 1, self.X.shape[1]))
                y_history = np.zeros((len(batch_idx), self.T - 1))
                # Target is the value T steps after each window start.
                y_target = self.y[batch_idx + self.T]

                for k in range(len(batch_idx)):
                    X[k, :, :] = self.X[batch_idx[k]:(batch_idx[k] + self.T - 1), :]
                    y_history[k, :] = self.y[batch_idx[k]:(batch_idx[k] + self.T - 1)]

                loss = self.train_iteration(X, y_history, y_target)
                # BUG fix: j / batch_size is a float in Python 3 and cannot
                # index a numpy array; use integer division.
                self.iter_losses[i * iter_per_epoch + j // self.batch_size] = loss

                j += self.batch_size

            self.epoch_losses[i] = np.mean(self.iter_losses[i * iter_per_epoch:(i + 1) * iter_per_epoch])
            if i % 10 == 0:
                print("Epoch %d, loss: %3.3f." % (i, self.epoch_losses[i]))

    def train_iteration(self, X, y_history, y_target):
        """Run one optimiser step on a mini-batch and return the scalar loss."""
        self.lstm_optimizer.zero_grad()

        X_t = torch.from_numpy(X).float().to(self.device)
        y_hist = torch.from_numpy(y_history).float().to(self.device)
        y_true = torch.from_numpy(y_target).float().to(self.device)

        y_pred = self.lstm(X_t, y_hist)
        loss = self.loss_func(y_pred.squeeze(1), y_true)
        loss.backward()

        self.lstm_optimizer.step()

        print("MSE: %s, loss: %s." % (loss.data, (y_pred[:, 0] - y_true).pow(2).mean()))

        return loss.item()

    def predict(self, on_train=False):
        """Predict over the training split (on_train=True) or the held-out
        remainder; returns a 1-D numpy array of normalised predictions."""
        if on_train:
            y_pred = np.zeros(self.train_size - self.T + 1)
        else:
            y_pred = np.zeros(self.X.shape[0] - self.train_size)

        i = 0
        while i < len(y_pred):
            batch_idx = np.arange(len(y_pred))[i:(i + self.batch_size)]
            X = np.zeros((len(batch_idx), self.T - 1, self.X.shape[1]))
            y_history = np.zeros((len(batch_idx), self.T - 1))
            for j in range(len(batch_idx)):
                # Window start: the sample index itself on the training split,
                # otherwise offset so windows end just before the test target.
                if on_train:
                    start = batch_idx[j]
                else:
                    start = batch_idx[j] + self.train_size - self.T
                X[j, :, :] = self.X[start:(start + self.T - 1), :]
                y_history[j, :] = self.y[start:(start + self.T - 1)]

            X_t = torch.from_numpy(X).float().to(self.device)
            y_hist = torch.from_numpy(y_history).float().to(self.device)
            # No gradients are needed at inference time.
            with torch.no_grad():
                y_pred[i:(i + self.batch_size)] = self.lstm(X_t, y_hist).cpu().numpy()[:, 0]
            i += self.batch_size
        return y_pred

def mean_absolute_percentage_error(y_true, y_pred):
    """Return the mean absolute percentage error (MAPE), in percent.

    Inputs are converted to numpy arrays; each error is scaled by the true
    value, so zeros in y_true produce inf/nan entries.
    """
    actual = np.array(y_true)
    forecast = np.array(y_pred)
    relative_errors = np.abs((actual - forecast) / actual)
    return np.mean(relative_errors) * 100


# Fit the model on the CSV and collect predictions on both splits.
# NOTE(review): `logger` passed here is the logging module itself (L17);
# da_rnn only calls .info() on it, which the module exposes, so this works.
model = da_rnn(file_data='GF-Guy_HH_TOTAL.csv', logger=logger, parallel=False,
               learning_rate=.001)
# NOTE(review): da_rnn.train() hard-codes n_epochs = 100 internally, so this
# argument is currently ignored — confirm the intended epoch count.
model.train(n_epochs = 20)

# In-sample predictions and the matching slice of ground truth
# (predict(on_train=True) yields train_size - T + 1 values).
y_train = model.predict(on_train=True)
y_train_true = model.y[:model.train_size - model.T + 1]

# Out-of-sample predictions over the held-out final 25% of the series.
y_pred = model.predict()
y_pred_true = model.y[model.train_size:]


def inverse(data, max, min):
    """Undo min-max scaling in place: map each value from [0, 1] back to
    [min, max]. Mutates `data` element-wise and returns it.
    """
    span = max - min
    for idx in range(len(data)):
        data[idx] = data[idx] * span + min
    return data


# Map everything back to the original NEE scale before scoring and plotting.
y_train = inverse(y_train, model.max, model.min)
y_train_true = inverse(y_train_true, model.max, model.min)
y_pred = inverse(y_pred, model.max, model.min)
y_pred_true = inverse(y_pred_true, model.max, model.min)

# Training-split metrics.
print("train rmse: ", numpy.sqrt(mean_squared_error(y_train, y_train_true)))
print("train mae: ", mean_absolute_error(y_train, y_train_true))
print("train mape: ", mean_absolute_percentage_error(y_train_true, y_train))
print("train r square: ", get_r2_numpy(y_train, y_train_true))
print("train corr: ", np.corrcoef(y_train, y_train_true))

# Held-out-split metrics.
print("predict rmse: ", numpy.sqrt(mean_squared_error(y_pred, y_pred_true)))
print("predict mae: ", mean_absolute_error(y_pred, y_pred_true))
print("predict mape: ", mean_absolute_percentage_error(y_pred_true, y_pred))
print("predict r square: ", get_r2_numpy(y_pred, y_pred_true))
print("predict corr: ", np.corrcoef(y_pred, y_pred_true))

true_list = numpy.array(numpy.concatenate((y_train_true, y_pred_true)))
pred_list = numpy.array(numpy.concatenate((y_train, y_pred)))

print("true_list: " + str(list(true_list)))
print("pred_list: " + str(list(pred_list)))

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# Label the series so ax.legend() below has handles to display (the original
# legend() call emitted a "no labelled objects" warning and drew nothing).
ax.plot(range(len(true_list)), true_list, c="black", label="observed")
ax.plot(range(len(pred_list)), pred_list, c="red", label="predicted")
# BUG fix: `/` yields floats in Python 3 and range() rejects them; use
# integer division to place one tick per month across the series.
x_ticks = ax.set_xticks(range(len(true_list) // 24, len(true_list), len(true_list) // 12))
x_labels = ax.set_xticklabels(range(1, 13, 1), fontsize="small")
ax.set_xlabel("Month")
ax.set_ylabel("NEE(gCO2 m-2 d-1)")
ax.legend()
plt.show()
