import pandas as pd
import numpy as np
#from LSTM_gai import *
#from LSTM_FC_main2 import *
import os
from tools import *
import torch
from torch import nn, optim

'''
data = pd.read_csv('./clean_data/60_chengdu_data_gai')
#data1 = data[['grid_id','counts']]
#temp = data1.groupby(['grid_id']).sum().reset_index().sort_values(by='counts')
grid_id_rest = [5061,5163,5159,5660,5960,5862,5564,5458,5062,5758,5658, 5264, 5859 ,5358 ,5863,
 5761, 5762 ,5560 ,5662, 5164, 5161, 5558 ,5860 ,5160 ,5464 ,5162, 5763 ,5759, 5364 ,5259,
 5263, 5462 ,5559 ,5659 ,5463 ,5562 ,5563 ,5362 ,5460 ,5661 ,5262 ,5461 ,5459 ,5363 ,5561,
 5359, 5360 ,5260 ,5361 ,5261]
grid_id = data['grid_id'].unique()
tmp = list(set(grid_id) ^ set(grid_id_rest))
data['grid_id'].replace(tmp, np.nan,inplace=True)
data.dropna(inplace=True)
data.to_csv('./clean_data/temp',index=False)
'''

class LSTM_FC(torch.nn.Module):
    """LSTM encoder whose per-step outputs are concatenated with extra
    per-row features and fed through a three-layer fully-connected
    regressor."""

    def __init__(self, ln, lc, time_chunk_size, in_dim, n_hidden_1, n_hidden_2, out_dim):
        """
        ln: LSTM hidden size (also the width of the LSTM features fed to the MLP).
        lc: number of stacked LSTM layers.
        time_chunk_size: per-step LSTM input feature size.
        in_dim: number of additional FC-only features.
        n_hidden_1, n_hidden_2: hidden widths of the MLP head.
        out_dim: regression output dimension (1 in this file).
        """
        super(LSTM_FC, self).__init__()
        self.lstm = torch.nn.LSTM(input_size=time_chunk_size, hidden_size=ln, num_layers=lc)
        # Width of the LSTM output concatenated with the FC features.
        # Was hard-coded to 6, which only matched the LSTM output when
        # ln == 6; using ln keeps the layer sizes consistent for any
        # hidden size (backward compatible: the caller below passes ln=6).
        lstm_out_len = ln
        self.layer1 = nn.Sequential(nn.Linear(in_dim + lstm_out_len, n_hidden_1), nn.ReLU(True))
        self.layer2 = nn.Sequential(nn.Linear(n_hidden_1, n_hidden_2), nn.ReLU(True))
        self.layer3 = nn.Sequential(nn.Linear(n_hidden_2, out_dim))

    def forward(self, x_lstm, x_fc):
        """x_lstm: (seq, batch, time_chunk_size); x_fc: (seq*batch, in_dim).
        Returns a (seq*batch, out_dim) tensor."""
        out, _ = self.lstm(x_lstm)
        s, b, h = out.shape
        # Flatten (seq, batch) into one row per sample before the MLP.
        out = out.view(s * b, h)
        x_fc = torch.cat([out, x_fc.type_as(out)], dim=1)
        if torch.cuda.is_available():
            # Ensure the concatenated tensor lives on the GPU when one exists
            # (no-op when the inputs were already moved to CUDA by the caller).
            x_fc = x_fc.cuda()
        x_fc = self.layer1(x_fc)
        x_fc = self.layer2(x_fc)
        x_fc = self.layer3(x_fc)
        return x_fc

    @staticmethod
    def train_and_predict(x_train_lstm, x_train_fc, y_train, x_test_lstm, x_test_fc, y_test,
                          grid_num, time_chunk_size, learning_rate, num_epochs):
        """Train an LSTM_FC model with Adam/MSE and return
        (train_out, test_out, loss_changes): numpy predictions for the train
        and test sets and a per-epoch list of [train_loss, test_loss].

        Note: ``train_out`` is the forward pass from the final epoch,
        computed *before* that epoch's optimizer step (original behavior).
        ``grid_num`` is accepted for interface compatibility but unused here.
        """
        x_train_lstm = torch.from_numpy(x_train_lstm)
        x_train_fc = torch.from_numpy(x_train_fc)
        y_train = torch.from_numpy(y_train)
        x_test_lstm = torch.from_numpy(x_test_lstm)
        x_test_fc = torch.from_numpy(x_test_fc)
        y_test = torch.from_numpy(y_test)

        model = LSTM_FC(6, 2, time_chunk_size, x_train_fc.shape[1], 32, 4, 1)
        if torch.cuda.is_available():
            model = model.cuda()
            x_train_lstm = x_train_lstm.cuda()
            x_train_fc = x_train_fc.cuda()
            y_train = y_train.cuda()
            x_test_lstm = x_test_lstm.cuda()
            x_test_fc = x_test_fc.cuda()
            y_test = y_test.cuda()

        criterion = nn.MSELoss()
        optimizer = optim.Adam(model.parameters(), lr=learning_rate)
        loss_changes = []

        lstm_train_out = None
        for epoch in range(num_epochs):
            # Call the module itself (not .forward) so hooks are honored.
            lstm_train_out = model(x_train_lstm, x_train_fc)
            loss = criterion(lstm_train_out, y_train.unsqueeze(1))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # if epoch % 100 == 0:
            print('Epoch[{}/{}], loss:{:.6f}'.format(epoch + 1, num_epochs, loss.item()))
            # Track the test loss without building an autograd graph.
            with torch.no_grad():
                lstm_test_out = model(x_test_lstm, x_test_fc)
                loss_test = criterion(lstm_test_out, y_test.unsqueeze(1))
            loss_changes.append([loss.item(), loss_test.item()])

        model.eval()

        with torch.no_grad():
            lstm_test_out = model(x_test_lstm, x_test_fc)
        lstm_test_out = lstm_test_out.cpu().numpy()
        lstm_train_out = lstm_train_out.detach().cpu().numpy()

        return lstm_train_out, lstm_test_out, loss_changes

class LSTM(torch.nn.Module):
    """Plain LSTM regressor: LSTM encoder followed by a single linear
    layer mapping each step's hidden state to one scalar."""

    def __init__(self, ln, lc, time_chunk_size):
        """
        ln: LSTM hidden size.
        lc: number of stacked LSTM layers.
        time_chunk_size: per-step input feature size.
        """
        super(LSTM, self).__init__()
        self.lstm = torch.nn.LSTM(input_size=time_chunk_size, hidden_size=ln, num_layers=lc)
        self.reg = nn.Linear(ln, 1)

    def forward(self, x):
        """x: (seq, batch, time_chunk_size). Returns a flat (seq*batch,) tensor.

        NOTE(review): callers in this file always pass seq == 1, so the
        flat output has exactly ``batch`` elements.
        """
        out, _ = self.lstm(x)
        s, b, h = out.shape
        out = out.view(s * b, h)
        out = self.reg(out)
        # view(-1) is identical to the original view(b) for seq == 1 but
        # stays well-formed for any sequence length.
        out = out.view(-1)
        return out

    @staticmethod
    def train_and_predict(x_train, y_train, x_test, y_test, grid_num, time_chunk_size, learning_rate, num_epochs):
        """Train an LSTM model with Adam/MSE and return
        (train_out, test_out, loss_changes): numpy predictions for the train
        and test sets and a per-epoch list of [train_loss, test_loss].

        Note: ``train_out`` is the forward pass from the final epoch,
        computed *before* that epoch's optimizer step (original behavior).
        ``grid_num`` is accepted for interface compatibility but unused here.
        """
        x_train = torch.from_numpy(x_train)
        y_train = torch.from_numpy(y_train)
        x_test = torch.from_numpy(x_test)
        y_test = torch.from_numpy(y_test)

        model = LSTM(4, 2, time_chunk_size)
        if torch.cuda.is_available():
            model = model.cuda()
            x_train = x_train.cuda()
            y_train = y_train.cuda()
            x_test = x_test.cuda()
            y_test = y_test.cuda()

        criterion = nn.MSELoss()
        optimizer = optim.Adam(model.parameters(), lr=learning_rate)
        loss_changes = []

        lstm_train_out = None
        for epoch in range(num_epochs):
            # Call the module itself (not .forward) so hooks are honored.
            lstm_train_out = model(x_train)
            loss = criterion(lstm_train_out, y_train)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # if epoch % 100 == 0:
            print('Epoch[{}/{}], loss:{:.6f}'.format(epoch + 1, num_epochs, loss.item()))
            # Track the test loss without building an autograd graph.
            with torch.no_grad():
                lstm_test_out = model(x_test)
                loss_test = criterion(lstm_test_out, y_test)
            loss_changes.append([loss.item(), loss_test.item()])

        model.eval()

        with torch.no_grad():
            lstm_test_out = model(x_test)
        lstm_test_out = lstm_test_out.cpu().numpy()
        lstm_train_out = lstm_train_out.detach().cpu().numpy()

        return lstm_train_out, lstm_test_out, loss_changes


def run_comb_model(train, test, grid_num, time_chunk_size, scalar, learning_rate, num_epochs, output_path):
    """Train the combined LSTM+FC model on ``train``/``test`` DataFrames and
    write the per-epoch loss curve and validation metrics, using
    ``output_path`` as a filename prefix."""
    lag_cols = ['last1', 'last2', 'last3', 'last4', 'last5', 'last6']

    def targets(frame):
        # Ground-truth counts as a flat float32 vector.
        return np.array(frame['counts'], dtype=np.float32).reshape(len(frame))

    def lstm_input(frame):
        # Lag features reshaped to (seq=1, steps, time_chunk_size).
        steps = int(len(frame) / grid_num)
        return np.array(frame[lag_cols], dtype=np.float32).reshape((1, steps, time_chunk_size))

    def fc_input(frame):
        # Everything except the lag columns and the target.
        return np.array(frame.drop(lag_cols + ['counts'], axis=1))

    y_train = targets(train)
    y_test = targets(test)

    combine_train_out, combine_predict, combine_loss_changes = LSTM_FC.train_and_predict(
        lstm_input(train), fc_input(train), y_train,
        lstm_input(test), fc_input(test), y_test,
        grid_num, time_chunk_size, learning_rate, num_epochs)

    save_excel([combine_loss_changes], output_path + 'loss.csv')
    # Rescale both truth and prediction back to raw counts before scoring.
    results = [validate(y_test * scalar, combine_predict * scalar)]
    save_excel(results, output_path + 'result.csv')

def run_lstm_model(train, test, grid_num, time_chunk_size, scalar, learning_rate, num_epochs, output_path):
    """Train the plain LSTM model on ``train``/``test`` DataFrames and write
    the per-epoch loss curve and validation metrics, using ``output_path``
    as a filename prefix."""
    lag_cols = ['last1', 'last2', 'last3', 'last4', 'last5', 'last6']

    def targets(frame):
        # Ground-truth counts as a flat float32 vector.
        return np.array(frame['counts'], dtype=np.float32).reshape(len(frame))

    def lstm_input(frame):
        # Lag features reshaped to (seq=1, steps, time_chunk_size).
        steps = int(len(frame) / grid_num)
        return np.array(frame[lag_cols], dtype=np.float32).reshape((1, steps, time_chunk_size))

    y_train = targets(train)
    y_test = targets(test)

    lstm_train_out, lstm_predict, lstm_loss_changes = LSTM.train_and_predict(
        lstm_input(train), y_train, lstm_input(test), y_test,
        grid_num, time_chunk_size, learning_rate, num_epochs)

    save_excel([lstm_loss_changes], output_path + 'lstm_loss.csv')
    # Rescale both truth and prediction back to raw counts before scoring.
    results = [validate(y_test * scalar, lstm_predict * scalar)]
    save_excel(results, output_path + 'lstm_result.csv')

def embed(data, split_date, embedding_cols, embedding_dims, drop_original=True):
    """Append embedded versions of the categorical columns to ``data`` and
    split it into train/test by ``month_day``.

    data: DataFrame containing ``month_day`` and every column in embedding_cols.
    split_date: rows with month_day <= split_date go to train, the rest to test.
    embedding_cols / embedding_dims: parallel lists of column names and the
        dimensions handed to ``embedding`` (from tools).
    drop_original: drop the raw categorical columns after embedding.
        Defaults to True to preserve the old behavior — the previous code
        tested ``if embed:``, i.e. the truthiness of this function object,
        which was always True.

    Returns (train, test) DataFrames.
    """
    embedding_result = [data]
    for col, dim in zip(embedding_cols, embedding_dims):
        # Give the embedded frame data's index so pd.concat(axis=1) aligns
        # row-by-row even when data's index is not 0..n-1 (e.g. after the
        # grid_id filter in main); a fresh RangeIndex would misalign there.
        embedding_result.append(pd.DataFrame(embedding(data[col].map(int), dim), index=data.index))
    data = pd.concat(embedding_result, axis=1)

    train = data[data.month_day <= split_date]
    test = data[data.month_day > split_date]

    if drop_original:
        train = train.drop(embedding_cols, axis=1)
        test = test.drop(embedding_cols, axis=1)
    return train, test

def main():
    """Load the cleaned taxi-demand data for one grid cell, embed the
    categorical columns, and train/evaluate the plain LSTM model."""
    '''
    place = 'cd'
    if place == 'xm':
        num = 20
        interval = 60
        time_chunk_size = 6
        data, scalar = load_xm(num, interval, time_chunk_size)
    elif place == 'cd':
    '''
    num = 100
    interval = 60
    time_chunk_size = 6
    data = pd.read_csv('./clean_data/' + 'temp')
    data = data.drop('time', axis=1)
    # Restrict this run to a single grid cell.
    data = data[data['grid_id'] == 5061]
    # Factor used to rescale normalized predictions back to raw counts
    # (presumably the max count used when normalizing — TODO confirm).
    scalar = 1398

    '''
    data = data.drop(['Food', 'Hotel', 'Transport', 'Life',
                      'Attractions', 'Entertainment', 'Sport', 'Education', 'Media',
                      'Medical', 'Shopping', 'Car_Service', 'Financial', 'Estate', 'Company',
                      'Government', 'Gateway', 'Natural_features'], axis=1)
    '''

    # Basic shared settings
    learning_rate = 0.01
    num_epochs = 3000
    split_date = 21
    grid_num = len(data['grid_id'].unique())
    # Embedding settings
    need_embed = True
    embedding_cols = ['month_day', 'time_chunk', 'grid_id', 'hour']
    embedding_dims = [2, 2, 2, 2]
    # Output settings
    info = str(num) + '_' + str(interval) + 'temp'
    output_path = './data/result2/' + info
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    # BUG FIX: downstream code concatenates filenames directly onto
    # output_path, so without a trailing separator the result files were
    # written *beside* the directory created above (e.g. '…temploss.csv'),
    # leaving it empty. Append the separator so files land inside it.
    output_path = output_path + os.sep

    #output_path = './data/result/' + info + '/' + str(i)
    if need_embed:
        train, test = embed(data, split_date, embedding_cols, embedding_dims)
    else:
        train = data[data.month_day <= split_date]
        test = data[data.month_day > split_date]

    run_lstm_model(train, test, grid_num, time_chunk_size, scalar, learning_rate, num_epochs, output_path)

# Guard the entry point so importing this module does not start training.
if __name__ == '__main__':
    main()