# coding=utf-8

import os
from tools import *
from LSTM_FC import *
from LSTM_gai import *
import pandas as pd


def run_lstm_model(train, test, grid_num, time_chunk_size, scalar, learning_rate, num_epochs, output_path):
    """Train the pure-LSTM model and write its loss curve and validation metrics.

    Args:
        train, test: DataFrames with a 'counts' target column plus lag
            feature columns named 'last1'..'last<time_chunk_size>'.
        grid_num: number of spatial grid cells; len(train) and len(test)
            are assumed to be exact multiples of grid_num so the reshape
            below is valid — TODO confirm with callers.
        time_chunk_size: number of lag features per sample.
        scalar: normalization factor used to rescale targets/predictions
            back to the original count range before validation.
        learning_rate, num_epochs: hyper-parameters forwarded to
            LSTM.train_and_predict.
        output_path: path prefix for the 'loss.csv' / 'result.csv' outputs.
    """
    # Derive the lag-column list from time_chunk_size instead of hard-coding
    # 'last1'..'last6', so the function works for any chunk size (identical
    # behavior for the current value of 6).
    lag_cols = ['last{}'.format(i + 1) for i in range(time_chunk_size)]

    y_train = np.array(train['counts'], dtype=np.float32).reshape(len(train))
    y_test = np.array(test['counts'], dtype=np.float32).reshape(len(test))

    x_train_lstm = np.array(train[lag_cols], dtype=np.float32).reshape(
        (grid_num, 1, int(len(train) / grid_num), time_chunk_size))
    x_test_lstm = np.array(test[lag_cols], dtype=np.float32).reshape(
        (grid_num, 1, int(len(test) / grid_num), time_chunk_size))

    lstm_train_out, lstm_predict, lstm_loss_changes = \
        LSTM.train_and_predict(x_train_lstm, y_train, x_test_lstm, y_test,
                               grid_num, time_chunk_size, learning_rate, num_epochs)

    save_excel([lstm_loss_changes], output_path + 'loss.csv')
    # Rescale both truth and prediction before computing validation metrics.
    results = [validate(y_test * scalar, lstm_predict * scalar)]
    save_excel(results, output_path + 'result.csv')


def run_comb_model(train, test, grid_num, time_chunk_size, scalar, learning_rate, num_epochs, output_path):
    """Train the combined LSTM+FC model and write its loss curve and metrics.

    The lag columns feed the LSTM branch; every remaining feature column
    (everything except the lag columns and 'counts') feeds the fully
    connected branch.

    Args:
        train, test: DataFrames with a 'counts' target column, lag columns
            'last1'..'last<time_chunk_size>', and extra feature columns.
        grid_num: number of spatial grid cells; len(train) and len(test)
            are assumed to be exact multiples of grid_num — TODO confirm.
        time_chunk_size: number of lag features per sample.
        scalar: normalization factor used to rescale targets/predictions
            back to the original count range before validation.
        learning_rate, num_epochs: hyper-parameters forwarded to
            LSTM_FC.train_and_predict.
        output_path: path prefix for the 'loss.csv' / 'result.csv' outputs.
    """
    # Derive the lag-column list from time_chunk_size instead of hard-coding
    # 'last1'..'last6', so the function works for any chunk size.
    lag_cols = ['last{}'.format(i + 1) for i in range(time_chunk_size)]

    y_train = np.array(train['counts'], dtype=np.float32).reshape(len(train))
    y_test = np.array(test['counts'], dtype=np.float32).reshape(len(test))

    x_train_lstm = np.array(train[lag_cols], dtype=np.float32).reshape(
        (grid_num, 1, int(len(train) / grid_num), time_chunk_size))
    x_test_lstm = np.array(test[lag_cols], dtype=np.float32).reshape(
        (grid_num, 1, int(len(test) / grid_num), time_chunk_size))
    # FC branch gets everything that is neither a lag column nor the target.
    x_train_fc = np.array(train.drop(lag_cols + ['counts'], axis=1))
    x_test_fc = np.array(test.drop(lag_cols + ['counts'], axis=1))

    combine_train_out, combine_predict, combine_loss_changes = \
        LSTM_FC.train_and_predict(x_train_lstm, x_train_fc, y_train, x_test_lstm, x_test_fc, y_test,
                                  grid_num, time_chunk_size, learning_rate, num_epochs)

    save_excel([combine_loss_changes], output_path + 'loss.csv')
    # Rescale both truth and prediction before computing validation metrics.
    results = [validate(y_test * scalar, combine_predict * scalar)]
    save_excel(results, output_path + 'result.csv')


def load_xm(num, interval, time_chunk_size):
    """Load and assemble the Xiamen dataset, preprocessing on first use.

    Runs the business/POI/weather preprocessing scripts only when their
    output CSVs are missing, then merges the three feature tables.

    Args:
        num: grid resolution identifier used in the preprocessed file names.
        interval: time-interval identifier used in the file names.
        time_chunk_size: forwarded to the business preprocessing step.

    Returns:
        (data, scalar): the merged feature DataFrame with the 'date' column
        dropped, and the counts range (max - min) used as the normalization
        scalar.
    """
    # Bounding box of the Xiamen study area and preprocessing constants.
    max_longitude = 118.2007
    min_longitude = 118.0635
    max_latitude = 24.5664
    min_latitude = 24.4214
    month_length = 31
    file_path = './data/'

    # Run each preprocessing step only if its output file does not exist yet.
    if not os.path.exists(file_path + str(num) + '_' + str(interval) + 'business_step1.csv'):
        from business_feature import business_step1, business_step2
        business_step1(num, max_longitude, min_longitude, max_latitude, min_latitude, interval)
        business_step2(num, interval, month_length, time_chunk_size)
    if not os.path.exists(file_path + str(num) + 'counts_POI.csv'):
        from process_POI import poi_process
        poi_process(num, max_longitude, min_longitude, max_latitude, min_latitude)
    if not os.path.exists(file_path + 'weather_data.csv'):
        from process_weather import weather_process
        weather_process()

    # Read the preprocessed pieces and left-merge them onto the business data;
    # missing POI/weather values become 0.
    business = pd.read_csv(file_path + str(num) + '_' + str(interval) + 'business_final.csv')
    poi = pd.read_csv(file_path + str(num) + 'counts_POI.csv')
    weather = weather_transform(pd.read_csv(file_path + 'weather_data.csv'))
    data = pd.merge(business, poi, how='left').fillna(0)
    data = pd.merge(data, weather, how='left', on=['month_day', 'hour']).fillna(0)

    # Renamed from `business_step1` so this DataFrame no longer shadows the
    # preprocessing function conditionally imported above.
    step1 = pd.read_csv(file_path + str(num) + '_' + str(interval) + 'business_step1.csv')
    scalar = step1['counts'].max() - step1['counts'].min()
    data = data.drop('date', axis=1)
    return data, scalar


def embed(data, split_date, embedding_cols, embedding_dims, drop_original=True):
    """Append embedding features for categorical columns and split by date.

    Args:
        data: DataFrame containing a numeric 'month_day' column plus the
            columns listed in embedding_cols.
        split_date: rows with month_day <= split_date become the training
            set; the rest become the test set.
        embedding_cols: categorical column names to embed.
        embedding_dims: embedding dimension for each column (zipped with
            embedding_cols).
        drop_original: when True (default), drop the raw categorical columns
            after embedding, keeping only the embedded representation.

    Returns:
        (train, test) DataFrames.
    """
    embedded = [data]
    for col, dim in zip(embedding_cols, embedding_dims):
        embedded.append(pd.DataFrame(embedding(data[col].map(int), dim)))
    data = pd.concat(embedded, axis=1)

    train = data[data.month_day <= split_date]
    test = data[data.month_day > split_date]

    # Bug fix: the original guard was `if embed:`, which tested this function
    # object itself and was therefore always true. The explicit drop_original
    # flag keeps the old always-drop behavior by default while making the
    # intent clear and configurable.
    if drop_original:
        train = train.drop(embedding_cols, axis=1)
        test = test.drop(embedding_cols, axis=1)
    return train, test


def main():
    """Entry point: load the configured city's data, split, and train."""
    place = 'cd'
    if place == 'xm':
        # Xiamen: preprocess/load from raw CSVs.
        num = 20
        interval = 60
        time_chunk_size = 6
        data, scalar = load_xm(num, interval, time_chunk_size)
    elif place == 'cd':
        # Chengdu: pre-cleaned data with a fixed normalization scalar.
        num = 100
        interval = 60
        time_chunk_size = 6
        data = pd.read_csv('./clean_data/temp')
        data = data.drop('time', axis=1)
        # Pre-computed counts range (max - min) for this dataset —
        # TODO confirm against the cleaning pipeline.
        scalar = 1397
    else:
        # Fail loudly instead of hitting NameError on `data` below.
        raise ValueError('unknown place: {!r}'.format(place))

    # Training hyper-parameters.
    learning_rate = 0.01
    num_epochs = 500
    split_date = 21
    grid_num = len(data['grid_id'].unique())

    # Embedding configuration.
    need_embed = True
    embedding_cols = ['month_day', 'time_chunk', 'grid_id', 'hour']
    embedding_dims = [2, 2, 2, 2]

    # Output location. Bug fix: the original path had no trailing separator,
    # so 'loss.csv'/'result.csv' were written as siblings of the created
    # directory (e.g. './data/result2/100_60cdloss.csv') instead of inside it.
    info = str(num) + '_' + str(interval) + place
    output_path = './data/result2/' + info + '/'
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    if need_embed:
        train, test = embed(data, split_date, embedding_cols, embedding_dims)
    else:
        train = data[data.month_day <= split_date]
        test = data[data.month_day > split_date]

    run_comb_model(train, test, grid_num, time_chunk_size, scalar,
                   learning_rate, num_epochs, output_path)


if __name__ == '__main__':
    main()