# coding=utf-8

import os
import pandas as pd
import numpy as np
from business_feature import *
from process_POI import *
from process_weather import *
from sklearn import preprocessing
from LSTM import *
from FC import *
from tools import *


# Grid partitioning: number of cells per axis and the lon/lat bounding box (Xiamen).
num = 20
max_longitude = 118.2007
min_longitude = 118.0635
max_latitude = 24.5664
min_latitude = 24.4214
# Length of one time chunk, in minutes.
interval = 60
month_length = 31
time_chunk_size = 6
# Tag used in every derived file/directory name, e.g. "20_60weather_xm".
info = str(num) + '_' + str(interval) + 'weather_xm'
file_path = './data/'
output_path = './data/result/' + info
# Create the result directory; exist_ok=True makes this idempotent and race-free
# (the original rebuilt the path string instead of reusing output_path).
os.makedirs(output_path, exist_ok=True)

# Run each preprocessing stage only when its cached CSV is not yet on disk.
_grid_prefix = file_path + str(num)
if not os.path.exists(_grid_prefix + '_' + str(interval) + 'business_step1.csv'):
    business_step1(num, max_longitude, min_longitude, max_latitude, min_latitude, interval)
    business_step2(num, interval, month_length, time_chunk_size)
if not os.path.exists(_grid_prefix + 'counts_POI.csv'):
    poi_process(num, max_longitude, min_longitude, max_latitude, min_latitude)
if not os.path.exists(file_path + 'weather_data.csv'):
    weather_process()

# Load the preprocessed data sets.
business = pd.read_csv(file_path + str(num) + '_' + str(interval) + 'business_final.csv')
grid_num = len(business['grid_id'].unique())
poi = pd.read_csv(file_path + str(num) + 'counts_POI.csv')
weather = pd.read_csv(file_path + 'weather_data.csv')
# Optionally expand weather into a 4-dimensional one-hot encoding.
# weather = weather_transform(weather)
# Left-join POI counts (on the shared columns) and weather (on day/hour);
# rows with no match are filled with 0.
data = pd.merge(business, poi, how='left').fillna(0)
data = pd.merge(data, weather, how='left', on=['month_day', 'hour']).fillna(0)
# Read into step1_counts rather than `business_step1`, which would shadow the
# preprocessing function of the same name imported above; also reuse file_path
# instead of repeating the './data/' literal.
step1_counts = pd.read_csv(file_path + str(num) + '_' + str(interval) + 'business_step1.csv')
# Range of the raw counts, used to de-normalize predictions before scoring.
scalar = max(step1_counts['counts']) - min(step1_counts['counts'])

embed = False
'''
# embedding
embed = True
embedding_cols = ['month_day', 'time_chunk', 'grid_id', 'week_day', 'hour']
embedding_dims = [5, 5, 5, 2, 5]
embedding_result = [data]
for col, dim in zip(embedding_cols, embedding_dims):
    embedding_result.append(pd.DataFrame(embedding(data[col].map(int), dim)))
data = pd.concat(embedding_result, axis=1)
'''
# Split by day of month: days 1-21 are training data, the rest is test data.
train_mask = data.month_day <= 21
train = data[train_mask]
test = data[~train_mask]

# if embed:
#    data = data.drop(embedding_cols, axis=1)

# Target vectors as flat float32 arrays.
y_train = np.asarray(train['counts'], dtype=np.float32).ravel()
y_test = np.asarray(test['counts'], dtype=np.float32).ravel()

results = []

# LSTM training and prediction.
lag_cols = ['last1', 'last2', 'last3', 'last4', 'last5', 'last6']


def _to_lstm_input(frame):
    # Reshape the lag features into (grid, 1, steps-per-grid, time_chunk_size).
    steps = len(frame) // grid_num
    return np.array(frame[lag_cols], dtype=np.float32).reshape(
        (grid_num, 1, steps, time_chunk_size))


x_train = _to_lstm_input(train)
x_test = _to_lstm_input(test)

lstm_learning_rate = 0.02
lstm_num_epochs = 10
lstm_train_out, lstm_predict, loss_changes = LSTM.train_and_predict(
    x_train, y_train, x_test, y_test, grid_num, time_chunk_size,
    lstm_learning_rate, lstm_num_epochs)
save_excel(loss_changes, output_path + '/' + info + 'lstm_loss.csv')
# De-normalize both targets and predictions before computing metrics.
results.append(validate(y_test * scalar, lstm_predict * scalar))

# FC training and prediction: drop the lag features (already consumed by the
# LSTM) plus non-feature columns, and feed the LSTM output alongside the
# remaining context features.
drop_cols = ['last1', 'last2', 'last3', 'last4', 'last5', 'last6', 'date', 'counts']
x_train_fc = train.drop(drop_cols, axis=1)
x_test_fc = test.drop(drop_cols, axis=1)


# Standardization (currently disabled). NOTE(review): if re-enabled, the test
# set must be transformed with the scaler fitted on the training set — the
# original commented code re-fitted on each set:
'''
scaler = preprocessing.StandardScaler().fit(x_train_fc)
x_train_fc = scaler.transform(x_train_fc)
x_test_fc = scaler.transform(x_test_fc)
'''

# Prepend the LSTM outputs as an additional input feature column.
x_train_fc.insert(0, 'counts', lstm_train_out)
x_test_fc.insert(0, 'counts', lstm_predict)
x_train_fc = np.array(x_train_fc, dtype=np.float32)
x_test_fc = np.array(x_test_fc, dtype=np.float32)

fc_learning_rate = 0.02
fc_num_epochs = 10

# BUG FIX: the FC model was trained with the LSTM hyper-parameters
# (lstm_learning_rate / lstm_num_epochs); use the FC ones defined above.
fc_predict, loss_changes = FC.train_and_predict(
    x_train_fc, y_train, x_test_fc, y_test, fc_learning_rate, fc_num_epochs)
save_excel(loss_changes, output_path + '/' + info + 'fc_loss.csv')
results.append(validate(y_test * scalar, fc_predict * scalar))


# Persist the validation metrics of both models.
save_excel(results, output_path + '/' + info + '.csv')

