import sklearn

import numpy as np
import matplotlib.pyplot as plt
from keras import Sequential
from keras.layers import LSTM, Dense
from pandas import read_csv
# import math
# from keras.models import Sequential
# from keras.layers import Dense
# from keras.layers import LSTM
# from sklearn.preprocessing import MinMaxScaler
# from sklearn.metrics import mean_squared_error
#%matplotlib inline


# load the dataset
from sklearn.preprocessing import MinMaxScaler, robust_scale
import seaborn as sns
color = sns.color_palette()
sns.set_style('darkgrid')
from scipy import stats, random
from scipy.stats import norm, skew

# First establish how many features there are, and which are continuous vs. categorical.
# Check for missing values and fill incomplete features appropriately so the data is complete.
# Standardise the continuous numeric features to zero mean and unit variance.
# One-hot encode the categorical features.
# Binarise any continuous features that need to be turned into categorical ones.
# Optionally regularise/normalise the data, e.g. to guard against overfitting.
# If a first pass over the data gives poor results, try polynomial features to capture non-linear relationships.
# Decide from the actual problem whether any features need a function transform.

def np_move_avg(a, n, mode="same"):
    """Smooth *a* with a length-``n`` moving average computed by convolution.

    ``mode`` is forwarded to :func:`np.convolve` (default ``"same"`` keeps
    the output the same length as the input).
    """
    kernel = np.full(n, 1.0 / n)
    return np.convolve(a, kernel, mode=mode)

# Load the dataset: keep columns 2, 3 and 6 of S1.csv; the last 3 rows are
# footer junk and are skipped.
dataframe = read_csv('S1.csv', usecols=[2, 3, 6], engine='python', skipfooter=3)
dataset = dataframe.values

# Cast the integer raw values to float32 for scaling / modelling.
dataset = dataset.astype('float32')
# Random start index for the raw-vs-smoothed plotting window further down.
ns = np.random.randint(1000, 20000)

# Inspect the third selected column on a random sample of 1000 points.
dp = dataset[:, 2]
dp = np.random.choice(dp, 1000)
dp = dp.reshape(-1, 1)
print(np.mean(dp))
print(np.std(dp))

plt.boxplot(dp)
plt.grid()
plt.show()

# Histogram with a fitted normal curve overlaid.
# NOTE(review): seaborn's distplot is deprecated; kept for behavioural parity.
sns.distplot(dp, fit=norm)

# Fitted normal parameters for the sample.
(mu, sigma) = norm.fit(dp)
print('\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))

# Annotate the distribution plot BEFORE showing it -- the original called
# plt.show() first, so the legend / labels never appeared on the figure.
plt.legend([r'Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
           loc='best')
plt.ylabel('Frequency')
plt.title('cnt distribution')
plt.show()

# QQ-plot against the normal distribution; probplot expects a 1-D sample,
# so flatten the (1000, 1) column vector.
fig = plt.figure()
res = stats.probplot(dp.ravel(), plot=plt)
plt.show()

avg = np.average(dp)
dp = robust_scale(dp)
# Rescale the robust-scaled sample into a width-2 window centred on the
# pre-scaling mean.
scaler = MinMaxScaler(feature_range=(avg - 1, avg + 1))
dp = scaler.fit_transform(dp)

# 4. Feature binarisation: threshold must be passed by keyword in current
# scikit-learn (the positional form was deprecated and removed).
# NOTE(review): `avg` was computed before robust_scale, so thresholding the
# scaled data with it looks suspicious -- confirm intent.
binarizer = sklearn.preprocessing.Binarizer(threshold=avg)
bin_dp = binarizer.transform(dp.reshape(-1, 1))

# 6. Categorical feature encoding (one-hot demo on toy data).
enc = sklearn.preprocessing.OneHotEncoder()
enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], [1, 0, 2]])
enc.transform([[0, 1, 3]]).toarray()  # array([[ 1., 0., 0., 1., 0., 0., 0., 0., 1.]])

# Raw signal vs its 20-point moving average over a random 1000-sample window,
# with column 1 plotted on a twin y-axis for comparison.
ax = plt.figure().add_subplot(111)
ax.plot(dataset[ns:ns + 1000, 2], label='raw')
ax.plot(np_move_avg(dataset[ns:ns + 1000, 2], 20), '-r', label='avg')
plt.legend()
ax2 = ax.twinx()
ax2.plot(dataset[ns:ns + 1000, 1], '-', label='fb')

plt.legend()
plt.title('dp')
plt.show()
# convert an array of values into a supervised-learning dataset matrix:
# X is the value(s) at time t..t+look_back-1 and Y is the value at t+look_back.
def create_dataset(dataset, look_back=1):
    """Build (X, Y) training pairs from column 0 of a 2-D time series.

    Parameters
    ----------
    dataset : 2-D array; only column 0 is used.
    look_back : window length used as input X for predicting Y.

    Returns
    -------
    (dataX, dataY) : np.ndarray of shape (m, look_back) and (m,), where
        m = len(dataset) - look_back - 1 (the extra -1 deliberately drops
        the last usable window, matching the original behaviour).
    """
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        dataX.append(dataset[i:(i + look_back), 0])
        dataY.append(dataset[i + look_back, 0])
    # BUG FIX: the file imports numpy as `np`; the original `numpy.array`
    # raised NameError at call time.
    return np.array(dataX), np.array(dataY)

# Fix the random seed for reproducibility.
# BUG FIX: the file imports numpy as `np`; the original used the undefined
# name `numpy` here and in the reshapes below, raising NameError.
np.random.seed(7)

# Normalize every column of the dataset to [0, 1].
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)


# Split into train (67%) and test (33%) sets, preserving time order.
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size, :], dataset[train_size:len(dataset), :]
# Build X=t / Y=t+1 pairs; at this point the shape is [samples, features].
# use this function to prepare the train and test datasets for modeling
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# The LSTM expects input shaped [samples, time steps, features], so reshape.
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))

# Build and train the LSTM network:
# one input per step, a 4-unit LSTM hidden layer, a single-value output;
# mean-squared-error loss with the Adam optimizer, 100 epochs, batch size 1.
model = Sequential([
    LSTM(4, input_shape=(1, look_back)),
    Dense(1),
])
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2)