import numpy
from sklearn.model_selection import GridSearchCV
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.wrappers.scikit_learn import KerasClassifier
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import MinMaxScaler

# Function to create model, required for KerasClassifier
#def create_model(init_mode='uniform'):
#    # create model
#    model = Sequential()
#    model.add(Dense(64, input_dim=2, init=init_mode, activation='relu'))
#    model.add(Dense(1, init=init_mode, activation='sigmoid'))
#    # Compile model
#    #model.compile(loss='mse', optimizer='adam')
#    model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
     #return model


def create_model():
    """Build and compile the MLP used to regress the scaled corn price.

    Returns:
        A compiled Keras ``Sequential`` model expecting 5 input features
        and producing a single non-negative output (the target is
        MinMax-scaled to [0, 1] by the caller, so a relu output is valid).
    """
    model = Sequential()
    model.add(Dense(256, input_dim=5, activation='relu'))
    # BUG FIX: the hidden layers previously had no activation, making them
    # purely linear -- a stack of linear layers collapses into one affine
    # map, so the extra depth added nothing. Add relu non-linearities.
    model.add(Dense(128, activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(32, activation='relu'))

    # Single-unit output; relu keeps predictions >= 0, matching the
    # MinMax-scaled target range.
    model.add(Dense(1))
    model.add(Activation('relu'))

    # BUG FIX: loss was 'binary_crossentropy' with an accuracy metric, but
    # this script performs regression (continuous target, RMSE evaluation
    # below), so mean squared error is the correct objective. 'accuracy'
    # is meaningless for regression and has been dropped.
    model.compile(loss='mse', optimizer='adam')
    return model

   
# Seed the global NumPy RNG so that every run is reproducible.
numpy.random.seed(7)

# ------------------------------------------------------------------
# Load the monthly corn dataset: all columns except the last are
# input features, the final column is the price target.
# ------------------------------------------------------------------
frame = pd.read_csv("Corntest2.csv", parse_dates=True, index_col='Month')

raw = frame.values
feature_block = raw[:, :-1]
target_block = raw[:, -1].reshape(-1, 1)

# Scale features and target independently to [0, 1]; the target scaler
# is kept so predictions can be mapped back to real prices afterwards.
x_scaler = MinMaxScaler()
y_scaler = MinMaxScaler()
X = x_scaler.fit_transform(feature_block)
Y = y_scaler.fit_transform(target_block)

# NOTE: this callback is instantiated but not passed to fit() below,
# mirroring the original experiment setup.
early_stop = EarlyStopping(monitor='loss', patience=2, verbose=1)

# ------------------------------------------------------------------
# Train on everything except the last 6 rows, which serve as a
# hold-out set for evaluation.
# ------------------------------------------------------------------
net = create_model()
net.fit(X[:-6], Y[:-6], batch_size=10, epochs=100)

# Predict the hold-out rows and undo the target scaling.
predicted = y_scaler.inverse_transform(net.predict(X[-6:]))
observed = y_scaler.inverse_transform(Y[-6:])

print(predicted)
print(observed)

# Report the root-mean-squared error between prediction and truth.
rmse = mean_squared_error(predicted, observed) ** 0.5
print("重构数据与原数据的  rmse " + str(rmse))