import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import time

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import logging
import utils
from sklearn import preprocessing

# All progress and results are appended to this log file instead of stdout.
log_filename = 'tensorflow.log'

logging.basicConfig(filename=log_filename, filemode='a', level=logging.DEBUG)


# Step 1: read the data from data.xlsx.
# NOTE(review): `utils.read_excle` (sic) is a project helper — downstream
# code treats `data` as rows of 22 numeric columns (21 features + target)
# and `n_samples` as the row count; confirm against utils.py.
data, n_samples = utils.read_excle('data.xlsx')

# Step 2: create placeholders for the 21 input features X0..X20 and the
# target Y.  (The "birth rate / life expectancy" wording in the original
# comment was a leftover from a tutorial template.)
# Build the placeholders in a loop instead of 21 copy-pasted lines, then
# unpack into the individual names so downstream code can keep using
# X0..X20 directly.  Names in the graph ('X0'..'X20') are unchanged.
X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, \
    X11, X12, X13, X14, X15, X16, X17, X18, X19, X20 = (
        tf.placeholder(tf.float32, name='X%d' % i) for i in range(21))
Y = tf.placeholder(tf.float32, name='Y')

# Step 3: create the trainable parameters: one scalar weight per feature
# (a0..a20, graph names 'weights_0'..'weights_20') plus a bias, all
# initialized to 0.0 exactly as the original per-line definitions did.
# A generator expression replaces 21 copy-pasted lines.
a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, \
    a11, a12, a13, a14, a15, a16, a17, a18, a19, a20 = (
        tf.get_variable('weights_%d' % i, initializer=tf.constant(0.0))
        for i in range(21))
bias = tf.get_variable('bias', initializer=tf.constant(0.0))

# Standardize every column to zero mean / unit variance.
# NOTE(review): `data` appears to include the target column as well, so Y is
# standardized together with the features — confirm this is intended.
scaler = preprocessing.StandardScaler().fit(data)
x_data_standard = scaler.transform(data)

# Step 4: linear model  Y_hat = sum_i a_i * X_i + bias.
# BUG FIX: the original expression omitted the a11*X11 term (it jumped from
# a10*X10 straight to a12*X12), so feature 11 and its weight were silently
# excluded from both training and prediction.
Y_predicted = a0*X0 + a1*X1 + a2*X2 + a3*X3 + a4*X4 + a5*X5 + a6*X6 \
    + a7*X7 + a8*X8 + a9*X9 + a10*X10 + a11*X11 + a12*X12 + a13*X13 \
    + a14*X14 + a15*X15 + a16*X16 + a17*X17 + a18*X18 + a19*X19 \
    + a20*X20 + bias

# Step 5: per-sample squared error as the loss.
loss = tf.square(Y - Y_predicted, name='loss')


# Step 6: plain gradient descent with learning rate 0.001.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)


start = time.time()
writer = tf.summary.FileWriter('./graphs/main_graph', tf.get_default_graph())

# Ordered lists of the graph tensors so each data row can be zipped into a
# feed dict instead of spelling out 22 placeholder bindings by hand (the
# manual version is exactly the kind of repetition that invites a dropped
# or duplicated feature).
feature_placeholders = [X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10,
                        X11, X12, X13, X14, X15, X16, X17, X18, X19, X20]
weight_vars = [a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10,
               a11, a12, a13, a14, a15, a16, a17, a18, a19, a20]

with tf.Session() as sess:
	# Step 7: initialize the trainable variables (weights and bias).
	sess.run(tf.global_variables_initializer())

	# Step 8: train for 100 epochs with per-sample (stochastic) updates.
	# Each row of x_data_standard holds the 21 features followed by the target.
	for epoch in range(100):
		total_loss = 0.0
		for row in x_data_standard:
			# zip stops after the 21 feature placeholders; column 21 is the target.
			feed = dict(zip(feature_placeholders, row))
			feed[Y] = row[21]
			_, l = sess.run([optimizer, loss], feed_dict=feed)
			total_loss += l
		# Lazy %-args: the string is only formatted if the record is emitted.
		logging.info('Epoch %d: %f', epoch, total_loss / n_samples)

	# close the writer when you're done using it
	writer.close()

	# Step 9: fetch the learned parameters.  The fetched float is bound to
	# bias_out so the `bias` tf.Variable is not shadowed (the original
	# rebound the name `bias` to the fetched value).
	*weights_out, bias_out = sess.run(weight_vars + [bias])

logging.info('Took: %f seconds', time.time() - start)
logging.info(' '.join('%f' % v for v in weights_out + [bias_out]))
logging.info("ok")
