from __future__ import print_function
import os.path
import tensorflow as tf
import numpy as np
import sys

from Evaluate import Evaluate

def load_data(path):
  """Load a .npz archive and return its feature and label arrays.

  Args:
    path: filesystem path to a ``.npz`` file containing the keys
      ``'datas'`` and ``'labels'``.

  Returns:
    Tuple ``(datas, labels)`` of numpy arrays as stored in the archive.
  """
  archive = np.load(path)
  datas, labels = archive['datas'], archive['labels']
  return datas, labels


def train(data, labels, logdir, save_path, task, learning_rate = 0.1, training_epochs = 300, batch_size = 4, display_step = 1, FEATURE_NUM = 119):
  """Build a 3-hidden-layer MLP regressor and either train it or predict with it.

  Args:
    data: float array of shape (num_samples, FEATURE_NUM) — input features.
    labels: float array of shape (num_samples, 12) — regression targets
      (only used when task == 'train'; may be None otherwise).
    logdir: directory for TensorBoard summaries.
    save_path: directory holding / receiving the model checkpoint.
    task: 'train' to optimize and save a checkpoint; anything else to
      restore the checkpoint (if present) and return predictions.
    learning_rate: Adam learning rate.
    training_epochs: number of passes over the training data.
    batch_size: mini-batch size (trailing partial batch is dropped).
    display_step: print average cost every this many epochs.
    FEATURE_NUM: input feature dimensionality.

  Returns:
    None when training; otherwise the prediction array of shape
    (num_samples, 12).
  """
  # Graph inputs: feature vectors and 12-dimensional regression targets.
  x = tf.placeholder(tf.float32, [None, FEATURE_NUM])
  y = tf.placeholder(tf.float32, [None, 12])

  # Model weights, uniformly initialized in [-1, 1).
  W1 = tf.Variable(tf.random_uniform(shape=[FEATURE_NUM, 300], minval = -1, maxval = 1))
  b1 = tf.Variable(tf.random_uniform(shape=[300], minval = -1, maxval = 1))

  W2 = tf.Variable(tf.random_uniform(shape=[300, 200], minval = -1, maxval = 1))
  b2 = tf.Variable(tf.random_uniform(shape=[200], minval = -1, maxval = 1))

  W3 = tf.Variable(tf.random_uniform(shape=[200, 200], minval = -1, maxval = 1))
  b3 = tf.Variable(tf.random_uniform(shape=[200], minval = -1, maxval = 1))

  Wf = tf.Variable(tf.random_uniform(shape=[200, 12], minval = -1, maxval = 1))
  bf = tf.Variable(tf.random_uniform(shape=[12], minval = -1, maxval = 1))

  # Forward pass: three ReLU hidden layers, linear output.
  p1 = tf.nn.relu(tf.matmul(x, W1) + b1)
  p2 = tf.nn.relu(tf.matmul(p1, W2) + b2)
  p3 = tf.nn.relu(tf.matmul(p2, W3) + b3)
  # BUGFIX: the output layer previously consumed p2, leaving the third
  # hidden layer (W3/b3/p3) computed but unused. Wire p3 through as the
  # surrounding layer definitions clearly intend.
  # NOTE(review): checkpoints saved by the old code still restore (W3/b3
  # were always saved), but their W3/b3 values were never trained.
  pred = tf.matmul(p3, Wf) + bf

  # Mean absolute error loss, minimized with Adam.
  cost = tf.reduce_mean(tf.abs(pred - y))
  optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

  # Summaries for TensorBoard.
  tf.summary.scalar('cost', cost)
  tf.summary.histogram('W1', W1)
  tf.summary.histogram('b1', b1)
  tf.summary.histogram('Wf', Wf)
  tf.summary.histogram('bf', bf)
  merged_summary_op = tf.summary.merge_all()

  init = tf.global_variables_initializer()
  saver = tf.train.Saver()
  if task == 'train':
    with tf.Session() as sess:
        sess.run(init)
        summary_writer = tf.summary.FileWriter(logdir, sess.graph)
        # Resume from an existing checkpoint when one is present.
        if os.path.isfile(save_path + '/checkpoint'):
            saver.restore(sess, save_path + '/model.ckpt')
            print("Model restored from file: %s" % save_path)
        for epoch in range(training_epochs):
            avg_cost = 0
            # Any trailing partial batch is dropped.
            total_batch = int(data.shape[0] / batch_size)
            for i in range(total_batch):
                batch_xs = data[i * batch_size: (i+1) * batch_size]
                batch_ys = labels[i * batch_size: (i+1) * batch_size]
                # Run backprop and fetch the batch loss.
                _, c, summary_str = sess.run([optimizer, cost, merged_summary_op], feed_dict={x: batch_xs, y: batch_ys})
                avg_cost += c / total_batch
                # Log summaries once per epoch (first batch only).
                if i == 0:
                  summary_writer.add_summary(summary_str, epoch)
            if (epoch+1) % display_step == 0:
                print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))
        # Flush pending summaries before leaving the session.
        summary_writer.close()
        if not os.path.exists(save_path):
          os.makedirs(save_path)
        saver.save(sess, save_path + '/model.ckpt')
        print("Model saved in file: %s" % save_path)
        return None
  else:
      # Inference path: restore weights (if available) and predict.
      with tf.Session() as sess:
        sess.run(init)
        if os.path.isfile(save_path + '/checkpoint'):
            saver.restore(sess, save_path + '/model.ckpt')
            print("Model restored from file: %s" % save_path)
        else:
          print('No model!')
        # Only the prediction tensor is needed (previously x and b1 were
        # fetched as well and discarded).
        _pred = sess.run(pred, feed_dict={x: data})
        return _pred

if __name__ == "__main__":
  PATH = '../data/traindata2.npz'
  TESTPATH = '../data/testdata2.npz'
  mode = sys.argv[1]
  ans = []

  # Select the dataset according to the requested mode.
  if mode == 'train':
    data, labels = load_data(PATH)
  elif mode == 'test':
    data = np.load(TESTPATH)['tests']
  elif mode == 'valid':
    # Hold out the last two samples for validation.
    data, labels = load_data(PATH)
    data = data[-2:]
    labels = labels[:, -2:, :]
    print(labels.shape)
  elif mode == 'train_valid':
    data, labels = load_data(PATH)
    print(labels.shape)

  # One independent model (and graph) per output group.
  for model_idx in range(6):
    print('Dealing with model %d' % model_idx)
    graph = tf.Graph()
    with graph.as_default():
      log_dir = './log/' + str(model_idx)
      model_dir = './model/' + str(model_idx)
      if mode in ('train', 'train_valid'):
        train(data, labels[model_idx], log_dir, model_dir, 'train')
      elif mode == 'test':
        ans.append(train(data, None, log_dir, model_dir, 'test'))
      elif mode == 'valid':
        ans.append(train(data, labels, log_dir, model_dir, 'test'))

  if mode == 'test':
    np.save('./prediction.npy', np.array(ans))
  elif mode == 'valid':
    # Score the held-out predictions with the project evaluator.
    ans = np.array(ans)
    evaluate = Evaluate()
    MAPE_1 = evaluate.evaluate_test(ans[0:6], labels[0:6])
    print('travel time MAPE: ' + str(MAPE_1))
