# -*- coding: utf-8 -*-
import time
import sys
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import rnn_rbm
import midi_manipulation

import summaries

batch_size = 100 #The number of training examples to feed into the rnn_rbm at a time
epochs_to_save = 5 #The number of epochs to run between saving each checkpoint
saved_weights_path = "parameter_checkpoints/initialized.ckpt" #The path to the initialized weights checkpoint file
SUMMARY_LOG_DIR="SUMMARY_LOGS/RNN" #Directory where the TensorBoard summary/event files are written
def main(num_epochs):
    """Train the RNN-RBM for `num_epochs` epochs.

    Builds the RNN-RBM graph, restores the pre-initialized weights from
    `saved_weights_path`, runs clipped gradient-descent over the songs in
    'Pop_Music_Midi', writes TensorBoard summaries to SUMMARY_LOG_DIR, and
    saves a checkpoint every `epochs_to_save` epochs.

    Args:
        num_epochs: number of passes over the full song set.
    """
    # Build the model and get pointers to the model parameters.
    # NOTE(review): `x` appears twice in this unpacking; the second binding
    # overwrites the first. Confirm against rnn_rbm.rnnrbm()'s return
    # signature — left as-is to preserve behavior.
    x, cost, generate, W, bh, bv, x, lr, Wuh, Wuv, Wvu, Wuu, bu, u0 = rnn_rbm.rnnrbm()

    # Group the parameter nodes in the graph for visualization.
    with tf.name_scope("initial_parameters"):
        # The trainable variables include the weights and biases of the RNN
        # and the RBM, as well as the initial state of the RNN.
        tvars = [W, Wuh, Wuv, Wvu, Wuu, bh, bv, bu, u0]

    with tf.name_scope("train"):
        # Gradient-descent optimizer; the learning rate `lr` is a placeholder
        # fed on a schedule during training rather than fixed at build time.
        with tf.name_scope("Optimizer"):
            opt_func = tf.train.GradientDescentOptimizer(learning_rate=lr)
            # Compute the gradient of the cost w.r.t. each trainable variable.
            with tf.name_scope("ComputeGradients"):
                gvs = opt_func.compute_gradients(cost, tvars)
                # Clip gradient values into [-10, 10] to prevent gradients
                # from blowing up during training (see tf.clip_by_value).
                with tf.name_scope("clip_by_value"):
                    gvs = [(tf.clip_by_value(grad, -10., 10.), var) for grad, var in gvs]

            # Scope-name typo fixed ("ApplyGradiennts" -> "ApplyGradients");
            # this only relabels graph nodes, not checkpointed variables.
            with tf.name_scope("ApplyGradients"):
                # The update step applies the clipped gradients to the parameters.
                updt = opt_func.apply_gradients(gvs)

    with tf.name_scope("input_songs"):
        songs = midi_manipulation.get_songs('Pop_Music_Midi')  # Load the songs

    saver = tf.train.Saver(tvars)

    merged = tf.summary.merge_all()

    with tf.Session() as sess:
        summary_writer = tf.summary.FileWriter(SUMMARY_LOG_DIR, sess.graph)

        sess.run(tf.global_variables_initializer())

        # Start from the separately produced initialization checkpoint.
        saver.restore(sess, saved_weights_path)

        print("begin to training....")

        # Full-trace run options are loop-invariant: build them once instead
        # of re-creating them on every batch as the original loop did.
        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()

        for epoch in range(num_epochs):
            costs = []
            start = time.time()

            for s_ind, song in enumerate(songs):
                for i in range(1, len(song), batch_size):
                    tr_x = song[i:i + batch_size]

                    # Learning-rate schedule: decays as 0.1/i, capped at 0.01.
                    alpha = min(0.01, 0.1 / float(i))

                    # Run the update, the cost, and the merged summaries in a
                    # single step (the original executed the graph twice per
                    # batch, doubling the work for the same feed).
                    _, C, summary = sess.run(
                        [updt, cost, merged],
                        feed_dict={x: tr_x, lr: alpha},
                        options=run_options,
                        run_metadata=run_metadata)

                    costs.append(C)
                    summary_writer.add_summary(summary, epoch)

            # Report progress; print-function form keeps this line valid under
            # both Python 2 and Python 3 (the file mixes both print styles).
            print("epoch: {} cost: {} time: {}".format(epoch, np.mean(costs), time.time() - start))
            print("")

            # Save the model weights every `epochs_to_save` epochs.
            if (epoch + 1) % epochs_to_save == 0:
                saver.save(sess, "parameter_checkpoints/epoch_{}.ckpt".format(epoch))

        summary_writer.close()
if __name__ == "__main__":
    # Allow the epoch count to be overridden from the command line
    # (e.g. `python train.py 200`); defaults to 500 as before.
    # `sys` is already imported at the top of the file.
    main(int(sys.argv[1]) if len(sys.argv) > 1 else 500)