
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file

class LinearRegression:
    """TF1-style linear-regression demo.

    Trains z = W*x + b on synthetic data (y = 2x + noise), writes summaries
    and a checkpoint under `savedir`, restores from the checkpoint, and plots
    both the fitted line and a smoothed training-loss curve.
    """

    def __init__(self):
        # NOTE(review): `x` is never read inside this class; kept for
        # backward compatibility with any external callers.
        self.x = 1
        # Collected per `display_step` epochs during training; consumed by
        # the loss plot at the end of start().
        self.plotdata = { "batchsize" : [], "loss" : [] }
        self.savedir = "D:/proj/TestData/ai"
        self.linearmodel_filename = "linearmodel.cpkt"
        self.graphfile = "mnist_with_summary"

    def moving_average(self, a, w = 10):
        """Return a trailing moving average of `a` with window `w`.

        The first `w` entries pass through unchanged; entry idx >= w becomes
        the mean of the `w` values preceding it (a[idx-w:idx], exclusive of
        the current value). Sequences shorter than `w` are returned as a
        shallow copy.
        """
        if len(a) < w:
            return a[:]
        return [val if idx < w else sum(a[(idx - w):idx]) / w for idx, val in enumerate(a)]

    def print_model_linearmodel(self):
        """Dump the tensors stored in the linear-model checkpoint."""
        self._print_model(self.linearmodel_filename)

    def _print_model(self, filename):
        # all_tensors=True / all_tensor_names=True: print every tensor value.
        print_tensors_in_checkpoint_file(self.savedir + "/" + filename, None, True, True)

    def model_from_save(self):
        """Rebuild the training graph and restore weights from a checkpoint.

        The graph must match the one built in start() so the Saver can map
        checkpointed variables ("weight", "bias") onto it.
        """
        X = tf.placeholder("float")
        Y = tf.placeholder("float")
        W = tf.Variable(tf.random_normal([1]), name = "weight")
        b = tf.Variable(tf.zeros([1]), name = "bias")
        z = tf.multiply(X, W) + b

        cost = tf.reduce_mean(tf.square(Y - z))

        # The optimizer is not needed for inference, but constructing it keeps
        # the graph identical to the one that was checkpointed.
        learning_rate = 0.01
        optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
        save = tf.train.Saver()
        init = tf.global_variables_initializer()

        ## Check Pointer restore
        load_epoch = 19
        with tf.Session() as sess:
            sess.run(init)

            ## restore method 1: find the newest checkpoint in savedir
            kpt = tf.train.latest_checkpoint(self.savedir)
            # Fixed: compare to None with `is not None`, not `!=`.
            if kpt is not None:
                save.restore(sess, kpt)
            ## restore method 2: restore a specific epoch by filename
            # save.restore(sess, self.savedir + "/" + self.linearmodel_filename + "-" + str(load_epoch))
            print("x=0.2, z = ", sess.run(z, feed_dict = {X: 0.2}))

    def start(self):
        """Train the model, checkpoint it, and plot the fit and loss curve."""
        # Synthetic data: y = 2x plus Gaussian noise.
        train_X = np.linspace(-1, 1, 100)
        train_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.3
        # plt.plot(train_X, train_Y, 'ro', label='Original data')
        # plt.legend()
        # plt.show()

        X = tf.placeholder("float")
        Y = tf.placeholder("float")
        W = tf.Variable(tf.random_normal([1]), name = "weight")
        b = tf.Variable(tf.zeros([1]), name = "bias")
        z = tf.multiply(X, W) + b

        tf.summary.histogram('z', z)

        cost = tf.reduce_mean(tf.square(Y - z))

        tf.summary.scalar('loss_function', cost)

        learning_rate = 0.01
        optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

        init = tf.global_variables_initializer()
        training_epochs = 20
        display_step = 2

        ## Check Pointer save: keep only the most recent checkpoint on disk.
        saver = tf.train.Saver(max_to_keep=1)
        with tf.Session() as sess:
            sess.run(init)

            merged_summary_op = tf.summary.merge_all()
            summary_writer = tf.summary.FileWriter(self.savedir + "/" + self.graphfile, sess.graph)

            for epoch in range(training_epochs):
                # One SGD step per sample (batch size 1).
                for (x, y) in zip(train_X, train_Y):
                    sess.run(optimizer, feed_dict = {X: x, Y: y})
                    summary_str = sess.run(merged_summary_op, feed_dict={X: x, Y: y})
                    summary_writer.add_summary(summary_str, epoch)

                if epoch % display_step == 0:
                    loss = sess.run(cost, feed_dict = {X: train_X, Y:train_Y})
                    print("Epoch: ", epoch+1, "cost = ", loss, " W=", sess.run(W), " b = ", sess.run(b))
                    # Fixed: `loss` is a float, so the original `loss == "NA"`
                    # string comparison was always false; guard against a
                    # diverged (NaN) loss instead.
                    if not np.isnan(loss):
                        self.plotdata["batchsize"].append(epoch)
                        self.plotdata["loss"].append(loss)
            print("Finished")
            # global_step suffixes the checkpoint filename with the epoch.
            saver.save(sess, self.savedir + "/" + self.linearmodel_filename, global_step=epoch)
            print("cost = ", sess.run(cost, feed_dict = {X: train_X, Y: train_Y}), " W = ", sess.run(W), " b= ", sess.run(b))

            print("x=0.2, z = ", sess.run(z, feed_dict = {X: 0.2}))

            plt.plot(train_X, train_Y, 'ro', label='Original data')
            plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label = 'Fittedline')
            plt.legend()
            plt.show()

            # Smooth the recorded losses before plotting them.
            self.plotdata["avgloss"] = self.moving_average(self.plotdata["loss"])
            plt.figure(1)
            plt.subplot(211)
            plt.plot(self.plotdata["batchsize"], self.plotdata["avgloss"], "b--")
            plt.xlabel('Minibatch number')
            plt.ylabel('Loss')
            plt.title('Minibatch run vs.Training loss')
            plt.show()



