import tensorflow as tf
import numpy as np

class Network:
    """Base class for a TF-1.x network.

    Holds the configuration dict, the graph tensors in ``self.net``
    (a name -> tensor mapping filled in by :meth:`build`), and optional
    session/saver plumbing injected through the config.

    Subclasses are expected to override :meth:`build`; ``self.net``
    should at minimum expose the keys ``"input"`` and ``"output"``
    (and ``"is_training"``, which :meth:`tensorboard` feeds).
    """

    def __init__(self, config):
        """Store *config*, apply input-size defaults, and run load_config().

        Args:
            config: dict. Recognized keys: ``input_h``/``input_w``
                (ints, default 224 each), ``data``, ``sess``, ``saver``,
                and ``trained_model`` (path to a pickled numpy weight file).
        """
        self.config = config
        # Fix: input_h previously read the "input_w" key, so a config
        # that set only "input_h" was silently ignored.
        self.config["input_h"] = config.get("input_h", 224)
        self.config["input_w"] = config.get("input_w", 224)
        self.net = {}       # name -> tf tensor/op, populated by build()
        self.saver = None
        self.sess = None

        self.load_config()

    def build(self):
        """Construct the graph into ``self.net``; subclasses override."""
        pass

    def load_config(self):
        """Copy optional collaborators from ``self.config`` onto the instance.

        Sets ``self.data``, ``self.sess``, ``self.saver`` (plus the
        ``saver_variables`` published to TF collections by save_model),
        and ``self.trained_model`` when the corresponding keys exist.
        """
        if "data" in self.config:
            self.data = self.config["data"]
        if "sess" in self.config:
            self.sess = self.config["sess"]
        if "saver" in self.config:
            self.saver = self.config["saver"]
            self.saver_variables = ["input", "output"]
        if "trained_model" in self.config:
            # NOTE(security): allow_pickle=True runs the pickle machinery;
            # only load weight files from trusted sources. It is required
            # for .item() on object arrays since numpy 1.16.6 — without it
            # this line raises ValueError on modern numpy.
            self.trained_model = np.load(
                self.config["trained_model"],
                encoding="latin1",
                allow_pickle=True,
            ).item()

    def save_model(self, save_path):
        """Save the current session's variables to *save_path*.

        Lazily creates a tf.train.Saver on first use, publishing the
        tensors named in ``self.saver_variables`` to TF collections so a
        later restore can retrieve them by name.
        """
        assert self.sess is not None
        if self.saver is None:
            for one in self.saver_variables:
                tf.add_to_collection(one, self.net[one])
            self.saver = tf.train.Saver()
        self.saver.save(self.sess, save_path)

    def restore_from_model(self, model_path):
        """Restore variables from the latest checkpoint under *model_path*.

        Requires ``self.sess`` and ``self.saver`` to be set (e.g. via
        config). NOTE(review): get_checkpoint_state returns None when no
        checkpoint exists, which would raise AttributeError below.
        """
        assert self.sess is not None
        assert self.saver is not None
        ckpt = tf.train.get_checkpoint_state(model_path)
        self.saver.restore(self.sess, ckpt.model_checkpoint_path)

    def tensorboard(self):
        """Run one dummy forward pass and write a summary to ``log/``.

        Feeds an all-ones batch of shape (1, input_h, input_w, 3) —
        presumably NHWC images; confirm against build(). Uses a private
        session (not ``self.sess``) so the graph variables are freshly
        initialized.
        """
        # Use context manager / explicit close so the session is released
        # and the FileWriter actually flushes the event file to disk.
        with tf.Session() as sess:
            tf.summary.scalar("output", tf.reduce_sum(self.net["output"]))
            merged = tf.summary.merge_all()
            writer = tf.summary.FileWriter("log/", sess.graph)
            sess.run(tf.global_variables_initializer())
            a = sess.run(
                merged,
                feed_dict={
                    self.net["input"]: np.ones(
                        [1, self.config["input_h"], self.config["input_w"], 3]
                    ),
                    self.net["is_training"]: True,
                },
            )
            writer.add_summary(a)
            writer.close()


    #def train(self,iterations=64000):
    #    labels = tf.placeholder(tf.int32,[None])
    #    self.loss["norm"] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,logits=self.net["fc1000"])
    #    self.loss["total"] = self.loss["norm"] + 0.9*self.loss["l2"]
    #    lr = tf.Variable(0.1,trainable=False)
    #    opt = tf.train.MomentumOptimizer(lr,0.9)
    #    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    #    with tf.control_dependencies(update_ops):
    #        train_op = opt.minimize(self.loss["total"])

    #    batch_size = 128
    #    self.sess.run(tf.global_variables_initializer())
    #    for i in range(iterations):
    #        if i == 32000 or i == 48000:
    #            self.sess.run(tf.assign(lr,lr/10))
    #        data_batch = self.data.next_batch(batch_size)
    #        print("data_batch[1] shape: %s" % str(data_batch[1].shape))
    #        params = {self.net["input"]:data_batch[0],labels:data_batch[1],self.net["is_training"]:True}
    #        pred,loss,_ = self.sess.run([self.net["output"],self.loss["total"],train_op],feed_dict = params)
    #        if i%100 == 99:
    #            pred = np.reshape(np.argmax(pred),[-1])
    #            gt = np.reshape(data_batch[1],[-1])
    #            accu = accuracy(gt,pred)
    #            print("iteration:%d loss:%f accu:%s" % (i,loss,accu))
    #        if i%1000 == 999:
    #            data_batch = self.data.next_batch(batch_size,"test")
    #            params = {self.net["input"]:data_batch[0],labels:data_batch[1],self.net["is_training"]:False}
    #            pred,loss,_ = self.sess.run([self.net["output"],self.loss["total"],train_op],feed_dict = params)
    #            pred = np.reshape(np.argmax(pred),[-1])
    #            gt = np.reshape(data_batch[1],[-1])
    #            accu = accuracy(gt,pred)
        
if __name__ == "__main__":
    # Intentionally a no-op: this module only defines the Network base
    # class (presumably meant to be imported and subclassed — build()
    # is a stub). Kept as an explicit entry-point guard.
    pass
