# coding=utf-8
import os
import time
import tensorflow as tf
from tqdm import tqdm
from dataflow import dlogger


def solve_cudnn_error():
    """Enable memory growth on every visible GPU to avoid cuDNN init failures."""
    physical_gpus = tf.config.experimental.list_physical_devices('GPU')
    if not physical_gpus:
        return
    try:
        # Memory growth must be configured identically for all GPUs,
        # and before any of them has been initialized.
        for device in physical_gpus:
            tf.config.experimental.set_memory_growth(device, True)
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(physical_gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        # Raised when a GPU was already initialized; nothing to do but report.
        print(e)

class Meter:
    """Accumulates a scalar metric as a running sum and sample count."""

    def __init__(self, name='default'):
        self.name = name  # display label
        self.data = 0     # running sum
        self.dcnt = 0     # sample count

    def __calc__(self, data):
        # NOTE(review): not a real dunder (Python never invokes __calc__
        # implicitly); kept as an alias of update() for any direct callers.
        self.update(data)

    def update(self, data):
        """Record one observation."""
        self.data += data
        self.dcnt += 1

    def average(self):
        """Mean of all observations (raises ZeroDivisionError if none)."""
        return self.data / self.dcnt

    def count(self):
        """Number of observations recorded."""
        return self.dcnt

    def sum(self):
        """Sum of all observations."""
        return self.data

    def reset_state(self):
        """Discard everything recorded so far."""
        self.data = 0
        self.dcnt = 0

class Info:
    """Registry of Meters grouped by phase ('train'/'test') and metric name.

    Bug fixes versus the original:
      * ``dict.keys`` was used without being called, so every membership
        test and iteration raised TypeError;
      * ``__getitem__`` had an illegal two-argument signature; it now
        accepts a tuple key, i.e. ``info['train', 'loss']``;
      * ``all_key`` returned the undefined name ``main``; it now returns
        the ``mains`` list it builds;
      * mutable default arguments were replaced with tuples.
    """

    def __init__(self, keys=('loss', 'accuracy'), main=('train', 'test')):
        # info[phase][metric] -> Meter
        self.info = {mkey: {key: Meter(key) for key in keys} for mkey in main}

    def update(self, main, key, data):
        # Unknown phase/metric pairs are silently ignored, as before.
        if main in self.info and key in self.info[main]:
            self.info[main][key].update(data)

    def to_string(self, main):
        """Render one 'name:value ' pair per metric of the given phase."""
        return "".join("%s:%.6f " % (key, meter.average())
                       for key, meter in self.info[main].items())

    def __getitem__(self, key):
        """Return the Meter for ``info[phase, metric]``."""
        main, metric = key
        return self.info[main][metric]

    def all_key(self):
        """Return (phases, ['phase_metric', ...]) for all registered meters."""
        mains = list(self.info)
        keys = [mkey + '_' + key for mkey in self.info for key in self.info[mkey]]
        return mains, keys


class Trainer:
    """
    Single-GPU trainer skeleton.

    Subclasses implement ``init`` (returning model and optimizer),
    ``compute_loss`` and ``compute_accuracy``; ``fit`` then runs the
    train/test loop, logs metrics, writes TensorBoard summaries and
    manages checkpoints.

    Fixes versus the original:
      * the training loop ignored ``__train_step``'s results and never
        incremented its batch counter, so the epoch averages divided by zero;
      * ``self.info['train'].keys`` was iterated without being called;
      * ``__test_step`` dropped required ``compute_loss``/``compute_accuracy``
        arguments;
      * the test loop unpacked 2 values from a 4-tuple;
      * epoch summaries use ``tf.summary.scalar`` instead of the low-level
        ``tf.summary.write``.
    """
    def __init__(self, model_dir, max_to_keep=5):
        # Subclass hook supplies the model and optimizer.
        self.model, self.optimizer = self.init()
        self.log = dlogger(log_path=model_dir, app_name='trainer')

        # you can add yourself info-keyword
        self.info = {"train": {"loss": Meter('loss'), "accuracy": Meter("accuracy")},
                     "test": {"loss": Meter('loss'), "accuracy": Meter("accuracy")}}

        if not tf.io.gfile.exists(model_dir):
            tf.io.gfile.makedirs(model_dir)

        train_dir = os.path.join(model_dir, 'summaries', 'train')
        test_dir = os.path.join(model_dir, 'summaries', 'test')

        self.train_summary_writer = tf.summary.create_file_writer(train_dir, flush_millis=10000, name="train")
        self.test_summary_writer = tf.summary.create_file_writer(test_dir, flush_millis=10000, name="test")

        # Restore the latest checkpoint if one exists (restore is a no-op
        # when latest_checkpoint returns None).
        checkpoint_dir = os.path.join(model_dir, 'checkpoints')
        self.checkpoint = tf.train.Checkpoint(model=self.model, optimizer=self.optimizer)
        self.manager = tf.train.CheckpointManager(self.checkpoint,
                                                  directory=checkpoint_dir,
                                                  checkpoint_name="model.ckpt",
                                                  max_to_keep=max_to_keep)
        self.checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))

        self.export_path = os.path.join(model_dir, 'export')
        self.log.info("=====================> init trainer ====================>")

    def init(self):
        """Return ``(model, optimizer)``; must be overridden."""
        raise NotImplementedError

    def compute_loss(self, ytrue, ypred, images, labels, train_parse='train'):
        """Return ``(loss_value, loss_dict)``; must be overridden.

        Implementations should make sure ``train_parse`` is a key of
        ``self.info`` and update ``self.info[train_parse]['loss']``.
        """
        raise NotImplementedError

    def compute_accuracy(self, ytrue, ypred, images, labels):
        """Return ``(accuracy_value, acc_dict)``; must be overridden.

        Implementations should update ``self.info[...]['accuracy']``.
        """
        raise NotImplementedError

    def preproc(self, images, labels, training=None):
        # Optional hook for input preprocessing/augmentation.
        return images, labels

    @tf.function
    def __train_step(self, images, labels):
        # Forward + backward pass for one batch.
        with tf.GradientTape() as tape:
            ypred = self.model(images, training=True)
            loss_value, loss_dict = self.compute_loss(labels, ypred, images, labels)
        grads = tape.gradient(loss_value, self.model.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))

        accuracy, acc_dict = self.compute_accuracy(labels, ypred, images, labels)
        return loss_value, accuracy, loss_dict, acc_dict

    @tf.function
    def __test_step(self, images, labels):
        # Forward pass only; mirrors __train_step's return convention.
        ypred = self.model(images, training=False)
        loss_value, loss_dict = self.compute_loss(labels, ypred, images, labels, train_parse='test')
        acc_value, acc_dict = self.compute_accuracy(labels, ypred, images, labels)
        return loss_value, acc_value, loss_dict, acc_dict

    def fit(self, train_dataset, test_dataset, train_epoch, log_freq=20):
        """Train for ``train_epoch`` epochs, evaluating after each one."""
        for i in range(train_epoch):
            # ---------------- train ----------------
            total_loss, total_acc, bs = 0, 0, 0
            for images, labels in train_dataset:
                images, labels = self.preproc(images, labels, True)
                loss, accuracy, _, _ = self.__train_step(images, labels)
                total_loss += loss
                total_acc += accuracy
                bs += 1

                if bs % log_freq == 0:
                    log_str = ""
                    for key in self.info['train']:
                        log_str += "%s: %0.6f " % (key, self.info['train'][key].average())
                    self.log.info("iter: %d %s" % (self.optimizer.iterations, log_str))

            iloss = total_loss / bs
            iacc = total_acc / bs
            self.log.info("train epoch: %d loss: %0.6f acc: %0.6f" % (i+1, iloss, iacc))
            print("train epoch: %d loss: %0.6f acc: %0.6f" % (i+1, iloss, iacc))
            with self.train_summary_writer.as_default():
                tf.summary.scalar("loss", iloss, step=i)
                tf.summary.scalar('accuracy', iacc, step=i)

            # save model checkpoint
            self.manager.save(checkpoint_number=i)

            # ---------------- test ----------------
            total_loss, total_acc, bs = 0, 0, 0
            for images, labels in test_dataset:
                images, labels = self.preproc(images, labels, False)
                loss, accuracy, _, _ = self.__test_step(images, labels)
                total_loss += loss
                total_acc += accuracy
                bs += 1
            iloss = total_loss / bs
            iacc = total_acc / bs
            self.log.info("test epoch: %d loss: %0.6f acc: %0.6f" % (i+1, iloss, iacc))
            print("test epoch: %d loss: %0.6f acc: %0.6f" % (i+1, iloss, iacc))
            with self.test_summary_writer.as_default():
                tf.summary.scalar("loss", iloss, step=i)
                tf.summary.scalar('accuracy', iacc, step=i)

        # Export the final model as a SavedModel for serving.
        tf.saved_model.save(self.model, self.export_path)


class TFPBModel:
    """Callable wrapper around a SavedModel's ``serving_default`` signature."""

    def __init__(self, pb_dir):
        # Load the exported SavedModel and keep its default serving function.
        imported = tf.saved_model.load(pb_dir)
        self.imported = imported
        self.model = imported.signatures["serving_default"]

    def __call__(self, *args, **kwargs):
        # Delegate straight to the concrete serving function.
        return self.model(*args, **kwargs)


# coding: utf-8
# NOTE(review): this second coding header and the duplicated imports/classes
# below indicate two separate scripts were concatenated into one file;
# consider splitting them back apart.
import tensorflow as tf
from tfmodel import SceneNet, batch_acrop, batch_adrop
from dataflow import RNGDataFlow, ClassifyDataset, BatchData, MultiProcessRunnerZMQ
import numpy as np
import cv2
import os

# Pin the process to one physical GPU, then switch to TF1 graph-mode
# execution, which the session-based SceneTrainerV1 below requires.
os.environ['CUDA_VISIBLE_DEVICES'] = '6'
print(tf.__version__)
tf.compat.v1.disable_eager_execution()


class Meter:
    """Streaming sum/count accumulator for one scalar metric."""

    def __init__(self, name='default'):
        # name is a label only; data/dcnt hold the running sum and count.
        self.name = name
        self.data = 0
        self.dcnt = 0

    def __calc__(self, data):
        # NOTE(review): misnamed dunder — Python never calls __calc__
        # implicitly; preserved as a direct alias of update().
        self.update(data)

    def update(self, data):
        """Fold one value into the running statistics."""
        self.data = self.data + data
        self.dcnt = self.dcnt + 1

    def average(self):
        """Arithmetic mean; raises ZeroDivisionError when empty."""
        return self.data / self.dcnt

    def count(self):
        """How many values have been folded in."""
        return self.dcnt

    def sum(self):
        """Running total of all values."""
        return self.data

    def reset_state(self):
        """Start over from an empty meter."""
        self.data = 0
        self.dcnt = 0


class Info:
    """A named collection of Meters keyed by metric name.

    Fixes versus the original: the mutable default argument is replaced by
    a tuple, and ``to_string`` catches only the ``ZeroDivisionError`` that
    an empty Meter's ``average`` raises, instead of swallowing every
    exception.
    """

    def __init__(self, keys=('loss', 'accuracy')):
        self.info = {key: Meter(key) for key in keys}

    def add_key(self, key):
        """Register a new metric; existing meters are left untouched."""
        if key not in self.info:
            self.info[key] = Meter(name=key)

    def update(self, key, data):
        # Unknown keys are silently ignored, as before.
        if key in self.info:
            self.info[key].update(data)

    def to_string(self):
        """Render 'name:value ' pairs, skipping meters with no samples."""
        string = ""
        for key, meter in self.info.items():
            try:
                string += "%s:%.6f " % (key, meter.average())
            except ZeroDivisionError:
                # Meter has no observations yet; omit it from the report.
                pass
        return string

    def reset_state(self):
        """Zero every meter (e.g. at the start of each epoch)."""
        for meter in self.info.values():
            meter.reset_state()

    def __getitem__(self, key):
        # Returns the *average*, not the Meter object.
        return self.info[key].average()


class SceneTrainerV1:
    """TF1 graph-mode trainer for SceneNet with attention-guided cropping.

    Fixes versus the original: ``tf.GPUOptions``, ``tf.ConfigProto``,
    ``tf.image.resize_images``, ``tf.global_variables`` and
    ``tf.trainable_variables`` no longer exist under TF2 (even with eager
    execution disabled); they are now referenced through ``tf.compat.v1``.
    The dead ``var_list`` locals and commented-out debug code were removed,
    and ``predict`` now fails loudly on unreadable images.
    """

    def __init__(self, model_dir):
        # Grow GPU memory on demand instead of grabbing it all up front.
        gpu_options = tf.compat.v1.GPUOptions(allow_growth=True)
        config = tf.compat.v1.ConfigProto(gpu_options=gpu_options)
        self.graph = tf.Graph()
        self.sess = tf.compat.v1.Session(graph=self.graph, config=config)

        self.info = Info(keys=["loss", "origin_loss", "crop_loss", "accuracy", "refine_accuracy", 'refine_loss'])
        self.batch_size = 1  # NOTE(review): hard-wired down from 32, presumably for debugging
        with self.graph.as_default():
            self.input = tf.compat.v1.placeholder(shape=(self.batch_size, 224, 224, 3), dtype=tf.float32, name='input')
            self.label = tf.compat.v1.placeholder(shape=(self.batch_size, 6), dtype=tf.float32, name='label')
            self.learn = tf.compat.v1.placeholder(dtype=tf.bool, name='learning')
            # Externally averaged logits fed back in for accuracy evaluation.
            self.input_logit = tf.compat.v1.placeholder(shape=(self.batch_size, 6), dtype=tf.float32, name='input_logit')
            self.model = SceneNet(6)
            self.out = self.model(self.input, self.learn)
            # Both heads are trained jointly; the served output is their mean.
            self.loss = self.compute_loss(self.label, self.out[0]) + self.compute_loss(self.label, self.out[1])
            self.output = tf.math.sigmoid((self.out[0] + self.out[1]) / 2)

            # Upsample the attention map and crop the attended image region.
            attention = tf.compat.v1.image.resize_images(self.out[2], (224, 224))
            self.crop_imgs = batch_acrop(attention, self.input, (224, 224))
            self.accuracy = self.compute_accuracy(self.label, self.input_logit)

            # Run batch-norm moving-average updates together with each step.
            update_ops = self.model.updates
            with tf.control_dependencies(update_ops):
                self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=0.01).minimize(self.loss)
            self.sess.run(tf.compat.v1.global_variables_initializer())
            if not os.path.exists(model_dir):
                os.makedirs(model_dir)
            self.model_dir = model_dir

            self.saver = tf.compat.v1.train.Saver(var_list=tf.compat.v1.global_variables(), max_to_keep=20)
            flag = tf.train.latest_checkpoint(model_dir)
            if flag:
                self.saver.restore(self.sess, flag)
                print("restore from ", flag)

    def compute_loss(self, ytrue, ypred):
        """Mean element-wise sigmoid cross-entropy (multi-label loss)."""
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=ytrue, logits=ypred)
        return tf.math.reduce_mean(loss)

    def compute_accuracy(self, ytrue, logits):
        """Mean top-k categorical accuracy of the supplied logits."""
        acc = tf.keras.metrics.top_k_categorical_accuracy(y_true=ytrue, y_pred=logits)
        return tf.math.reduce_mean(acc)

    def __train_step(self, images, labels):
        """One optimization step on the raw batch plus its attention crop."""
        crop_imgs, out, loss, _ = self.sess.run([self.crop_imgs,
                                                 self.output,
                                                 self.loss,
                                                 self.optimizer],
                                                feed_dict={self.input: images,
                                                           self.label: labels,
                                                           self.learn: True})
        # Second pass: optimize on the attention-cropped images as well.
        crop_out, crop_loss, _ = self.sess.run([self.output, self.loss, self.optimizer],
                                               feed_dict={self.input: crop_imgs, self.label: labels, self.learn: True})

        # Accuracy is measured on the averaged predictions of both passes.
        sum_out = (out + crop_out) / 2
        accuracy = self.sess.run(self.accuracy, feed_dict={self.input_logit: sum_out, self.label: labels})

        return accuracy, loss + crop_loss, loss, crop_loss

    def __test_step(self, images, labels):
        """Evaluate one batch; returns plain and attention-refined metrics."""
        out, loss, crop_imgs = self.sess.run([self.output, self.loss, self.crop_imgs],
                                             feed_dict={self.input: images, self.label: labels, self.learn: False})

        accuracy = self.sess.run(self.accuracy, feed_dict={self.input_logit: out, self.label: labels})

        crop_out, crop_loss = self.sess.run([self.output, self.loss], feed_dict={self.input: crop_imgs, self.label: labels, self.learn: False})

        # Refined prediction = mean of the raw-pass and cropped-pass outputs.
        input_logit = (out + crop_out) / 2
        refine_accuracy = self.sess.run(self.accuracy, feed_dict={self.input_logit: input_logit, self.label: labels})

        return accuracy, loss, refine_accuracy, loss + crop_loss

    def fit(self, trainset, testset, train_epoch, log_freq=20):
        """Run ``train_epoch`` evaluation passes over ``testset``.

        NOTE(review): the training loop was already commented out in the
        original; it is kept below as an inert string for reference, so
        ``fit`` currently only evaluates and never updates weights.
        """
        for i in range(train_epoch):
            """
            self.info.reset_state()
            for images, labels in trainset:
                acc, loss, origin_loss, crop_loss = self.__train_step(images, labels)
                self.info.update('loss', loss)
                self.info.update('origin_loss', origin_loss)
                self.info.update('crop_loss', crop_loss)
                self.info.update('accuracy', acc)
            print(self.info.to_string())
            print("train epoch done: %d" % (i))
            self.saver.save(self.sess, os.path.join(self.model_dir, "./model-%03d" % i))
            """

            self.info.reset_state()
            for images, labels in testset:
                acc, loss, refine_accuracy, refine_loss = self.__test_step(images, labels)
                self.info.update('loss', loss)
                self.info.update('accuracy', acc)
                self.info.update('refine_loss', refine_loss)
                self.info.update('refine_accuracy', refine_accuracy)
            print(self.info.to_string())

    def predict(self, img_dir_with_lab):
        """Run inference on one 'path lab1 lab2 ...' line; print logits vs label."""
        text = img_dir_with_lab.split()
        img_dir = text[0]
        img_lab = [int(lab) for lab in text[1:]]
        label = np.zeros((1, 6))
        label[0][img_lab] = 1
        image = cv2.imread(img_dir, cv2.IMREAD_COLOR)
        if image is None:
            # cv2.imread returns None for unreadable paths; fail loudly
            # instead of letting cv2.resize raise a cryptic error.
            raise FileNotFoundError("cannot read image: %s" % img_dir)
        image = cv2.resize(image, (224, 224))
        image = (image - 127.5) / 127.5  # scale pixels to [-1, 1]
        image = np.expand_dims(image, axis=0)

        out = self.sess.run(self.output, feed_dict={self.input: image, self.label: label, self.learn: False})
        print(out[0], label[0])


def solve_cudnn_error():
    """Turn on per-GPU memory growth so cuDNN can initialize reliably."""
    devices = tf.config.experimental.list_physical_devices('GPU')
    if not devices:
        return
    try:
        # Growth must be set uniformly across GPUs, before any is initialized.
        for dev in devices:
            tf.config.experimental.set_memory_growth(dev, True)
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(devices), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        # Too late: some GPU was already initialized.
        print(e)


class SceneDataset(RNGDataFlow):
    """DataFlow yielding (normalized 224x224 BGR image, float32 label) pairs.

    Fix versus the original: the except-branch printed ``img_dir`` even when
    the failure happened before ``img_dir`` was assigned, which raised a
    NameError and masked the real error.
    """

    def __init__(self, data_list, shuffle=False):
        ds = ClassifyDataset(6)
        # dataset[0]: image paths, dataset[1]: label vectors
        self.dataset = ds.load_list(data_list)
        self.shuffle = shuffle

    def __len__(self):
        return len(self.dataset[0])

    def __iter__(self):
        idx = np.arange(self.__len__())
        if self.shuffle:
            # NOTE(review): self.rng is presumably provided by RNGDataFlow
            # once reset_state() has been called — confirm against dataflow.
            self.rng.shuffle(idx)

        for i in idx:
            img_dir = self.dataset[0][i]
            try:
                image = cv2.imread(img_dir, cv2.IMREAD_COLOR)
                image = cv2.resize(image, (224, 224))
                image = (image - 127.5) / 127.5  # scale pixels to [-1, 1]
                yield image, self.dataset[1][i].astype(np.float32)
            except Exception as e:
                # Best effort: report the bad sample and keep iterating.
                print(str(e), img_dir)


"""
solve_cudnn_error()
trainset = SceneDataset('/data01/dingzhifeng/dbase/clothes/clothes.train', True)
trainset = BatchData(trainset, batch_size=32)
#trainset = MultiProcessRunnerZMQ(trainset, 8)
trainset.reset_state()
testset = SceneDataset('/data01/dingzhifeng/dbase/clothes/clothes.test', False)
testset = BatchData(testset, batch_size=32)
#testset = MultiProcessRunnerZMQ(testset, 8)
testset.reset_state()

#trainset = None
st = SceneTrainerV1('./clothes')
st.fit(trainset, testset, 10, 10)
"""


solve_cudnn_error()
st = SceneTrainerV1('./clothes')
with open('/data01/dingzhifeng/dbase/clothes/clothes.test', 'r') as f:
    for line in f:
        st.predict(line.strip())
#"""
