import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import models
import tensorflow.keras.backend as K
import numpy as np
import os
from tqdm import tqdm


class Estimate(keras.Model):
    """Keras model with a custom train step that can additionally log the
    input batch images to a TensorBoard summary writer.
    """

    def __init__(self, train_summary_writer=None, test_summary_writer=None, **kwargs):
        super().__init__(**kwargs)
        # Optional tf.summary writers; image logging is skipped when None.
        self.train_summary_writer = train_summary_writer
        self.test_summary_writer = test_summary_writer

    def train_step(self, data):
        """Run one optimization step on an `(x, y)` batch and return the
        current value of every compiled metric, keyed by metric name."""
        inputs, targets = data

        # Reverses the channel axis before logging — presumably BGR input
        # (e.g. OpenCV) rendered as RGB in TensorBoard; confirm with caller.
        if self.train_summary_writer is not None:
            with self.train_summary_writer.as_default():
                tf.summary.image("train_img", inputs[:, :, :, ::-1],
                                 step=self._train_counter, max_outputs=1)

        with tf.GradientTape() as tape:
            predictions = self(inputs, training=True)  # forward pass
            loss = self.compiled_loss(targets, predictions,
                                      regularization_losses=self.losses)

        variables = self.trainable_variables
        grads = tape.gradient(loss, variables)
        self.optimizer.apply_gradients(zip(grads, variables))

        # Includes the loss-tracking metric that compile() maintains.
        self.compiled_metrics.update_state(targets, predictions)
        return {metric.name: metric.result() for metric in self.metrics}


class Estimate_tf14():
    """TF 1.x-style trainer: builds symbolic train/test step functions with
    ``K.function`` and drives them from a plain Python fit loop.
    """

    def __init__(self, model: models.Model, label: layers.InputLayer):
        """
        :param model: Keras model to train; ``model.input`` feeds the step functions
        :param label: input layer / placeholder carrying the ground-truth labels
        """
        self.model = model
        self.label = label

    def compile(self, optimizer: keras.optimizers.Optimizer, **kwargs):
        """Store the optimizer and build the symbolic train/test iterations."""
        self.optimizer = optimizer
        self.train_iter, self.test_iter = self.get_iter()

    def get_iter(self, **kwargs):
        """Return ``(train_iter, test_iter)`` backend functions.

        Both map ``[inputs, labels] -> [loss]``; only ``train_iter`` also
        runs the optimizer's parameter-update ops.
        """
        logits = self.model.output
        loss = tf.losses.softmax_cross_entropy(self.label, logits)
        update_op = self.optimizer.get_updates(params=self.model.trainable_variables, loss=loss)

        train_iter = K.function(inputs=[self.model.input, self.label], outputs=[loss], updates=update_op)
        test_iter = K.function(inputs=[self.model.input, self.label], outputs=[loss])
        return train_iter, test_iter

    def train_step(self, x, y, **kwargs) -> dict:
        """Run one optimization step; returns ``{"loss": scalar}``."""
        # K.function returns a list matching `outputs`; unwrap the single loss
        # so the log carries a scalar rather than a one-element list.
        loss_value = self.train_iter([x, y])[0]
        return {"loss": loss_value}

    def test_step(self, x, y, **kwargs) -> dict:
        """Evaluate one batch without updating weights; returns ``{"val_loss": scalar}``."""
        loss_value = self.test_iter([x, y])[0]
        return {"val_loss": loss_value}

    def fit(self, train_data: iter, epochs, steps_per_epoch, validation_data: iter, validation_steps, callbacks,
            save_path="./out/test_model.h5", **kwargs):
        """Minimal fit loop: train, validate, then checkpoint every epoch.

        :param train_data: iterator yielding ``(x, y)`` training batches
        :param epochs: number of epochs to run
        :param steps_per_epoch: training batches per epoch
        :param validation_data: iterator yielding ``(x, y)`` validation batches
        :param validation_steps: validation batches per epoch
        :param callbacks: accepted for API compatibility; currently unused
        :param save_path: checkpoint path (previously hard-coded); the file is
            overwritten each epoch — no best-model tracking
        """
        for epoch in range(epochs):
            for _ in range(steps_per_epoch):
                x_train, y_train = next(train_data)
                self.train_step(x_train, y_train)

            for _ in range(validation_steps):
                x_val, y_val = next(validation_data)
                self.test_step(x_val, y_val)

            self.model.save(save_path)


class Estimate_BBN():
    """BBN-style (bilateral-branch) trainer for TF 1.x graph mode.

    Combines a conventional-branch loss (``loss_cb``) and a re-balancing
    branch loss (``loss_rb``) with a mixing coefficient ``alpha`` that decays
    over epochs (cumulative-learning schedule).
    """

    def __init__(self, model: models.Model, input_map=None, train_output_map=None, val_output_map=None,
                 model_type="diff", save_path="./out", model_name="temp_model"):
        """
        :param model: Keras model; ``model.output`` supplies the shared logits
        :param input_map: ordered mapping of symbolic inputs; ``fit()`` relies
            on its iteration order when feeding batches
        :param train_output_map: ordered mapping name -> tensor logged per train step
        :param val_output_map: ordered mapping name -> tensor logged per val step
        :param model_type: "diff" appends ``K.learning_phase()`` to the inputs
        :param save_path: directory where epoch checkpoints are written
        :param model_name: checkpoint file name prefix
        """
        self.model = model
        self.model_type = model_type
        self.input_map = input_map
        self.train_output_map = train_output_map
        self.val_output_map = val_output_map
        self.train_iter = None
        self.val_iter = None
        self.save_path = save_path
        self.seve_path = save_path  # kept for backward compatibility (original misspelling)
        self.model_name = model_name

    def compile(self, optimizer: keras.optimizers.Optimizer, alpha):
        """Store the optimizer and the backend variable holding the mixing alpha."""
        self.optimizer = optimizer
        self.alpha = alpha

    def get_train_iter(self, fc_inputs, fc_outs, updata_op):
        """Build the training backend function (runs the update ops)."""
        if self.model_type == "diff":
            # Extra learning-phase flag — callers must feed it as the last input.
            fc_inputs = fc_inputs + [K.learning_phase()]
        return K.function(inputs=fc_inputs, outputs=fc_outs, updates=updata_op, name="train_iter")

    def get_val_iter(self, fc_inputs, fc_outs):
        """Build the validation backend function (no weight updates)."""
        if self.model_type == "diff":
            fc_inputs = fc_inputs + [K.learning_phase()]
        return K.function(inputs=fc_inputs, outputs=fc_outs, name="val_iter")

    def train_step(self, data):
        """Run one optimization step; returns a name -> value log dict."""
        res = self.train_iter(data)
        return self._get_log(res, True)

    def val_step(self, data):
        """Evaluate one batch without updating weights; returns a log dict."""
        res = self.val_iter(data)
        return self._get_log(res, False)

    def _get_log(self, res: list, is_train: bool):
        """Zip iterator outputs with their names from the matching output map.

        Falls back to generic ``out_i`` names when no output map was supplied.
        (The original tested ``res is None`` here, which crashed on
        ``len(None)``; the intended guard is on the output map.)
        """
        out_map = self.train_output_map if is_train else self.val_output_map
        if out_map is None:
            output_names = ["out_{}".format(i) for i in range(len(res))]
        else:
            output_names = list(out_map.keys())
        return dict(zip(output_names, res))

    def _get_alpha(self, epoch, epochs):
        """Parabolic decay: alpha = 1 - ((epoch + 1) / epochs)**2, from 
        near 1 at the start down to 0 on the final epoch."""
        return 1 - ((1 + epoch) / epochs) ** 2

    def summary(self):
        return self.model.summary()

    def fit(self, train_data: iter, epochs, steps_per_epoch, validation_data: iter, validation_steps, callbacks,
            **kwargs):
        """Train with the two-branch weighted loss and checkpoint each epoch.

        :param train_data: pair of iterators ``(conventional, re-balancing)``,
            each yielding ``(x, y, sample_weight)`` batches
        :param validation_data: iterator yielding ``(x, y, sample_weight)`` batches
        :param callbacks: accepted for API compatibility; currently unused
        """
        logits = self.model.output
        label_cb = self.input_map["label_cb"]
        label_rb = self.input_map["label_rb"]
        weight_cb = self.input_map["weight_cb"]
        weight_rb = self.input_map["weight_rb"]

        loss_cb = tf.losses.softmax_cross_entropy(label_cb, logits, weight_cb, label_smoothing=0.1)
        loss_rb = tf.losses.softmax_cross_entropy(label_rb, logits, weight_rb, label_smoothing=0.1)
        self.train_output_map.update({"loss_cb": loss_cb, "loss_rb": loss_rb})

        fc_input = list(self.input_map.values())
        val_output = list(self.val_output_map.values())
        self.val_iter = self.get_val_iter(fc_input, val_output)

        # Build the training graph ONCE: `self.alpha` is a backend variable
        # (it is driven by K.set_value below), so the combined loss tracks the
        # schedule without rebuilding. The original rebuilt the update ops
        # every epoch, duplicating graph nodes and leaking memory.
        loss = self.alpha * loss_cb + (1 - self.alpha) * loss_rb
        update_op = self.optimizer.get_updates(loss, self.model.trainable_variables)
        self.train_output_map.update({"loss": loss})
        train_output = list(self.train_output_map.values())
        self.train_iter = self.get_train_iter(fc_input, train_output, update_op)

        for epoch in range(epochs):
            K.set_value(self.alpha, self._get_alpha(epoch, epochs))

            pbar = tqdm(range(steps_per_epoch))
            for _ in pbar:
                x_cb, y_cb, w_cb = next(train_data[0])
                x_rb, y_rb, w_rb = next(train_data[1])
                # Batches are fed in input_map order.
                train_batch_log = self.train_step([x_cb, x_rb, y_cb, y_rb, w_cb, w_rb])
                pbar.set_description(str(train_batch_log))

            val_res = {}
            for _ in range(validation_steps):
                x_val, y_val, w_val = next(validation_data)
                # The same validation batch feeds both branches (input_map order).
                val_batch_log = self.val_step([x_val, x_val, y_val, y_val, w_val, w_val])
                for k, v in val_batch_log.items():
                    val_res.setdefault(k, []).append(v)

            # Checkpoint name embeds each epoch-mean validation metric.
            suffix = ''
            for k, v in val_res.items():
                suffix += "-{}_{}".format(k, np.mean(v))
            model_path = os.path.join(self.save_path, self.model_name + suffix + ".h5")
            print(model_path)
            self.model.save(model_path)
