import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import models
import tensorflow.keras.backend as K

class Estimate(keras.Model):
    """Keras model whose custom ``train_step`` can log input batches to
    TensorBoard before running the usual forward/backward pass.

    :param train_summary_writer: optional ``tf.summary`` writer; when set,
        each training batch's images are written as an image summary.
    :param test_summary_writer: optional writer stored for evaluation-side
        logging (not used by this class's ``train_step``).
    """

    def __init__(self, train_summary_writer=None, test_summary_writer=None, **kwargs):
        super().__init__(**kwargs)
        self.train_summary_writer = train_summary_writer
        self.test_summary_writer = test_summary_writer

    def train_step(self, data):
        """Run one optimization step on a single ``(x, y)`` batch and
        return the current metric values."""
        inputs, targets = data

        # Optionally log the batch to TensorBoard. The last (channel) axis
        # is reversed — presumably BGR -> RGB for display; TODO confirm.
        # NOTE(review): self._train_counter is a private Keras attribute.
        if self.train_summary_writer is not None:
            with self.train_summary_writer.as_default():
                tf.summary.image(
                    "train_img",
                    inputs[:, :, :, ::-1],
                    step=self._train_counter,
                    max_outputs=1,
                )

        # Forward pass and loss (including regularization) under the tape.
        with tf.GradientTape() as tape:
            predictions = self(inputs, training=True)
            loss = self.compiled_loss(
                targets, predictions, regularization_losses=self.losses
            )

        # Backward pass: compute gradients and apply one optimizer update.
        grads = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.trainable_variables))

        # Update compiled metrics (includes the loss tracker) and report.
        self.compiled_metrics.update_state(targets, predictions)
        return {metric.name: metric.result() for metric in self.metrics}



class Estimate_Base_tf14():
    """Abstract interface for TF-1.x style estimators.

    Subclasses must implement ``train_step``, ``test_step`` and ``fit``;
    each base method only raises ``NotImplementedError``.
    """

    def train_step(self, x, y, **kwargs):
        """Run one training step on batch ``(x, y)``; return a log dict."""
        raise NotImplementedError("you must implement train_step")

    def test_step(self, x, y, **kwargs):
        """Run one evaluation step on batch ``(x, y)``; return a log dict."""
        raise NotImplementedError("you must implement test_step")

    def fit(self, X, epochs, steps_per_epoch, validation_data, validation_steps, callbacks, **kwargs):
        """Run the full training loop over ``epochs`` epochs."""
        raise NotImplementedError("you must implement fit")


class Estimate_tf14(Estimate_Base_tf14):
    """TF-1.x style trainer built from a Keras model's symbolic graph.

    :param model: Keras model to train; its symbolic output feeds the loss.
    :param label: symbolic input/placeholder tensor holding ground-truth
        labels (softmax cross-entropy is used, so labels are expected to be
        one-hot — TODO confirm against callers).
    """

    def __init__(self, model: models.Model, label: layers.InputLayer):
        self.model = model
        self.label = label

    def compile(self, optimizer: keras.optimizers.Optimizer, **kwargs):
        """Attach an optimizer and build the train/test step functions."""
        self.optimizer = optimizer
        self.train_iter, self.test_iter = self.get_iter()

    def get_iter(self, **kwargs):
        """Build and return ``(train_fn, test_fn)`` backend functions.

        Both map ``[inputs, labels] -> [loss]``; the train function
        additionally applies the optimizer's parameter updates.
        """
        logits = self.model.output
        loss = tf.losses.softmax_cross_entropy(self.label, logits)
        update_ops = self.optimizer.get_updates(
            params=self.model.trainable_variables, loss=loss
        )

        train_fn = K.function(
            inputs=[self.model.input, self.label],
            outputs=[loss],
            updates=update_ops,
        )
        test_fn = K.function(inputs=[self.model.input, self.label], outputs=[loss])
        return train_fn, test_fn

    def train_step(self, x, y, **kwargs) -> dict:
        """One optimizer step on batch ``(x, y)``; returns ``{"loss": ...}``."""
        loss_value = self.train_iter([x, y])
        return {"loss": loss_value}

    def test_step(self, x, y, **kwargs) -> dict:
        """Evaluate loss on batch ``(x, y)``; returns ``{"val_loss": ...}``."""
        loss_value = self.test_iter([x, y])
        return {"val_loss": loss_value}

    def fit(self, train_data, epochs, steps_per_epoch, validation_data,
            validation_steps, callbacks, **kwargs):
        """Minimal training loop over batch iterators.

        :param train_data: iterator yielding ``(x, y)`` training batches.
        :param epochs: number of epochs to run.
        :param steps_per_epoch: training batches consumed per epoch.
        :param validation_data: iterator yielding ``(x, y)`` validation batches.
        :param validation_steps: validation batches consumed per epoch.
        :param callbacks: accepted for interface compatibility but currently
            never invoked (NOTE(review): callbacks are silently ignored).
        :returns: list with one ``(last_train_log, last_val_log)`` tuple per
            epoch, so callers can inspect losses instead of losing them.
        """
        history = []
        for _epoch in range(epochs):
            train_log = None
            for _ in range(steps_per_epoch):
                x_train, y_train = next(train_data)
                train_log = self.train_step(x_train, y_train)

            val_log = None
            for _ in range(validation_steps):
                x_val, y_val = next(validation_data)
                val_log = self.test_step(x_val, y_val)

            history.append((train_log, val_log))
        return history


