import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

def get_uncompiled_model():
  """Build a 784 -> 64 -> 64 -> 10 MLP for MNIST digits (not compiled)."""
  inputs = keras.Input(shape=(784,), name='digits')
  hidden = layers.Dense(64, activation='relu', name='dense_1')(inputs)
  hidden = layers.Dense(64, activation='relu', name='dense_2')(hidden)
  outputs = layers.Dense(10, activation='softmax', name='predictions')(hidden)
  return keras.Model(inputs=inputs, outputs=outputs)


def get_compiled_model():
  """Return the MLP from get_uncompiled_model, compiled for sparse int labels."""
  model = get_uncompiled_model()
  model.compile(
      optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
      loss='sparse_categorical_crossentropy',
      metrics=['sparse_categorical_accuracy'],
  )
  return model



# Load MNIST as Numpy arrays.
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

# Preprocess the data (these are Numpy arrays): flatten each 28x28 image to a
# 784-vector and scale pixel values to [0, 1].
# Use -1 for the sample dimension instead of hard-coding 60000/10000 so the
# code keeps working if the dataset split sizes ever differ.
x_train = x_train.reshape(-1, 784).astype('float32') / 255
x_test = x_test.reshape(-1, 784).astype('float32') / 255

# Reserve 10,000 samples for validation
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]

model = get_compiled_model()
# NOTE(review): a second model.compile(...) call used to follow here. It was
# redundant — get_compiled_model() already compiles the model with an
# equivalent RMSprop optimizer, sparse categorical cross-entropy loss, and
# sparse categorical accuracy metric — so it has been removed.

# Train the model by slicing the data into "batches"
# of size "batch_size", and repeatedly iterating over
# the entire dataset for a given number of "epochs"
print("# Fit model on training data")
history = model.fit(x_train, y_train,
                    batch_size=64,
                    epochs=3,
                    validation_data=(x_val, y_val))

# The returned "history" object holds a record
# of the loss values and metric values during training
print('\nhistory dict:', history.history)

# Evaluate the model on the test data using `evaluate`
print('\n# Evaluate on test data')
results = model.evaluate(x_test, y_test, batch_size=128)
print("test loss,test acc:", results)

# Generate predictions (probabilities -- the output of the last layer)
# on new data using `predict`
print('\n# Generate predictions for 3 samples')
predictions = model.predict(x_test[:3])
print('predictions shape:', predictions.shape)


class CatgoricalTruePositives(keras.metrics.Metric):
    """Stateful metric counting correctly classified samples.

    Accumulates, across batches, the (optionally sample-weighted) number of
    predictions whose argmax class equals the integer label.

    NOTE(review): the class name has a typo ("Catgorical"); it is kept
    unchanged because callers reference the class by this name.
    """

    def __init__(self, name='categorical_true_positives', **kwargs):
        super(CatgoricalTruePositives, self).__init__(name=name, **kwargs)
        # Running count of correct predictions, accumulated across batches.
        self.true_positives = self.add_weight(name='tp', initializer='zeros')

    def update_state(self, y_true, y_pred, sample_weight=None):
        # BUG FIX: argmax must run over the class axis (the last axis).
        # The previous code used tf.argmax's default axis=0 — the batch
        # axis — which produced a meaningless result.
        y_pred = tf.argmax(y_pred, axis=-1)
        # Flatten labels so a (batch, 1)-shaped y_true cannot broadcast into
        # a (batch, batch) comparison against the (batch,) predictions.
        y_true = tf.reshape(y_true, [-1])
        values = tf.equal(tf.cast(y_true, 'int32'), tf.cast(y_pred, 'int32'))
        values = tf.cast(values, 'float32')
        if sample_weight is not None:
            sample_weight = tf.cast(sample_weight, 'float32')
            values = tf.multiply(values, sample_weight)
        self.true_positives.assign_add(tf.reduce_sum(values))

    def result(self):
        return self.true_positives

    def reset_states(self):
        # Called between epochs/evaluations to restart the count.
        self.true_positives.assign(0.)

# Re-compile with the custom true-positives metric and train briefly.
model.compile(
    optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
    loss=keras.losses.SparseCategoricalCrossentropy(),
    metrics=[CatgoricalTruePositives()],
)
model.fit(x_train, y_train, batch_size=64, epochs=3)

class ActivityRegularizationLayer(layers.Layer):
    """Identity layer that registers an activity-regularization loss."""

    def call(self, inputs):
        # Penalty proportional to the sum of all activations; the inputs
        # themselves pass through unchanged.
        penalty = 0.1 * tf.reduce_sum(inputs)
        self.add_loss(penalty)
        return inputs



# Rebuild a compiled model and wrap the Numpy arrays in tf.data pipelines.
model = get_compiled_model()

train_dataset = (tf.data.Dataset.from_tensor_slices((x_train, y_train))
                 .shuffle(buffer_size=1024)
                 .batch(64))

test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(64)

print("\n#Evaluate")
model.evaluate(test_dataset)


from tensorflow import keras
from tensorflow.keras import layers

# Functional API: a two-input / two-output model mixing an image branch and
# a variable-length time-series branch.
image_input = keras.Input(shape=(32, 32, 3), name='img_input')
timeseries_input = keras.Input(shape=(None, 10), name='ts_input')

image_features = layers.Conv2D(3, 3)(image_input)
image_features = layers.GlobalMaxPool2D()(image_features)

ts_features = layers.Conv1D(3, 3)(timeseries_input)
ts_features = layers.GlobalMaxPool1D()(ts_features)

merged = layers.concatenate([image_features, ts_features])

score_output = layers.Dense(1, name='score_output')(merged)
class_output = layers.Dense(5, activation='softmax', name='class_output')(merged)

model = keras.Model(inputs=[image_input, timeseries_input],
                    outputs=[score_output, class_output])

# NOTE: plot_model needs pydot and graphviz installed.
keras.utils.plot_model(model, 'multi_input_and_output_model.png', show_shapes=True)



# Custom training loop with GradientTape, per-epoch metrics, and validation.
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation='relu', name='dense_1')(inputs)
x = layers.Dense(64, activation='relu', name='dense_2')(x)
outputs = layers.Dense(10, activation='softmax', name='predictions')(x)
model = keras.Model(inputs=inputs, outputs=outputs)

optimizer = keras.optimizers.SGD(learning_rate=1e-3)

loss_fn = keras.losses.SparseCategoricalCrossentropy()

train_acc_metric = keras.metrics.SparseCategoricalAccuracy()
val_acc_metric = keras.metrics.SparseCategoricalAccuracy()

batch_size = 64
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)

# BUG FIX: `val_dataset` was iterated in the validation loop below but never
# defined anywhere, which raised a NameError at runtime.
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val)).batch(batch_size)

for epoch in range(3):
    print("Start of epoch %d" % (epoch,))
    for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
        # Record the forward pass so gradients can be computed.
        with tf.GradientTape() as tape:
            logits = model(x_batch_train, training=True)
            loss_value = loss_fn(y_batch_train, logits)

        grads = tape.gradient(loss_value, model.trainable_weights)
        optimizer.apply_gradients(zip(grads, model.trainable_weights))

        # Accumulate training accuracy over the epoch.
        train_acc_metric(y_batch_train, logits)
        if step % 200 == 0:
            print('Training loss (for one batch) at step %s: %s' % (step, float(loss_value)))
            print('Seen so far: %s samples' % ((step + 1) * batch_size))

    # BUG FIX: everything from here down was indented one level too deep,
    # inside the per-step loop — so the epoch summary was printed, the
    # training metric reset, and a FULL validation pass run after every
    # single batch. It now runs once at the end of each epoch.
    train_acc = train_acc_metric.result()
    print('Training acc over epoch: %s' % (float(train_acc),))
    # Reset training metrics at the end of each epoch
    train_acc_metric.reset_states()

    # Run a validation loop at the end of each epoch.
    for x_batch_val, y_batch_val in val_dataset:
        val_logits = model(x_batch_val, training=False)
        # Update val metrics
        val_acc_metric(y_batch_val, val_logits)
    val_acc = val_acc_metric.result()
    val_acc_metric.reset_states()
    print('Validation acc: %s' % (float(val_acc),))




