﻿using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Tensorflow;
using static Tensorflow.Binding;
using static Tensorflow.KerasApi;

namespace TensorFlowTest.BasicModels;

/// <summary>
/// Logistic regression on MNIST, implemented with TensorFlow.NET eager execution.
/// A single softmax layer (W: [784, 10], b: [10]) is trained with plain SGD;
/// the example is considered successful once the last observed training-batch
/// accuracy exceeds 0.8.
/// </summary>
public class LogisticRegressionEager : SciSharpExample, IExample
{
    // NOTE: despite the name, this is the number of optimization steps
    // (batches drawn from the repeated dataset), not full passes over the data.
    readonly int training_epochs = 1000;
    readonly int batch_size = 256;

    // MNIST: 10 digit classes; 28 * 28 = 784 pixel features per image.
    readonly int num_classes = 10;
    readonly int num_features = 784;

    readonly float learning_rate = 0.01f;

    // How often (in steps) loss/accuracy are printed and recorded.
    readonly int display_step = 50;

    // Most recently recorded training-batch accuracy; Run() uses it as the
    // pass/fail criterion.
    float accuracy = 0f;

    ExampleConfig IExample.InitConfig()
    {
        Config = new ExampleConfig()
        {
            Name = "Logistic Regression (Eager)",
            Enabled = true,
            IsImportingGraph = false
        };
        return Config;
    }

    bool IExample.Run()
    {
        tf.enable_eager_execution();

        RunEagerMode();

        // Success = training reached 80% accuracy on its last reported batch.
        return accuracy > 0.8;
    }

    private void RunEagerMode()
    {
        // Prepare MNIST data: flatten each 28x28 image into a 784-vector and
        // scale pixel values into [0, 1].
        var ((x_train, y_train), (x_test, y_test)) =
             keras.datasets.mnist.load_data();

        (x_train, x_test) = (x_train.reshape((-1, num_features)), x_test.reshape((-1, num_features)));

        (x_train, x_test) = (x_train / 255f, x_test / 255f);

        // Use the tf.data API to shuffle and batch the training data.
        var train_data = tf.data.Dataset.from_tensor_slices(x_train, y_train);
        train_data = train_data.repeat().shuffle(5000).batch(batch_size).prefetch(1);

        // Weight of shape [784, 10]: the 28*28 image features by the total number of classes.
        var W = tf.Variable(tf.ones((num_features, num_classes)), name: "weight");

        // Bias of shape [10]: one per class.
        var b = tf.Variable(tf.zeros(num_classes), name: "bias");

        // Model: softmax(x W + b). Captures W and b so updates are visible here.
        Func<Tensor, Tensor> logistic_regression = x =>
            tf.nn.softmax(tf.matmul(x, W) + b);

        // Cross-entropy between one-hot encoded labels and predictions.
        Func<Tensor, Tensor, Tensor> cross_entropy = (y_pred, y_true) =>
        {
            y_true = tf.cast(y_true, TF_DataType.TF_UINT8);
            y_true = tf.one_hot(y_true, depth: num_classes);

            // Clip predictions away from 0 so log() never produces -inf/NaN.
            y_pred = tf.clip_by_value(y_pred, 1e-9f, 1.0f);

            return tf.reduce_mean(-tf.reduce_sum(y_true * tf.math.log(y_pred), 1));
        };

        // Fraction of predictions whose argmax matches the integer label.
        // (Named compute_accuracy to avoid shadowing the `accuracy` field.)
        Func<Tensor, Tensor, Tensor> compute_accuracy = (y_pred, y_true) =>
        {
            var correct_prediction = tf.equal(tf.math.argmax(y_pred, 1), tf.cast(y_true, tf.int64));
            return tf.reduce_mean(tf.cast(correct_prediction, tf.float32));
        };

        // Stochastic gradient descent optimizer.
        var optimizer = keras.optimizers.SGD(learning_rate);

        // One optimization step: forward pass, loss, gradients, parameter update.
        Action<Tensor, Tensor> run_optimization = (x, y) =>
            {
                // Wrap the computation inside a GradientTape for automatic differentiation.
                using var g = tf.GradientTape();
                var pred = logistic_regression(x);
                var loss = cross_entropy(pred, y);

                // Compute gradients of the loss w.r.t. W and b.
                var gradients = g.gradient(loss, (W, b));

                // Update W and b following the gradients.
                optimizer.apply_gradients(zip(gradients, (W, b)));
            };

        // Draw `training_epochs` batches (steps) from the infinite repeated stream.
        train_data = train_data.take(training_epochs);
        foreach (var (step, (batch_x, batch_y)) in enumerate(train_data, 1))
        {
            run_optimization(batch_x, batch_y);

            if (step % display_step == 0)
            {
                var pred = logistic_regression(batch_x);
                var loss = cross_entropy(pred, batch_y);
                var acc = compute_accuracy(pred, batch_y);
                print($"step:{step},loss:{(float)loss},accuracy:{(float)acc}");
                this.accuracy = acc.numpy();
            }
        }

        // Evaluate the trained model on the held-out test set.
        {
            var pred = logistic_regression(x_test);
            print($"Test Accuracy:{(float)compute_accuracy(pred, y_test)}");
        }
    }
}