﻿using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Tensorflow;
using static Tensorflow.Binding;

namespace TensorFlowTest.BasicModels;

/// <summary>
/// Logistic (softmax) regression on MNIST using TensorFlow.NET in graph mode,
/// trained with mini-batch gradient descent.
/// </summary>
public class LogisticRegression : SciSharpExample, IExample
{
    // Training hyper-parameters; public so callers can tune them before Run().
    public int training_epochs = 10;
    public int? train_size = null;      // null = use the full training split
    public int validation_size = 5000;
    public int? test_size = null;       // null = use the full test split
    public int batch_size = 100;

    private float learning_rate = 0.01f;
    private int display_step = 1;       // log progress every N epochs
    private float accuracy = 0f;        // test accuracy from the most recent Train()

    private Datasets<MnistDataSet> mnist;

    ExampleConfig IExample.InitConfig()
    {
        Config = new ExampleConfig()
        {
            Name = "Logistic Regression (Graph)",
            Enabled = true,
            IsImportingGraph = false,
        };
        return Config;
    }

    bool IExample.Run()
    {
        PrepareData();

        // The graph construction in Train() requires eager execution to be off.
        tf.compat.v1.disable_eager_execution();

        Train();

        return accuracy > 0.9;
    }

    /// <summary>
    /// Downloads (if not cached) and loads the MNIST data set with one-hot labels.
    /// </summary>
    public override void PrepareData()
    {
        var loader = new MnistModelLoader();
        // GetAwaiter().GetResult() rather than .Result so a load failure
        // surfaces as the original exception instead of an AggregateException.
        // (IExample is synchronous, so blocking here is unavoidable.)
        mnist = loader.LoadAsync(".resources/mnist",
            oneHot: true,
            trainSize: train_size, validationSize: validation_size, testSize: test_size,
            showProgressInConsole: true).GetAwaiter().GetResult();
    }

    /// <summary>
    /// Builds the softmax-regression graph, trains it with mini-batch gradient
    /// descent, then evaluates classification accuracy on the test set and
    /// stores the result in <see cref="accuracy"/>.
    /// </summary>
    public override void Train()
    {
        // tf graph inputs: flattened 28x28 images (784) and one-hot labels (10 classes).
        var x = tf.placeholder(tf.float32, (-1, 784));
        var y = tf.placeholder(tf.float32, (-1, 10));

        // Model weights and bias, initialized to zero.
        var W = tf.Variable(tf.zeros((784, 10)));
        var b = tf.Variable(tf.zeros(10));

        // Construct the model: softmax over the linear transform.
        var pred = tf.nn.softmax(tf.matmul(x, W) + b);

        // Minimize error using cross entropy.
        var cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices: 1));

        // Gradient descent optimizer.
        var optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost);

        // Initialize the variables (i.e. assign their default values).
        var init = tf.global_variables_initializer();

        var total_batch = mnist.Train.NumOfExamples / batch_size;

        var sw = new Stopwatch();

        using var sess = tf.Session();
        sess.run(init);

        foreach (var epoch in range(training_epochs))
        {
            sw.Start();
            var avg_cost = 0.0f;

            // Loop over all mini-batches in the training set.
            foreach (var i in range(total_batch))
            {
                var start = i * batch_size;
                var end = (i + 1) * batch_size;
                var (batch_xs, batch_ys) = mnist.GetNextBatch(mnist.Train.Data,
                    mnist.Train.Labels, start, end);

                // Run one optimization step and fetch the batch cost.
                (_, float c) = sess.run((optimizer, cost), (x, batch_xs), (y, batch_ys));

                // Accumulate the average loss across batches.
                avg_cost += c / total_batch;
            }

            sw.Stop();
            if ((epoch + 1) % display_step == 0)
            {
                print($"Epoch: {(epoch + 1):D4} Cost:{avg_cost:G9} Elapsed:{sw.ElapsedMilliseconds}ms");
            }
            sw.Reset();
        }

        print("Optimization Finished!");
        // SaveModel(sess);

        // Test model: a prediction is correct when the argmax of the softmax
        // output matches the argmax of the one-hot label.
        var correct_prediction = tf.equal(tf.math.argmax(pred, 1), tf.math.argmax(y, 1));

        // Accuracy = mean of the 0/1 correctness indicators.
        var acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32));
        accuracy = acc.eval(sess, (x, mnist.Test.Data), (y, mnist.Test.Labels));
        print($"Accuracy:{accuracy:F4}");
    }
}
