﻿using Cloo;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Text;

namespace SimpleCLML.LogisticRegression
{
    /// <summary>
    /// Classic batch gradient descent algorithm for logistic regression, executed on an OpenCL device.
    /// Slowly converges towards a local minimum; the learning rate is automatically
    /// reduced (and theta restored) whenever an overshoot — a cost increase — is detected.
    /// </summary>
    public class LogisticRegressionGradientDescent : OpenCLAlgorithmBase, ILogisticRegressionSolver
    {
        private ComputeProgram program;

        // grad1/grad2 together perform one gradient descent step;
        // cost1/cost2 together evaluate the cost function J(theta).
        private ComputeKernel kernelGradientStep1;
        private ComputeKernel kernelGradientStep2;
        private ComputeKernel kernelCostFunction1;
        private ComputeKernel kernelCostFunction2;

        private ComputeBuffer<float> bufX;     // feature matrix, featureCount floats per (padded) example
        private ComputeBuffer<float> bufY;     // labels, one per (padded) example
        private ComputeBuffer<float> bufTheta; // current parameter vector
        private ComputeBuffer<float> bufO;     // per-example intermediate of the gradient step
        private ComputeBuffer<float> bufOJ;    // per-example intermediate of the cost function
        private ComputeBuffer<float> bufJ;     // scalar cost function value

        private int exampleCount;     // padded example count (= Y.Length)
        private int trueExampleCount; // actual (unpadded) example count
        private int featureCount;
        private int localWorkgroupSize = 256; // TODO: query this value from OpenCL

        readonly int maxIterations;
        readonly float errorThreshold;
        readonly float initialAlpha;

        /// <summary>Callers must pad the example count to a multiple of this value.</summary>
        public int ExampleAlignmentRequirement { get { return this.localWorkgroupSize; } }

        /// <summary>Callers must pad the feature count to a multiple of this value (kernels process float4 chunks).</summary>
        public int FeatureAlignmentRequirement { get { return 4; } }

        private float[] theta; // initial theta, kept so Run() can restore it after an overshoot on the first check

        protected override void OnDisposing()
        {
            // Dispose in an order that releases buffers and kernels before the program;
            // nulls are skipped so partial initialization disposes cleanly.
            var resources = new IDisposable[] { bufJ, bufO, bufOJ, bufTheta, bufX, bufY, kernelCostFunction1, kernelCostFunction2, kernelGradientStep1, kernelGradientStep2, program };
            foreach (var r in resources.Where(x => x != null))
            {
                r.Dispose();
            }
        }

        /// <summary>
        /// Creates the solver.
        /// </summary>
        /// <param name="maxIterations">Maximum number of gradient steps; may be extended at runtime after learning-rate reductions.</param>
        /// <param name="errorThreshold">Descent terminates once the cost drops below this value.</param>
        /// <param name="initialAlpha">Initial learning rate; must be positive and finite.</param>
        /// <exception cref="ArgumentOutOfRangeException">Thrown when maxIterations is not positive or initialAlpha is not a positive finite number.</exception>
        public LogisticRegressionGradientDescent(int maxIterations, float errorThreshold, float initialAlpha) 
        {
            if (maxIterations <= 0)
                throw new ArgumentOutOfRangeException("maxIterations", "maxIterations must be positive");
            if (float.IsNaN(initialAlpha) || float.IsInfinity(initialAlpha) || initialAlpha <= 0.0f)
                throw new ArgumentOutOfRangeException("initialAlpha", "initialAlpha must be a positive finite number");

            this.maxIterations = maxIterations;
            this.errorThreshold = errorThreshold;
            this.initialAlpha = initialAlpha;
        }

        /// <summary>
        /// Uploads the training set to the device and prepares the kernels.
        /// </summary>
        /// <param name="X">Feature matrix, row-major, featureCount floats per example; feature count padded per <see cref="FeatureAlignmentRequirement"/>.</param>
        /// <param name="Y">Labels, one per example; example count padded per <see cref="ExampleAlignmentRequirement"/>.</param>
        /// <param name="trueExampleCount">Number of real (unpadded) examples; must be in (0, Y.Length].</param>
        /// <param name="theta">Optional initial parameter vector of featureCount elements; zeros when null.</param>
        /// <exception cref="ArgumentNullException">Thrown when X or Y is null.</exception>
        /// <exception cref="ArgumentException">Thrown when sizes or alignment requirements are violated.</exception>
        /// <exception cref="ArgumentOutOfRangeException">Thrown when trueExampleCount is out of range.</exception>
        public void Init(float[] X, float[] Y, int trueExampleCount, float[] theta = null)
        {
            if (X == null)
                throw new ArgumentNullException("X");
            if (Y == null)
                throw new ArgumentNullException("Y");

            if (X.Length == 0 || Y.Length == 0 || (X.Length % Y.Length) != 0)
                throw new ArgumentException("Invalid size of X or Y");

            if (trueExampleCount <= 0 || trueExampleCount > Y.Length)
                throw new ArgumentOutOfRangeException("trueExampleCount");

            this.exampleCount = Y.Length;
            this.trueExampleCount = trueExampleCount;
            this.featureCount = X.Length / Y.Length;

            // Kernels are enqueued with global sizes exampleCount / 4 and featureCount / 4 and
            // reduce over localWorkgroupSize-sized groups; misaligned input would silently
            // truncate the work size and produce wrong results, so reject it here.
            if ((this.exampleCount % ExampleAlignmentRequirement) != 0)
                throw new ArgumentException("Y.Length must be a multiple of ExampleAlignmentRequirement");
            if ((this.featureCount % FeatureAlignmentRequirement) != 0)
                throw new ArgumentException("Feature count (X.Length / Y.Length) must be a multiple of FeatureAlignmentRequirement");
            if (theta != null && theta.Length != this.featureCount)
                throw new ArgumentException("theta length must equal the feature count");

            initializeBuffers(X, Y, theta);
            initializeKernels();
        }

        /// <summary>
        /// Allocates device buffers and uploads X, Y and the initial theta.
        /// </summary>
        private void initializeBuffers(float[] X, float[] Y, float[] theta)
        {            
            if (theta == null)
            {
                theta = new float[featureCount]; // theta = zeros(featureCount, 1);
            }

            this.theta = (float[])theta.Clone(); // keep initial theta so we can restore theta after overshoot

            bufX = new ComputeBuffer<float>(context, ComputeMemoryFlags.ReadOnly | ComputeMemoryFlags.CopyHostPointer, X);
            bufY = new ComputeBuffer<float>(context, ComputeMemoryFlags.ReadOnly | ComputeMemoryFlags.CopyHostPointer, Y);
            bufTheta = new ComputeBuffer<float>(context, ComputeMemoryFlags.ReadWrite | ComputeMemoryFlags.CopyHostPointer, theta);
            bufO = new ComputeBuffer<float>(context, ComputeMemoryFlags.ReadWrite, exampleCount);
            bufOJ = new ComputeBuffer<float>(context, ComputeMemoryFlags.ReadWrite, exampleCount);
            bufJ = new ComputeBuffer<float>(context, ComputeMemoryFlags.WriteOnly, 1);
        }

        /// <summary>
        /// Builds the OpenCL program and binds the static kernel arguments.
        /// </summary>
        private void initializeKernels()
        {
            // Load program
            program = LoadProgramFromResources("SimpleCLML.CL.logistic_regression_gradient_descent.cl", new Dictionary<string, object>() { { "ExampleGroupingConstant", localWorkgroupSize } });

            // Initialize kernels
            kernelGradientStep1 = program.CreateKernel("grad1");
            kernelGradientStep1.SetMemoryArgument(0, bufX);
            kernelGradientStep1.SetMemoryArgument(1, bufY);
            kernelGradientStep1.SetMemoryArgument(2, bufTheta);
            kernelGradientStep1.SetMemoryArgument(3, bufO);
            kernelGradientStep1.SetValueArgument(4, featureCount);
            kernelGradientStep1.SetValueArgument(5, exampleCount);

            kernelGradientStep2 = program.CreateKernel("grad2");
            kernelGradientStep2.SetMemoryArgument(0, bufX);
            kernelGradientStep2.SetMemoryArgument(1, bufO);
            kernelGradientStep2.SetMemoryArgument(2, bufTheta);
            kernelGradientStep2.SetValueArgument(3, exampleCount);
            // NOTE: argument 4 (the learning rate alpha) is deliberately not bound here;
            // Run() sets it before the first step and again after each reduction.
            kernelGradientStep2.SetValueArgument(5, featureCount);
            kernelGradientStep2.SetValueArgument(6, trueExampleCount);

            kernelCostFunction1 = program.CreateKernel("cost1");
            kernelCostFunction1.SetMemoryArgument(0, bufX);
            kernelCostFunction1.SetMemoryArgument(1, bufY);
            kernelCostFunction1.SetMemoryArgument(2, bufTheta);
            kernelCostFunction1.SetMemoryArgument(3, bufOJ);
            kernelCostFunction1.SetValueArgument(4, featureCount);
            kernelCostFunction1.SetValueArgument(5, exampleCount);

            kernelCostFunction2 = program.CreateKernel("cost2");
            kernelCostFunction2.SetMemoryArgument(0, bufOJ);
            kernelCostFunction2.SetMemoryArgument(1, bufJ);
            kernelCostFunction2.SetValueArgument(2, exampleCount);
            kernelCostFunction2.SetValueArgument(3, trueExampleCount);
        }

        /// <summary>
        /// Runs the gradient descent until the error threshold, a vanishing error delta,
        /// a too-small learning rate, or the (possibly extended) iteration limit is reached.
        /// </summary>
        /// <returns>The learned parameter vector theta (the last one that did not overshoot).</returns>
        public float[] Run()
        {
            // Select the most powerful device 
            // TODO: selecting
            var device = context.Devices.OrderByDescending(d => d.MaxComputeUnits).First();
            Trace.TraceInformation("Using device: {0}; Compute units: {1}; MaxParameterSize: {2}; MaxWorkGroupSize: {3}; LocalMemorySize: {4}; MaxMemoryAllocationSize: {5}", device.Name, device.MaxComputeUnits, device.MaxParameterSize, device.MaxWorkGroupSize, device.LocalMemorySize, device.MaxMemoryAllocationSize);

            ComputeEventList events = new ComputeEventList();            
            using (ComputeCommandQueue commands = new ComputeCommandQueue(context, device, ComputeCommandQueueFlags.None))
            {
                long maxIterations = this.maxIterations; // local copy; extended after learning-rate reductions
                float[] thetabuf = (float[])this.theta.Clone(); // last known good theta, used to roll back overshoots
                float error = lrCost(commands);                
                int alphaReductions = 0;
                bool reduced = false;
                float alpha = initialAlpha;
                const float alphaReductionCoefficient = 0.8f;                
                const int errorCheckFreq = 128; // cost function check freq

                Trace.TraceInformation("Starting gradient descent. Starting alpha = {0}, alphaReductionCoefficient = {1}, maxIterations = {2}, smallStep = {3}, errorThreshold= {4}", alpha, alphaReductionCoefficient, maxIterations, errorCheckFreq, errorThreshold);
                Stopwatch sw = Stopwatch.StartNew();
                kernelGradientStep2.SetValueArgument(4, alpha); // argument 4 (alpha) left unbound by initializeKernels()
                long i,prev_i=0;
                for (i = 0; i < maxIterations; i++)
                {
                    // we check for error every errorCheckFreq step, on the last step or when previous error check resulted in learning rate decrease                    
                    bool checkStep = reduced || i % errorCheckFreq == 0 || i == maxIterations - 1;

                    commands.Execute(kernelGradientStep1, null, new long[] { exampleCount / 4 }, null, null);
                    commands.AddBarrier();                    
                    commands.Execute(kernelGradientStep2, null, new long[] { localWorkgroupSize, featureCount / 4 }, new long[] { localWorkgroupSize, 1 }, checkStep ? events : null);
                    commands.AddBarrier();

                    // Scheduling many (relatively) long-run kernels separated only by clEnqueueBarrier() results in video driver crash due to TDR
                    // Wait() solves it but introduces synchronization issues: cost1 kernel doesn't wait for grad2 to finish but rather executes in parallel with grad1                    
                    // So we use events to ensure that cost1 executes after grad2
                    // Further investigation is needed, however the impact on performance is small
                    commands.Wait(checkStep ? events : null); 
                    
                    if (checkStep)
                    {
                        reduced = false;
                        float old_error = error;
                        float new_error = lrCost(commands);
                        float error_delta = old_error - new_error;
                        error = new_error;
                        Trace.TraceInformation("{0} gradient descent steps: error = {1}, error delta = {2}", i+1, error, error_delta);

                        foreach (var e in events)
                        {
                            e.Dispose();
                        }
                        events.Clear();
                        

                        // Check for overshoot. NaN must be handled explicitly: every comparison
                        // with NaN is false, so without this check a NaN cost would neither trigger
                        // a learning-rate reduction nor satisfy any exit condition, and the descent
                        // would spin to maxIterations with a useless theta.
                        if (float.IsNaN(error) || float.IsInfinity(error) || error_delta < 0.0)
                        {
                            alpha = alpha * alphaReductionCoefficient; // reduce step

                            if (alpha < 1.0e-15) // too small - breaking
                            {
                                Trace.TraceWarning("Learning rate correction after overshoot is too small! Terminating descent.");
                                break;
                            }

                            Trace.TraceInformation("Learning rate is too large! Reducing to {0}", alpha);
                            alphaReductions++;
                            maxIterations += i - prev_i; // give back the iterations wasted on the overshoot
                            error = old_error;
                            reduced = true;
                            kernelGradientStep2.SetValueArgument(4, alpha);
                            commands.WriteToBuffer(thetabuf, bufTheta, false, null); // roll back to last good theta
                        }
                        else if (error < errorThreshold || error_delta < 1.0e-15)
                        {
                            Trace.TraceInformation("Exit condition is met (error < errorThreshold OR error_delta < 1.0e-15), terminating descent");
                            break;
                        }
                        else
                        {
                            // Everything is OK, saving theta                            
                            commands.ReadFromBuffer(bufTheta, ref thetabuf, true, null);
                        }
                        prev_i = i;
                    }
                }

                // Normally empty here (events are disposed on every check step),
                // but dispose defensively before dropping the references.
                foreach (var e in events)
                {
                    e.Dispose();
                }
                events.Clear();

                commands.ReadFromBuffer(bufTheta, ref thetabuf, true, null);

                sw.Stop();
                // Guard the per-step ticks against i == 0 (possible when the very first
                // check step breaks out of the loop) to avoid reporting Infinity.
                Trace.TraceInformation("iterations: {0}, error: {1:P4},{3} step ticks: {2:N2}",
                    i, error, i == 0 ? 0.0 : 1.0 * sw.ElapsedTicks / i, alphaReductions == 0 ? "" : String.Format(" alphareds: {0} ({1}->{2}),", alphaReductions, initialAlpha, alpha));

                return thetabuf;
            }

        }
 
        /// <summary>
        /// Evaluates the cost function J(theta) on the device for the current bufTheta
        /// and reads the scalar result back (blocking).
        /// </summary>
        private float lrCost(ComputeCommandQueue commandQueue)
        {
            float[] J = new float[1];

            commandQueue.Execute(kernelCostFunction1, null, new long[] { exampleCount / 4 }, null, null);
            commandQueue.AddBarrier();
            commandQueue.Execute(kernelCostFunction2, null, new long[] { localWorkgroupSize }, new long[] { localWorkgroupSize }, null);
            commandQueue.AddBarrier();
            commandQueue.ReadFromBuffer(bufJ, ref J, true, null);

            return J[0];
        }
    }
}
