﻿using Cloo;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Text;

namespace SimpleCLML.LogisticRegression
{
    /// <summary>
    /// Logistic regression solver based on the limited-memory BFGS (L-BFGS)
    /// quasi-Newton method. Cost and gradient evaluations are offloaded to
    /// OpenCL kernels; the L-BFGS two-loop recursion and the backtracking line
    /// search run on the host.
    /// </summary>
    public class LogisticRegressionLBFGS : OpenCLAlgorithmBase, ILogisticRegressionSolver
    {
        private ComputeProgram program;

        private ComputeKernel kernelGradientStep1;
        private ComputeKernel kernelGradientStep2;
        private ComputeKernel kernelCostFunction1;
        private ComputeKernel kernelCostFunction2;

        private ComputeBuffer<float> bufX;     // feature matrix, read-only on the device
        private ComputeBuffer<float> bufY;     // label vector, read-only on the device
        private ComputeBuffer<float> bufTheta; // current parameter vector
        private ComputeBuffer<float> bufGrad;  // gradient result (featureCount elements)
        private ComputeBuffer<float> bufO;     // per-example intermediate used by the gradient kernels
        private ComputeBuffer<float> bufOJ;    // per-example intermediate used by the cost kernels
        private ComputeBuffer<float> bufJ;     // scalar cost result

        private int exampleCount;     // padded example count (= Y.Length)
        private int trueExampleCount; // number of real examples; passed to the kernels, presumably for normalization — confirm against the .cl source
        private int featureCount;
        private int localWorkgroupSize = 256; // TODO: query this value from OpenCL

        readonly int maxIterations;
        readonly float errorThreshold;
        readonly int historySize;

        /// <summary>Callers must pad the number of examples to a multiple of this value.</summary>
        public int ExampleAlignmentRequirement { get { return this.localWorkgroupSize; } }

        /// <summary>Callers must pad the number of features to a multiple of this value
        /// (the gradient kernel is launched over featureCount / 4 work items).</summary>
        public int FeatureAlignmentRequirement { get { return 4; } }

        private float[] theta; // initial theta; Run() always starts from a copy of it

        protected override void OnDisposing()
        {
            releaseComputeResources();
        }

        // Disposes every OpenCL resource owned by this instance and clears the
        // fields, so a later dispose (or a repeated Init) never touches a dead handle.
        private void releaseComputeResources()
        {
            var resources = new IDisposable[] { bufJ, bufO, bufOJ, bufTheta, bufGrad, bufX, bufY, kernelCostFunction1, kernelCostFunction2, kernelGradientStep1, kernelGradientStep2, program };
            foreach (var r in resources.Where(x => x != null))
            {
                r.Dispose();
            }

            bufJ = null; bufO = null; bufOJ = null; bufTheta = null; bufGrad = null; bufX = null; bufY = null;
            kernelCostFunction1 = null; kernelCostFunction2 = null;
            kernelGradientStep1 = null; kernelGradientStep2 = null;
            program = null;
        }

        /// <summary>
        /// Creates the solver.
        /// </summary>
        /// <param name="maxIterations">Maximum number of L-BFGS iterations; must be non-negative.</param>
        /// <param name="errorThreshold">Convergence threshold: iteration stops once the
        /// absolute cost improvement drops below this value.</param>
        /// <param name="historySize">Number of (s, y) correction pairs kept; must be positive.</param>
        /// <exception cref="ArgumentOutOfRangeException">When maxIterations is negative or historySize is not positive.</exception>
        public LogisticRegressionLBFGS(int maxIterations, float errorThreshold, int historySize)
        {
            if (maxIterations < 0)
                throw new ArgumentOutOfRangeException("maxIterations", "maxIterations must be non-negative");
            if (historySize <= 0)
                // historySize is used as a modulus in Run(); zero would raise DivideByZeroException there
                throw new ArgumentOutOfRangeException("historySize", "historySize must be positive");

            this.maxIterations = maxIterations;
            this.errorThreshold = errorThreshold;
            this.historySize = historySize;
        }

        /// <summary>
        /// Uploads the training data to the device and prepares the kernels.
        /// May be called again to re-initialize with a new dataset.
        /// </summary>
        /// <param name="X">Flattened feature matrix; X.Length = featureCount * exampleCount.</param>
        /// <param name="Y">Label vector; Y.Length defines the (padded) example count.</param>
        /// <param name="trueExampleCount">Number of real (unpadded) examples.</param>
        /// <param name="theta">Optional initial parameter vector; zeros when null.</param>
        /// <exception cref="ArgumentNullException">When X or Y is null.</exception>
        /// <exception cref="ArgumentException">When the sizes of X and Y are inconsistent.</exception>
        public void Init(float[] X, float[] Y, int trueExampleCount, float[] theta = null)
        {
            if (X == null)
                throw new ArgumentNullException("X");
            if (Y == null)
                throw new ArgumentNullException("Y");

            if (X.Length == 0 || Y.Length == 0 || (X.Length % Y.Length) != 0)
                throw new ArgumentException("Invalid size of X or Y");

            if (trueExampleCount <= 0 || trueExampleCount > Y.Length)
                throw new ArgumentOutOfRangeException("trueExampleCount", "trueExampleCount must be positive and not exceed Y.Length");

            // Re-initialization: release buffers/kernels created by a previous
            // Init call, otherwise the device resources would leak.
            releaseComputeResources();

            this.exampleCount = Y.Length;
            this.trueExampleCount = trueExampleCount;
            this.featureCount = X.Length / Y.Length;

            initializeBuffers(X, Y, theta);
            initializeKernels();
        }

        // Allocates device buffers and uploads X, Y and the initial theta.
        private void initializeBuffers(float[] X, float[] Y, float[] theta)
        {
            if (theta == null)
            {
                theta = new float[featureCount]; // theta = zeros(featureCount, 1);
            }

            this.theta = (float[])theta.Clone(); // keep initial theta so we can restore theta after overshoot

            bufX = new ComputeBuffer<float>(context, ComputeMemoryFlags.ReadOnly | ComputeMemoryFlags.CopyHostPointer, X);
            bufY = new ComputeBuffer<float>(context, ComputeMemoryFlags.ReadOnly | ComputeMemoryFlags.CopyHostPointer, Y);
            bufTheta = new ComputeBuffer<float>(context, ComputeMemoryFlags.ReadWrite | ComputeMemoryFlags.CopyHostPointer, theta);
            bufGrad = new ComputeBuffer<float>(context, ComputeMemoryFlags.ReadWrite, featureCount);
            bufO = new ComputeBuffer<float>(context, ComputeMemoryFlags.ReadWrite, exampleCount);
            bufOJ = new ComputeBuffer<float>(context, ComputeMemoryFlags.ReadWrite, exampleCount);
            bufJ = new ComputeBuffer<float>(context, ComputeMemoryFlags.WriteOnly, 1);
        }

        // Builds the program from the embedded .cl resource and binds the
        // (constant) kernel arguments once; only bufTheta changes between runs.
        private void initializeKernels()
        {
            // Load program
            program = LoadProgramFromResources("SimpleCLML.CL.logistic_regression_lbfgs.cl", new Dictionary<string, object>() { { "ExampleGroupingConstant", localWorkgroupSize } });

            // grad1: per-example part of the gradient -> bufO
            kernelGradientStep1 = program.CreateKernel("grad1");
            kernelGradientStep1.SetMemoryArgument(0, bufX);
            kernelGradientStep1.SetMemoryArgument(1, bufY);
            kernelGradientStep1.SetMemoryArgument(2, bufTheta);
            kernelGradientStep1.SetMemoryArgument(3, bufO);
            kernelGradientStep1.SetValueArgument(4, featureCount);
            kernelGradientStep1.SetValueArgument(5, exampleCount);

            // grad2: reduction of bufO into the gradient vector bufGrad
            kernelGradientStep2 = program.CreateKernel("grad2");
            kernelGradientStep2.SetMemoryArgument(0, bufX);
            kernelGradientStep2.SetMemoryArgument(1, bufO);
            kernelGradientStep2.SetMemoryArgument(2, bufGrad);
            kernelGradientStep2.SetValueArgument(3, exampleCount);
            kernelGradientStep2.SetValueArgument(4, featureCount);
            kernelGradientStep2.SetValueArgument(5, trueExampleCount);

            // cost1: per-example part of the cost -> bufOJ
            kernelCostFunction1 = program.CreateKernel("cost1");
            kernelCostFunction1.SetMemoryArgument(0, bufX);
            kernelCostFunction1.SetMemoryArgument(1, bufY);
            kernelCostFunction1.SetMemoryArgument(2, bufTheta);
            kernelCostFunction1.SetMemoryArgument(3, bufOJ);
            kernelCostFunction1.SetValueArgument(4, featureCount);
            kernelCostFunction1.SetValueArgument(5, exampleCount);

            // cost2: reduction of bufOJ into the scalar cost bufJ
            kernelCostFunction2 = program.CreateKernel("cost2");
            kernelCostFunction2.SetMemoryArgument(0, bufOJ);
            kernelCostFunction2.SetMemoryArgument(1, bufJ);
            kernelCostFunction2.SetValueArgument(2, exampleCount);
            kernelCostFunction2.SetValueArgument(3, trueExampleCount);
        }

        /// <summary>
        /// Runs L-BFGS starting from the theta given to <see cref="Init"/> and
        /// returns the optimized parameter vector. Terminates on maxIterations,
        /// on convergence (cost improvement below errorThreshold), on a vanishing
        /// curvature update, or when the cost becomes NaN.
        /// </summary>
        public float[] Run()
        {
            // Select the most powerful device 
            // TODO: selecting
            var device = context.Devices.OrderByDescending(d => d.MaxComputeUnits).First();
            Trace.TraceInformation("Using device: {0}; Compute units: {1}; MaxParameterSize: {2}; MaxWorkGroupSize: {3}; LocalMemorySize: {4}; MaxMemoryAllocationSize: {5}", device.Name, device.MaxComputeUnits, device.MaxParameterSize, device.MaxWorkGroupSize, device.LocalMemorySize, device.MaxMemoryAllocationSize);

            // TODO: reduce GPU IO, move some calculations to GPU
            using (ComputeCommandQueue commands = new ComputeCommandQueue(context, device, ComputeCommandQueueFlags.None))
            {
                int maxIterations = this.maxIterations;
                float[] thetabuf = (float[])this.theta.Clone();
                float J1 = lrCost(commands);
                float[] grad1 = lrGrad(commands);
                float Ji = J1;
                float[] gradi;
                float[] thetai = new float[featureCount];
                float[] p = new float[featureCount];

                // Circular buffers with the last historySize correction pairs:
                // s_i = theta_{i+1} - theta_i, y_i = grad_{i+1} - grad_i.
                float[] yhistory = new float[featureCount * historySize];
                float[] shistory = new float[featureCount * historySize];
                float[] ys = new float[historySize]; // cached y_j' * s_j dot products

                const float c1 = 0.0001f; // Wolfe condition constant
                int iter;
                for (iter = 0; iter < maxIterations; iter++)                
                {
                    Trace.TraceInformation("Iteration {0}, J = {1}", iter, J1);
                    // History window [incr, incr + bound) that is valid at this iteration.
                    int incr = iter <= historySize ? 0 : iter - historySize;
                    int bound = iter <= historySize ? iter : historySize;
                    float[] alphas = new float[bound];
                    // q = grad1

                    #region calc grad via updates (L-BFGS two-loop recursion: r approximates H^-1 * grad)
                    float[] q = (float[])grad1.Clone();
                    for (int i = bound - 1; i >= 0; i--)
                    {
                        int j = i + incr;
                        
                        // y = yj'sj
                        float y = 0.0f;
                        for (int k = 0; k < featureCount; k++)
                        {
                            y += yhistory[(j % historySize) * featureCount + k] * shistory[(j % historySize) * featureCount + k];
                        }
                        ys[j % historySize] = y;                        

                        // a = 1/y * sj' * q
                        float alpha = 0.0f;
                        for (int k = 0; k < featureCount; k++)
                        {
                            alpha += shistory[(j % historySize) * featureCount + k] * q[k];
                        }
                        alphas[i] = alpha = alpha / y; // save alpha for the second loop

                        // q = q - a*yj
                        for (int k = 0; k < featureCount; k++)
                        {
                            q[k] -= alpha * yhistory[(j % historySize) * featureCount + k];
                        }                        
                    }

                    float[] r = (float[])q.Clone();
                    for (int i = 0; i < bound; i++)
                    {
                        int j = i + incr;

                        // y = yj'sj (computed in the first loop)
                        float y = ys[j % historySize];

                        // beta = 1/y * yj' * r
                        float beta = 0.0f;
                        for (int k = 0; k < featureCount; k++)
                        {
                            beta += yhistory[(j % historySize) * featureCount + k] * r[k];
                        }
                        beta = beta / y;

                        // r = r + sj * (alpha(i+1)-beta);
                        for (int k = 0; k < featureCount; k++)
                        {
                            r[k] += (alphas[i] - beta) * shistory[(j % historySize) * featureCount + k];
                        }
                    }

                    #endregion
                    // p = -r : descent direction
                    for (int k = 0; k < featureCount; k++)
                        p[k] = -r[k];
                    
                    #region Simple line search (backtracking with a sufficient-decrease test)
                    // sl = -p'*p
                    // NOTE(review): the textbook Armijo slope is grad1'*p, not -p'*p;
                    // confirm this approximation is intentional before changing it.
                    float sl = 0.0f;
                    for (int k = 0; k < featureCount; k++)
                        sl += -p[k] * p[k];

                    float a = 1.0f; // TODO: dig more info on initial value; 
                    for (int i = 0; i < 150; i++) // 0.5f^150 => 0.0f => termination due to delta grad == 0 (seems excessive)
                    {
                        // thetaN = thetabuf+a*p
                        for (int k = 0; k < featureCount; k++)
                            thetai[k] = thetabuf[k] + a * p[k];

                        commands.WriteToBuffer(thetai, bufTheta, false, null); // TODO: use cubic interpolation instead of direct evaluation
                        Ji = lrCost(commands);
                        if (Ji <= J1 + c1 * a * sl)
                        {
                            break;
                        }
                        a = 0.5f * a; // halve the step and retry
                    }
                    #endregion

                    if (Single.IsNaN(Ji))
                    {
                        Trace.TraceWarning("Ji is NaN on step {0} (a = {1}), terminating", iter, a);
                        break;
                    }

                    gradi = lrGrad(commands); // thetai is already in the buffer

                    // Store the new correction pair and advance theta/grad.
                    float yy = 0.0f;
                    for (int k = 0; k < featureCount; k++)
                    {
                        shistory[(iter % historySize) * featureCount + k] = thetai[k] - thetabuf[k];
                        yy += yhistory[(iter % historySize) * featureCount + k] = gradi[k] - grad1[k];
                        thetabuf[k] = thetai[k];
                        grad1[k] = gradi[k];
                    }

                    // Fix: errorThreshold was stored by the constructor but never
                    // applied; stop once the cost improvement falls below it.
                    float improvement = Math.Abs(J1 - Ji); // measure before overwriting J1
                    J1 = Ji;

                    if (yy == 0.0f || improvement < errorThreshold)
                    {
                        break;
                    }
                }

                Trace.TraceInformation("Iterations {0}, J = {1}", iter, J1);
                return thetabuf;
            }
        }

        // Evaluates the logistic-regression cost for the theta currently in
        // bufTheta: a per-example pass (cost1) followed by a single-workgroup
        // reduction (cost2), then a blocking read of the scalar result.
        private float lrCost(ComputeCommandQueue commandQueue)
        {
            float[] J = new float[1];

            // exampleCount / 4: each work item handles 4 examples — hence FeatureAlignmentRequirement/padding
            commandQueue.Execute(kernelCostFunction1, null, new long[] { exampleCount / 4 }, null, null);
            commandQueue.AddBarrier();
            commandQueue.Execute(kernelCostFunction2, null, new long[] { localWorkgroupSize }, new long[] { localWorkgroupSize }, null);
            commandQueue.AddBarrier();
            commandQueue.ReadFromBuffer(bufJ, ref J, true, null);

            return J[0];
        }

        // Evaluates the gradient for the theta currently in bufTheta:
        // a per-example pass (grad1) followed by a per-feature reduction (grad2),
        // then a blocking read of the gradient vector.
        private float[] lrGrad(ComputeCommandQueue commands)
        {
            float[] grad = new float[featureCount];
            commands.Execute(kernelGradientStep1, null, new long[] { exampleCount / 4 }, null, null);
            commands.AddBarrier();
            commands.Execute(kernelGradientStep2, null, new long[] { localWorkgroupSize, featureCount / 4 }, new long[] { localWorkgroupSize, 1 }, null);
            commands.AddBarrier();
            commands.ReadFromBuffer(bufGrad, ref grad, true, null);
            return grad;
        }
    }
}
