﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Reinforcement;
using Neural;
using Computing;
using Probability;

namespace sau
{
    /// <summary>
    /// Q-learning action advisor. A small MLP approximates Q(state, action)
    /// (4 state inputs + 1 action input -> 1 output) and actions are drawn
    /// from a Boltzmann (softmax) distribution over a discretized
    /// one-dimensional action set [-maxDecision, +maxDecision].
    /// </summary>
    class Advisor : CCDecisionMaker
    {
        // State layout: 0 - alpha, 1 - q, 2 - theta, 3 - desired theta.
        private double[] state;
        private MLPerceptron2 nn;       // Q(state, action) approximator
        private ASampler sampler;

        double gamma = 0.8;             // discount factor
        double beta = 0.0005;           // learning rate

        double maxDecision = 1.4;       // actions lie in [-maxDecision, +maxDecision]
        double decisionStep = 0.05;     // discretization step of the action set

        Random random = new Random();

        double T = 0.1;                 // Boltzmann temperature, annealed in CCAdviseAction

        int stepNumber = 1;

        Vector lastDecision = new Vector(1, 0);

        public Advisor()
        {
            sampler = new ASampler();
            state = new double[4];

            // 5 inputs (4 state components + 1 action), two arctangent layers.
            nn = new MLPerceptron2();
            nn.Build(5, new int[] { 5, 1 }, new CellType[] { CellType.Arcustangent, CellType.Arcustangent });
            nn.SetInputDescription(new Vector(5, 0), new Vector(5, 1));
            nn.InitWeights(1.0);

            // Perturb each weight with an independent N(0, 0.5) sample.
            // (The previous code drew a single sample and used that same value
            // for every weight, leaving all weights identically shifted.)
            Vector v = new Vector(nn.WeightsCount);
            for (int i = 0; i < nn.WeightsCount; ++i)
            {
                v[i] = sampler.SampleFromNormal(0, 0.5);
            }
            nn.AddToWeights(v, 1);
        }

        //  we start in state "state"
        public void CCStartInState(double[] state)
        {
            Array.Copy(state, this.state, state.Length);
        }

        //  what action do you advise to do?
        //  Writes the sampled action into decision[0] and remembers it for
        //  the next CCThisHappened call.
        public void CCAdviseAction(ref double[] decision)
        {
            double decisionTmp = SelectDecision(new Vector(state));

            lastDecision[0] = decisionTmp;
            decision[0] = decisionTmp;

            stepNumber++;

            // Anneal the exploration temperature every 400 steps,
            // but never let it drop below 0.02.
            if (stepNumber % 400 == 0 && T > 0.0199)
                T -= 0.01;
        }

        //  what happened then?
        //  Computes the TD-error gradient for the last (state, action) pair
        //  and advances the stored state to next_state.
        public void CCThisHappened(double reward,
                                   double[] next_state)
        {
            Vector vstate = new Vector(state);
            Vector vnextstate = new Vector(next_state);

            // Standard Q-learning TD error, scaled by the learning rate:
            //   beta * (r + gamma * max_a' Q(s', a') - Q(s, a)).
            // (The previous code had the -Q(s,a) term inside the gamma factor.)
            double[] out_gradient = new double[1];
            out_gradient[0] = beta * (reward
                                      + gamma * CalculateMaxQ(vnextstate)[0]
                                      - CalculateQ(vstate, lastDecision)[0]);

            Array.Copy(next_state, state, 4);

            Vector weight_gradient = new Vector(5);
            nn.BackPropagateGradient(new Vector(out_gradient), ref weight_gradient);
            // NOTE(review): the weight update below is disabled, so the
            // approximator never actually learns; re-enable once the gradient
            // computation has been verified.
            //nn.AddToWeights(weight_gradient, 1);
        }

        // Number of discrete candidate actions in [-maxDecision, +maxDecision].
        // Integer-based enumeration avoids the floating-point accumulation of
        // "dec += decisionStep", which could skip the +maxDecision endpoint.
        private int DecisionCount
        {
            get { return (int)Math.Round(2 * maxDecision / decisionStep) + 1; }
        }

        // The i-th candidate action, i in [0, DecisionCount).
        private double DecisionAt(int i)
        {
            return -maxDecision + i * decisionStep;
        }

        // Samples an action from the Boltzmann distribution
        //   P(a) = exp(Q(s,a)/T) / sum_a' exp(Q(s,a')/T)
        // over the discrete action set. The unnormalized weights are computed
        // once (O(n) network evaluations); sampling against
        // threshold = u * sum with u ~ U[0,1) yields exactly the same
        // distribution as comparing normalized probabilities against u.
        private double SelectDecision(Vector vstate)
        {
            int count = DecisionCount;
            double[] weights = new double[count];
            double sum = 0;

            for (int i = 0; i < count; ++i)
            {
                weights[i] = Math.Exp(CalculateQ(vstate, new Vector(1, DecisionAt(i)))[0] / T);
                sum += weights[i];
            }

            double threshold = GetRandomNumber(0, 1) * sum;
            double cumulative = 0;

            for (int i = 0; i < count; ++i)
            {
                cumulative += weights[i];
                if (cumulative >= threshold)
                    return DecisionAt(i);
            }

            // Numerical safety net: rounding may leave cumulative < threshold.
            return maxDecision;
        }

        // P(decision | state) under the Boltzmann distribution at temperature T.
        private double GetDecisionProbability(Vector vstate, Vector vdecision)
        {
            double sum = 0;
            int count = DecisionCount;

            for (int i = 0; i < count; ++i)
            {
                sum += Math.Exp(CalculateQ(vstate, new Vector(1, DecisionAt(i)))[0] / T);
            }

            return Math.Exp(CalculateQ(vstate, vdecision)[0] / T) / sum;
        }

        // Sum of Q(state, a) over all candidate actions a.
        private Vector CalculateTotalQ(Vector vstate)
        {
            Vector total = new Vector(1);
            int count = DecisionCount;

            for (int i = 0; i < count; ++i)
            {
                total[0] += CalculateQ(vstate, new Vector(1, DecisionAt(i)))[0];
            }

            return total;
        }

        // Immediate reward: negative deviation of theta from its target.
        // NOTE(review): the original comment said "-dif^2" but the code is
        // linear in the difference — confirm which was intended.
        private double CalculateReward()
        {
            return -(state[2] - state[3]);
        }

        // max over candidate actions of Q(state, a).
        private Vector CalculateMaxQ(Vector vstate)
        {
            // NegativeInfinity instead of a magic sentinel (-1000), which a
            // legitimately low Q value could otherwise undercut.
            Vector maxValue = new Vector(1, double.NegativeInfinity);
            int count = DecisionCount;

            for (int i = 0; i < count; ++i)
            {
                Vector result = CalculateQ(vstate, new Vector(1, DecisionAt(i)));
                if (result[0] > maxValue[0])
                    maxValue = result;
            }

            return maxValue;
        }

        // Q(state, action): feed [state(4); action(1)] through the network.
        private Vector CalculateQ(Vector vstate, Vector vdecision)
        {
            Vector input = new Vector(5);

            for (int i = 0; i < 4; ++i)
            {
                input[i] = vstate[i];
            }
            input[4] = vdecision[0];

            Vector result = new Vector(1);
            nn.Approximate(input, ref result);

            return result;
        }

        // Uniform random double in [minimum, maximum).
        public double GetRandomNumber(double minimum, double maximum)
        {
            return random.NextDouble() * (maximum - minimum) + minimum;
        }
    }
}
