﻿using System;
using System.Collections.Generic;
using System.Collections;
using System.Linq;
using System.Text;
using Reinforcement;
using Probability;
using Neural;
using Computing;
namespace SAUlot
{
    /// <summary>
    /// Actor-critic reinforcement-learning decision maker with a bounded
    /// experience-replay buffer. The actor network approximates the policy mean
    /// (actions are sampled from a Gaussian around it); the critic network
    /// approximates the state value V. Each step replays up to
    /// <see cref="ReplayLimit"/> uniformly sampled past transitions and applies
    /// importance-sampling-weighted TD updates to both networks.
    /// </summary>
    public class AirplaneACRepeated : CCDecisionMaker
    {
        public event RaportForm.RaportThis RaportAppend;
        public event RaportForm.RaportTitle RaportSetTitle;
        public event RaportForm.RaportStep RaportSetStep;
        public event RaportForm.RaportClear RaportClearCharts;

        // Size of the replay buffer and of the per-step update batch.
        private const int ReplayLimit = 50;

        private ASampler Sampler;
        private MLPerceptron2 Actor;   // policy network (Pi)
        private MLPerceptron2 Critic;  // value network (V)
        private double[] State { get; set; }
        public ArrayList History;      // replay buffer of HistoryState entries (oldest first)
        public double BetaFit { get; set; }    // actor learning rate
        public double BetaVt { get; set; }     // critic learning rate
        private double AdvisedAction;          // last action handed out by CCAdviseAction
        public double Gamma { get; set; }      // discount factor for the TD target
        public double Deviation { get; set; }  // std-dev of the Gaussian exploration policy
        private double B = 20.0;               // upper clip bound for the importance-sampling ratio
        private Vector LastActorApprox;        // actor output (policy mean) from the last advise call
        private int InputNumber;
        private int OutputNumber;
        private double Reward;                 // running per-episode reward accumulator
        private int Step;
        private int Episode;
        private CellType ActorSecondLayerType;
        private CellType CriticSecondLayerType;
        private int FirstLayerNeuronNumber;

        /// <summary>
        /// One replay-buffer entry: a transition (s, a, r, s') plus the policy
        /// mean that was in force when the action was sampled.
        /// </summary>
        private struct HistoryState
        {
            public double[] state;
            public double[] next_state;
            public double decision;   // action actually taken (sampled from the policy)
            public double politic;    // actor output (policy mean) at decision time
            public double reward;

            public HistoryState(double[] s, double[] ns, double dec, double r, double pol)
            {
                state = s;
                next_state = ns;
                decision = dec;
                reward = r;
                politic = pol;
            }
        }

        /// <summary>
        /// Creates an agent with linear second layers for both networks.
        /// Delegates to the full constructor to avoid duplicating initialization.
        /// </summary>
        public AirplaneACRepeated(int input, int output, double beta_ac, double beta_crit, double gamma, double std_dev, int fstlayer)
            : this(input, output, beta_ac, beta_crit, gamma, std_dev, fstlayer, Neural.CellType.Linear, Neural.CellType.Linear)
        {
        }

        /// <summary>
        /// Creates an agent.
        /// </summary>
        /// <param name="input">Number of state inputs fed to both networks.</param>
        /// <param name="output">Number of network outputs.</param>
        /// <param name="beta_ac">Actor learning rate (<see cref="BetaFit"/>).</param>
        /// <param name="beta_crit">Critic learning rate (<see cref="BetaVt"/>).</param>
        /// <param name="gamma">Discount factor (<see cref="Gamma"/>).</param>
        /// <param name="std_dev">Exploration std-dev (<see cref="Deviation"/>).</param>
        /// <param name="fstlayer">Neuron count of the actor's first layer.</param>
        /// <param name="actor">Cell type for the actor's second layer.</param>
        /// <param name="critic">Cell type for the critic's second layer.</param>
        public AirplaneACRepeated(int input, int output, double beta_ac, double beta_crit, double gamma, double std_dev, int fstlayer, CellType actor, CellType critic)
        {
            Sampler = new ASampler();
            History = new ArrayList();
            LastActorApprox = new Vector(1);
            InputNumber = input;
            OutputNumber = output;
            BetaFit = beta_ac;
            BetaVt = beta_crit;
            Gamma = gamma;
            Deviation = std_dev;
            Step = 0;
            Episode = 0;
            FirstLayerNeuronNumber = fstlayer;
            ActorSecondLayerType = actor;
            CriticSecondLayerType = critic;
            //Init();
        }

        /// <summary>
        /// (Re)builds both networks with small random initial weights and clears
        /// the replay buffer and report charts. Must be called before advising.
        /// </summary>
        public void Init()
        {
            History.Clear();

            RaportClearCharts();

            // Inputs are described as zero-mean, unit-std (no input normalization).
            Actor = new Neural.MLPerceptron2();
            Actor.Build(InputNumber, new int[] { FirstLayerNeuronNumber, OutputNumber }, new Neural.CellType[] { Neural.CellType.Arcustangent, Neural.CellType.Arcustangent });
            Actor.SetInputDescription(new Computing.Vector(InputNumber, 0), new Computing.Vector(InputNumber, 1));
            Actor.InitWeights();
            // Small uniform perturbation of the initial weights.
            Actor.AddToWeights(new Vector(Actor.WeightsCount, Sampler.NextDouble() / 10), 1);

            Critic = new Neural.MLPerceptron2();
            // NOTE(review): the critic's first layer is hard-coded to 50 neurons and uses a
            // different Build overload than the actor; FirstLayerNeuronNumber may have been
            // intended here — confirm against the MLPerceptron2 API before changing.
            // NOTE(review): ActorSecondLayerType / CriticSecondLayerType are stored by the
            // constructors but never used here (both layers are Arcustangent) — confirm intent.
            Critic.Build(InputNumber, Neural.CellType.Arcustangent, new int[] { 50, OutputNumber });
            Critic.SetInputDescription(new Computing.Vector(InputNumber, 0), new Computing.Vector(InputNumber, 1));
            Critic.InitWeights();
            Critic.AddToWeights(new Vector(Critic.WeightsCount, Sampler.NextDouble() / 10), 1);
        }

        /// <summary>Sets the initial state for a new episode.</summary>
        public void CCStartInState(double[] state)
        {
            State = state;
        }

        /// <summary>
        /// Produces an action for the current state: the actor's output is the
        /// Gaussian mean, from which the action is sampled with std-dev
        /// <see cref="Deviation"/>. Stores both for the subsequent learning step.
        /// </summary>
        public void CCAdviseAction(ref double[] decision)
        {
            Actor.Approximate(new Vector(State.Take(InputNumber).ToArray<double>()), ref LastActorApprox);
            decision[0] = Sampler.SampleFromNormal(LastActorApprox[0], Deviation);
            AdvisedAction = decision[0];
        }

        /// <summary>
        /// Registers a transition, performs a batch of replayed actor-critic
        /// updates, and advances the current state.
        /// </summary>
        /// <param name="reward">Reward received for the last advised action.</param>
        /// <param name="next_state">Resulting state; its last element is a terminal flag (1 = episode end).</param>
        public void CCThisHappened(double reward, double[] next_state)
        {
            Vector critic_approx = new Vector(1);
            Vector actor_approx = new Vector(1);
            Step += 1;
            Reward += reward;
            History.Add(new HistoryState(State, next_state, AdvisedAction, reward, LastActorApprox[0]));

            // Episode end: report the average per-step reward and reset the counters.
            if (next_state[next_state.Length - 1] == 1)
            {
                Reward = Reward / Step;
                RaportAppend(2, Reward);
                Episode += 1;
                Step = 0;
                Reward = 0;
            }

            // Replay: up to ReplayLimit updates on transitions sampled uniformly from History.
            int updates = Math.Min(History.Count, ReplayLimit);
            for (int i = 0; i < updates; ++i)
            {
                // (a) sample a stored transition and evaluate both networks on its state
                HistoryState state = (HistoryState)History[Sampler.Next() % History.Count];
                Critic.Approximate(new Vector(state.state.Take(InputNumber).ToArray<double>()), ref critic_approx);
                Actor.Approximate(new Vector(state.state.Take(InputNumber).ToArray<double>()), ref actor_approx);

                // (b) temporal-difference error
                double dt;
                if (state.next_state[state.next_state.Length - 1] == 1)
                {
                    // Terminal transition: no bootstrap from the next state.
                    dt = state.reward - critic_approx[0];
                }
                else
                {
                    Vector critic_approx_next = new Vector(1);
                    Critic.Approximate(new Vector(state.next_state.Take(InputNumber).ToArray<double>()), ref critic_approx_next);
                    // BUG FIX: a hard-coded local discount (0.3) was used here before,
                    // silently ignoring the Gamma value supplied to the constructor.
                    dt = state.reward + Gamma * critic_approx_next[0] - critic_approx[0];
                }

                // (c) gradient estimators
                // Actor: score-function gradient of the Gaussian policy log-density,
                // d/d(mean) log N(a; mean, sigma) = (a - mean) / sigma^2.
                double[] dec = new double[1];
                dec[0] = (state.decision - state.politic) / Math.Pow(Deviation, 2);
                Vector actor_weights = new Vector();
                Actor.BackPropagateGradient(new Vector(dec), ref actor_weights);
                // Scale by the TD error and the importance-sampling ratio, clipped at B.
                // NOTE(review): the ratio uses policy means (actor_approx / politic), not
                // densities — confirm this matches the intended estimator.
                actor_weights.MultiplyBy(dt * Math.Min(actor_approx[0] / state.politic, B));

                // Critic update, weighted the same way.
                Vector critic_weights = new Vector(Critic.WeightsCount, 1);
                Critic.BackPropagateGradient(new Vector(1, dt), ref critic_weights);
                critic_weights.MultiplyBy(dt * Math.Min(actor_approx[0] / state.politic, B));

                // (d) apply both updates with their respective learning rates
                Actor.AddToWeights(actor_weights, BetaFit);
                Critic.AddToWeights(critic_weights, BetaVt);
            }

            // Keep the replay buffer bounded (drop the oldest transition).
            if (History.Count > ReplayLimit)
                History.RemoveAt(0);

            RaportSetTitle(1, "reward");
            RaportSetStep(1, 0.5);
            RaportAppend(1, reward);

            RaportSetTitle(2, "Average Reward");
            RaportSetStep(2, 0.005);

            State = next_state;

        }
    }
}

