﻿using System;
using Bettzueche.RLGlue.RLComponent;
using Bettzueche.RLGlue.RLTypes;

namespace Bettzueche.RLGlue.Codec {

    /// <summary>
    /// This is a local implementation of RL-Glue. It should be identical in behavior
    /// to the RL-Glue code in the C/C++ RLGlueCore project.
    /// </summary>
    /// <remarks>
    /// Based on Java-Glue by Brian Tanner
    /// Copyright (C) 2007, Brian Tanner
    /// http://rl-glue-ext.googlecode.com/
    /// </remarks>
    public class LocalGlue : IExperiment {

        #region Private Fields

        // Environment and Agent are fixed for the lifetime of this glue
        // instance, so they are readonly.
        readonly IEnvironment E;
        readonly IAgent A;
        // Action chosen by the agent in the most recent Start/Step; it is the
        // action fed to the environment on the next Step call.
        GeneralAction lastAction = null;
        // True once the current episode reached a terminal state.
        bool isTerminal = false;
        int numSteps = 0;
        double totalReward = 0.0d;
        int numEpisodes = 0;
        // Guards all mutable experiment state so statistics can be queried
        // from another thread while an episode is running.
        readonly object syncLock = new object();

        #endregion

        #region Constructors

        /// <summary>
        /// Initializes a new instance of the <see cref="LocalGlue"/> class.
        /// </summary>
        /// <param name="E">The Environment</param>
        /// <param name="A">The Agent</param>
        public LocalGlue(IEnvironment E, IAgent A) {
            this.E = E;
            this.A = A;
        }

        #endregion

        #region IExperiment Members

        /// <summary>
        /// Gets number of steps in the current (or last) episode.
        /// </summary>
        public int StepCount {
            get {
                lock (syncLock) {
                    return numSteps;
                }
            }
        }

        /// <summary>
        /// Gets total reward of current episode, last episode respectively.
        /// </summary>
        public double Return {
            get {
                lock (syncLock) {
                    return totalReward;
                }
            }
        }

        /// <summary>
        /// Gets the episode count.
        /// </summary>
        public int Episodes {
            get {
                lock (syncLock) {
                    return numEpisodes;
                }
            }
        }

        /// <summary>
        /// Initializes Environment and Agent and resets all experiment state.
        /// </summary>
        /// <returns>Task Spec string</returns>
        public string Init() {
            lock (syncLock) {
                string taskSpec = E.InitEnvironment().ToTaskSpecString();
                A.InitAgent(taskSpec);
                numEpisodes = 0;
                numSteps = 0;
                // Also clear per-episode state so a re-Init cannot leak
                // reward/action data from a previous experiment run.
                totalReward = 0.0d;
                lastAction = null;
                isTerminal = false;
                return taskSpec;
            }
        }

        /// <summary>
        /// Starts experiment (starts environment and agent).
        /// </summary>
        /// <param name="observation">out Parameter of initial state.</param>
        /// <param name="action">out Parameter of first action to perform.</param>
        public void Start(out RLTypes.GeneralState observation, out RLTypes.GeneralAction action) {
            lock (syncLock) {
                observation = this.StartEnvironment();
                action = this.StartAgent(observation);
                // FIX: remember the initial action so the first Step() call
                // passes it to the environment. Previously lastAction stayed
                // null until the second step, so env_step received a null
                // action right after every Start (Java-Glue's RL_agent_start
                // stores this assignment; the port had dropped it).
                lastAction = action;
            }
        }

        /// <summary>
        /// Performs a learning step (env_step and agent_step).
        /// </summary>
        /// <param name="reward">out Parameter Reward of this step</param>
        /// <param name="nextObservation">out Parameter next State</param>
        /// <param name="nextAction">out parameter of next action to perform.</param>
        /// <returns>
        /// true, if the agent reaches a terminal state. false otherwise
        /// </returns>
        public bool Step(out double reward, out RLTypes.GeneralState nextObservation, out RLTypes.GeneralAction nextAction) {
            lock (syncLock) {
                if (lastAction == null) {
                    Console.Error.WriteLine("lastAction came back as null from RL_step");
                }
                bool terminal = this.StepEnvironment(lastAction, out reward, out nextObservation);

                if (terminal) {
                    // Episode over: the agent only receives the final reward;
                    // lastAction deliberately keeps the last chosen action,
                    // matching the reference RL-Glue behavior.
                    this.EndAgent(reward);
                } else {
                    lastAction = this.StepAgent(reward, nextObservation);
                }
                nextAction = lastAction;
                return terminal;
            }
        }

        /// <summary>
        /// Release (dispose) resources after an experiment.
        /// </summary>
        public void CleanUp() {
            lock (syncLock) {
                E.CleanupEnvironment();
                A.CleanupAgent();
            }
        }

        /// <summary>
        /// Performs the specified number of <see cref="Step">RL_steps</see>.
        /// </summary>
        /// <param name="maxStepsThisEpisode">Number (max) of Steps. Also 0, go without step limit to a final state.</param>
        /// <returns>exit status: 1 if the episode ended in a terminal state, 0 if it was cut off by the step limit</returns>
        /// <remarks>
        /// int RL_episode(int numSteps);
        /// </remarks>
        public int RunEpisode(int maxStepsThisEpisode) {
            lock (syncLock) {
                GeneralState observation;
                GeneralAction action;
                bool terminal = false;
                double reward;
                this.Start(out observation, out action);
                /* "RL_start sets current step to 1, so we should start x at 1" (BTanner) */
                for (int currentStep = 1; !terminal && (maxStepsThisEpisode == 0 || currentStep < maxStepsThisEpisode); currentStep++) {
                    terminal = this.Step(out reward, out observation, out action);
                }

                /* "Return the value of terminal to tell the caller whether the
                   episode ended naturally or was cut off" (BTanner) */
                return terminal ? 1 : 0;
            }
        }

        #endregion

        #region IEnvironment Members

        /// <summary>
        /// Not Supported.<para>
        /// Use <see cref="LocalGlue.Init()"/> instead.</para>
        /// </summary>
        /// <returns>
        /// NotSupportedException
        /// </returns>
        public TaskSpec.ITaskSpec InitEnvironment() {
            throw new NotSupportedException();
        }

        /// <summary>
        /// Initializes a new episode and gets initial state.
        /// </summary>
        /// <returns>
        /// Initial state of a learning episode (corresponding observation, respectively)
        /// </returns>
        public RLTypes.GeneralState StartEnvironment() {
            lock (syncLock) {
                // Fresh per-episode bookkeeping; the start counts as step 1.
                numSteps = 1;
                isTerminal = false;
                totalReward = 0.0d;

                GeneralState o = E.StartEnvironment();
                if (o == null) {
                    Console.Error.WriteLine("o came back as null from RL_start");
                }
                return o;
            }
        }

        /// <summary>
        /// Performs specified action on the environment and returns terminal flag, reward and next state.
        /// </summary>
        /// <param name="action">action to perform</param>
        /// <param name="outReward">out Parameter, reward of the performed action.</param>
        /// <param name="outObservation">out Parameter, state (observation) after the action performed.</param>
        /// <returns>
        /// true if reached a terminal state. false otherwise
        /// </returns>
        public bool StepEnvironment(RLTypes.GeneralAction action, out double outReward, out RLTypes.GeneralState outObservation) {
            lock (syncLock) {
                bool terminal = E.StepEnvironment(action, out outReward, out outObservation);
                if (outObservation == null) {
                    Console.Error.WriteLine("Ro.o came back as null from RL_step");
                }

                totalReward += outReward;

                // FIX: keep the terminal flag in sync with the environment
                // (previously it was only ever reset in StartEnvironment).
                isTerminal = terminal;

                if (terminal) {
                    numEpisodes++;
                } else {
                    numSteps++;
                }
                return terminal;
            }
        }

        /// <summary>
        /// Not supported.<para>
        /// Use <see cref="LocalGlue.CleanUp()"/> instead.</para>
        /// </summary>
        public void CleanupEnvironment() {
            throw new NotSupportedException();
        }

        /// <summary>
        /// Sends message to environment.
        /// </summary>
        /// <param name="message">message according to your message protocol; null is treated as the empty string</param>
        /// <returns>Response message, maybe empty string (never null)</returns>
        public string EnvMessage(string message) {
            lock (syncLock) {
                // Normalize null in/out to "" so callers never see null.
                string incomingMessage = message ?? "";
                string returnMessage = E.EnvMessage(incomingMessage);
                return returnMessage ?? "";
            }
        }

        #endregion

        #region IAgent Members

        /// <summary>
        /// Not supported.<para>
        /// Use <see cref="LocalGlue.Init()"/> instead</para>
        /// </summary>
        /// <param name="task_spec">ignored; this overload always throws</param>
        public void InitAgent(string task_spec) {
            throw new NotSupportedException();
        }

        /// <summary>
        /// Gets the first action to perform depending on the specified initial state.
        /// </summary>
        /// <param name="observation">Initial state</param>
        /// <returns>First action chosen by the agent.</returns>
        public RLTypes.GeneralAction StartAgent(RLTypes.GeneralState observation) {
            lock (syncLock) {
                GeneralAction theAction = A.StartAgent(observation);
                if (theAction == null) {
                    Console.Error.WriteLine("theAction came back as null from RL_start");
                }
                return theAction;
            }
        }

        /// <summary>
        /// Learning algorithm of the agent.<para>
        /// Based on the current position and observation the agent does his learning updates and
        /// returns the following action.</para>
        /// </summary>
        /// <param name="reward">current reward (of last performed action)</param>
        /// <param name="observation">current state (as after-effect of the last action)</param>
        /// <returns>Next action, the agent wants to perform according to his policy.</returns>
        public RLTypes.GeneralAction StepAgent(double reward, RLTypes.GeneralState observation) {
            lock (syncLock) {
                GeneralAction theAction = A.StepAgent(reward, observation);
                if (theAction == null) {
                    Console.Error.WriteLine("theAction came back as null from agent_step");
                }
                return theAction;
            }
        }

        /// <summary>
        /// Marks the end of an episode and gives the last reward.
        /// </summary>
        /// <param name="reward">reward of the last and final action</param>
        public void EndAgent(double reward) {
            lock (syncLock) {
                A.EndAgent(reward);
            }
        }

        /// <summary>
        /// Not supported.<para>
        /// Use <see cref="LocalGlue.CleanUp()"/> instead.</para>
        /// </summary>
        public void CleanupAgent() {
            throw new NotSupportedException();
        }

        /// <summary>
        /// Sends the specified message to the agent, e.g. to set parameters or query values.
        /// </summary>
        /// <param name="message">message according to your message protocol; null is treated as the empty string</param>
        /// <returns>Response message, maybe empty string (never null)</returns>
        public string MessageAgent(string message) {
            lock (syncLock) {
                // Normalize null in/out to "" so callers never see null.
                string incomingMessage = message ?? "";
                string returnMessage = A.MessageAgent(incomingMessage);
                return returnMessage ?? "";
            }
        }

        #endregion

    }
}
