using System;
using System.Diagnostics;
using System.Globalization;
using System.Linq;
using System.Text;

namespace Bettzueche.RLGlue.TaskSpec {
    /// <summary>
    /// Parses (and will eventually serialize) version 2 RL-Glue task spec strings.
    /// </summary>
    /// <remarks>
    /// See <a href="http://glue.rl-community.org/wiki/Task_Spec">Task Spec Wiki</a> for details.
    /// </remarks>
    class TaskSpecV2Serializer : ITaskSpecSerializer {

        internal const int PARSER_VERSION = 2;


        #region ITaskSpecSerializer Members

        /// <summary>
        /// Serializes the specified task spec object.
        /// </summary>
        /// <param name="taskSpecObject">The task spec object.</param>
        /// <returns>
        /// The Task Spec String
        /// </returns>
        /// <exception cref="NotImplementedException">Always thrown: this serializer is
        /// currently parse-only; writing V2 task spec strings is not implemented.</exception>
        public string Serialize(ITaskSpec taskSpecObject) {
            throw new NotImplementedException();
        }

        /// <summary>
        /// Deserializes the specified task spec string.
        /// </summary>
        /// <param name="taskSpecString">The task spec string.</param>
        /// <returns>
        /// The corresponding TaskSpec instance, or <c>null</c> if the string is
        /// malformed or fails the range constraint check.
        /// </returns>
        public ITaskSpec Deserialize(string taskSpecString) {
            /* Break the task spec into its colon-separated component parts:
             * The version number
             * The task style (episodic/continuous)
             * The observation data
             * The action data
             * Optionally (version >= 2) the reward data
             */
            TaskSpecV20 taskSpec = new TaskSpecV20();
            taskSpecString = this.removeWhiteSpace(taskSpecString);
            StringTokenizer tokenizer = new StringTokenizer(taskSpecString, ':');
            try {

                int numberOfTokens = tokenizer.countTokens();
                if (numberOfTokens > 5) throw new ArgumentException("TaskSpecV2 shouldn't parse task specs with more than 5 sections");
                if (numberOfTokens < 4) throw new ArgumentException("TaskSpecV2 shouldn't parse task specs with less than 4 sections");

                String versionString = tokenizer.nextToken();
                String taskStyle = tokenizer.nextToken();
                String observationString = tokenizer.nextToken();
                String actionString = tokenizer.nextToken();
                String rewardString;

                taskSpec.Version = versionString;
                // BUG FIX: parse with the invariant culture. Task spec strings always
                // use '.' as the decimal separator, so parsing must not depend on the
                // machine's locale (e.g. "2.0" fails or misparses under de-DE).
                double vers = Double.Parse(versionString, NumberStyles.Float, CultureInfo.InvariantCulture);

                // Task specs of version >= 2 have reward info that needs to be parsed;
                // a missing fifth section means "unknown reward range".
                if (vers >= PARSER_VERSION) {
                    rewardString = tokenizer.hasMoreTokens() ? tokenizer.nextToken() : "[]";
                }
                else {
                    Trace.TraceError("WARNING: task spec parser is version: " + PARSER_VERSION + " Your task spec is: " + vers);
                    Trace.TraceError("Attempting to parse anyway!");
                    rewardString = "";
                }

                // Check that this is a valid task type; guard against an empty section
                // (the original indexed into it unconditionally).
                if (taskStyle.Length == 0 || (taskStyle[0] != 'e' && taskStyle[0] != 'c')) {
                    Trace.TraceError("Invalid task type. Specify episodic (e) or continuous (c)");
                    throw new RLGlueException("Invalid task type. Specify episodic (e) or continuous (c)");
                }
                // NOTE(review): the misspelled property name originates in TaskSpecV20
                // itself and cannot be fixed here.
                taskSpec.Propblemtype = taskStyle[0].ToString();

                parseRLTypes(observationString, taskSpec.Observations);
                parseRLTypes(actionString, taskSpec.Actions);
                if (vers >= PARSER_VERSION)
                    taskSpec.Reward = parseRewards(rewardString);
                if (!constraintCheck(taskSpec))
                    return null;
            }
            catch (Exception) {
                // By design: any parse failure yields null rather than an exception.
                return null;
            }
            return taskSpec;
        }

        #endregion

        #region Private Helper Methods

        /// <summary>
        /// Removes every space character from the task spec string.
        /// </summary>
        /// <param name="input">The raw task spec string; may be null or empty.</param>
        /// <returns>The input with all ' ' characters removed; empty string for
        /// null/empty input.</returns>
        String removeWhiteSpace(String input) {
            // BUG FIX: the tokenizer-based original unconditionally requested a first
            // token and therefore threw NoSuchElementException on empty input -- and
            // this method is called BEFORE Deserialize's try/catch, so that escaped to
            // the caller. Concatenating all tokens split on ' ' is exactly equivalent
            // to deleting the spaces, so use Replace (also avoids O(n^2) string +=).
            if (String.IsNullOrEmpty(input))
                return String.Empty;
            return input.Replace(" ", "");
        }

        /// <summary>
        /// Parses one observation/action section of the task spec and fills the
        /// given types spec with the resulting discrete and continuous ranges.
        /// </summary>
        /// <param name="rlTypeString">Section of the form
        /// <c>dims_[types]_[range]_[range]...</c>, underscore separated.</param>
        /// <param name="typesSpec">Receives one range per declared type.</param>
        void parseRLTypes(String rlTypeString, RLTypesSpec typesSpec)/* throws Exception*/
        {
            /* The section splits (on '_') into:
             * the number of dimensions,
             * the type list,
             * and one range per type.
             */
            StringTokenizer sectionTokens = new StringTokenizer(rlTypeString, '_');
            String dimensionToken = sectionTokens.nextToken();   // dimension count; not used here
            String typeListToken = sectionTokens.nextToken();

            AbstractRange[] parsedRanges;
            parseTypesAndDimensions(typeListToken, out parsedRanges);
            parseRanges(sectionTokens, parsedRanges);

            // Integer ranges become discrete entries, everything else continuous.
            foreach (AbstractRange parsed in parsedRanges) {
                if (parsed is IntRange)
                    typesSpec.AddDiscrete((IntRange)parsed);
                else
                    typesSpec.AddContinuous(parsed as DoubleRange);
            }
        }
        

        /// <summary>
        /// Parses the reward section (e.g. <c>[-1,1]</c>) into a range.
        /// </summary>
        /// <param name="rewardString">The reward section of the task spec.</param>
        /// <returns>The parsed range; unbounded (-inf, +inf) when the section is
        /// the empty-bracket form.</returns>
        DoubleRange parseRewards(String rewardString)/* throws Exception*/
        {
            var range = new DoubleRange();

            // Empty brackets mean the bounds are unknown: treat as unbounded.
            if (!this.rangeKnown(rewardString)) {
                range.Min = Double.NegativeInfinity;
                range.Max = Double.PositiveInfinity;
                return range;
            }

            // Both min and max are present, comma separated.
            var boundsTokenizer = new StringTokenizer(rewardString, ',');
            range.Min = this.validValue(boundsTokenizer.nextToken());
            range.Max = this.validValue(boundsTokenizer.nextToken());
            return range;
        }

        /// <summary>
        /// Converts one range-boundary token (possibly still carrying a '[' or ']'
        /// from the surrounding range syntax) to a double.
        /// </summary>
        /// <param name="valueString">Token such as <c>[-inf</c>, <c>inf]</c>,
        /// <c>[3.5</c>, <c>7]</c> or <c>2</c>.</param>
        /// <returns>The numeric value; +/- infinity for the inf markers; NaN when
        /// nothing numeric remains after stripping brackets.</returns>
        double validValue(String valueString) {
            // Explicit infinity markers as they appear at the edges of a range.
            if (valueString.Equals("[-inf", StringComparison.InvariantCultureIgnoreCase))
                return Double.NegativeInfinity;
            if (valueString.Equals("inf]", StringComparison.InvariantCultureIgnoreCase))
                return Double.PositiveInfinity;

            // BUG FIX: strip a leading '[' AND a trailing ']'. The original used
            // "else if", so a token carrying both brackets (e.g. "[5]") only lost
            // the '[' and then failed in Double.Parse("5]").
            if (valueString.Length > 0 && valueString[0] == '[')
                valueString = valueString.Substring(1);
            if (valueString.Length > 0 && valueString[valueString.Length - 1] == ']')
                valueString = valueString.Substring(0, valueString.Length - 1);

            // A bare bracket (or empty token) carries no numeric value.
            if (valueString.Length == 0)
                return Double.NaN;

            // BUG FIX: invariant culture -- task specs always use '.' decimals,
            // independent of the machine's locale.
            return Double.Parse(valueString, NumberStyles.Float, CultureInfo.InvariantCulture);
        }

        /// <summary>
        /// Tells whether a range section actually specifies bounds.
        /// </summary>
        /// <param name="valueRange">The raw range section.</param>
        /// <returns>false for the empty-bracket forms <c>[,]</c> and <c>[]</c>
        /// (unknown range); true otherwise.</returns>
        bool rangeKnown(String valueRange) {
            return !valueRange.Equals("[,]") && !valueRange.Equals("[]");
        }

        /// <summary>
        /// Verifies that every parsed range is well-formed (min &lt;= max).
        /// </summary>
        /// <param name="taskSpec">The task spec populated by Deserialize.</param>
        /// <returns>false if any observation, action, or reward range is inverted;
        /// true otherwise.</returns>
        bool constraintCheck(TaskSpecV20 taskSpec)
        {
            foreach (var iRange in taskSpec.Observations.Discrete.Concat(taskSpec.Actions.Discrete)) {
                if (iRange.Min > iRange.Max)
                    return false;
            }
            foreach (var dRange in taskSpec.Observations.Continuous.Concat(taskSpec.Actions.Continuous)) {
                if (dRange.Min > dRange.Max)
                    return false;
            }
            // BUG FIX: Reward is only assigned for version >= 2 task specs, so it can
            // legitimately be null here. The original dereferenced it unconditionally,
            // which made every pre-v2 spec fail with a (swallowed) NRE and return null
            // despite Deserialize's "Attempting to parse anyway!".
            if (taskSpec.Reward != null && taskSpec.Reward.Min > taskSpec.Reward.Max)
                return false;
            return true;
        }

        /// <summary>
        /// Parses the bracketed type list (e.g. <c>[i,f,f]</c>) and allocates one
        /// empty range object per declared type.
        /// </summary>
        /// <param name="typesString">The type list, including the surrounding brackets.</param>
        /// <param name="ranges">Receives an IntRange per 'i' and a DoubleRange per 'f',
        /// in declaration order.</param>
        /// <exception cref="ArgumentException">if a type other than 'i' or 'f' is declared.</exception>
        void parseTypesAndDimensions(String typesString, out AbstractRange[] ranges)
        {
            // Discard the [ ] around the types string.
            // BUG FIX: the original used Substring(1, Length - 1); the second argument
            // is a LENGTH, not an end index, so only the leading '[' was removed and
            // the trailing ']' stayed glued to the last token (harmless only because
            // just the first character of each token is inspected below).
            typesString = typesString.Substring(1, typesString.Length - 2);

            // Split up the observation types.
            StringTokenizer typesTokenizer = new StringTokenizer(typesString, ',');

            // One range slot per declared type.
            ranges = new AbstractRange[typesTokenizer.countTokens()];

            /* We get the observation type from the tokenizer, add it to the ranges
             * array; 'i' declares a discrete (integer) dimension, 'f' a continuous one.
             */
            int currentTypeIndex = 0;
            while (typesTokenizer.hasMoreTokens()) {
                char type = typesTokenizer.nextToken().ElementAt(0);
                switch (type) {
                    case 'i':
                        ranges[currentTypeIndex] = new IntRange();
                        break;

                    case 'f':
                        ranges[currentTypeIndex] = new DoubleRange();
                        break;

                    default:
                        // ArgumentException (still an Exception, so existing
                        // catch-all callers are unaffected) instead of bare Exception.
                        throw new ArgumentException("Unknown Type: " + type);
                }
                currentTypeIndex += 1;
            }
        }

        /// <summary>
        /// Reads one range section per pre-allocated range object and fills in its
        /// min/max bounds.
        /// </summary>
        /// <param name="tokenizer">Tokenizer positioned at the first range section.</param>
        /// <param name="ranges">The ranges allocated by parseTypesAndDimensions,
        /// consumed in order.</param>
        void parseRanges(StringTokenizer tokenizer, AbstractRange[] ranges) {
            int currentRange = 0;
            while (tokenizer.hasMoreTokens()) {
                String typeRange = tokenizer.nextToken();
                StringTokenizer rangeTokenizer = new StringTokenizer(typeRange, ',');
                // BUG FIX: the original wrapped this whole body in an extra
                // rangeKnown() check, which made the "unknown range" else-branches
                // below unreachable -- an unknown range ("[]"/"[,]") left the range
                // object at its constructor defaults instead of the intended
                // widest-possible bounds.
                if (ranges[currentRange] is IntRange) {
                    IntRange iRange = (IntRange)ranges[currentRange];
                    if (this.rangeKnown(typeRange)) {
                        iRange.Min = (int)this.validValue(rangeTokenizer.nextToken());
                        iRange.Max = (int)this.validValue(rangeTokenizer.nextToken());
                    }
                    else {
                        // Unknown discrete range: widest representable bounds.
                        iRange.Min = int.MinValue;
                        iRange.Max = int.MaxValue;
                    }
                }
                else {
                    DoubleRange dRange = (DoubleRange)ranges[currentRange];
                    if (this.rangeKnown(typeRange)) {
                        dRange.Min = this.validValue(rangeTokenizer.nextToken());
                        dRange.Max = this.validValue(rangeTokenizer.nextToken());
                    }
                    else {
                        // Unknown continuous range: unbounded.
                        dRange.Min = double.NegativeInfinity;
                        dRange.Max = double.PositiveInfinity;
                    }
                }
                currentRange += 1;
            }
        }

        #endregion


        #region Inner helper classes

        /// <summary>
        /// Thrown by <see cref="StringTokenizer.nextToken()"/> when no further tokens
        /// are available; mirrors Java's exception of the same name.
        /// </summary>
        public class NoSuchElementException : RLGlueException { }

        /// <summary>
        /// The string tokenizer class allows an application to break a string into tokens.
        /// </summary>
        /// <remarks>
        /// Modeled on Java's <c>java.util.StringTokenizer</c>, but the semantics are not
        /// identical: consecutive delimiters are NOT skipped (they yield empty tokens),
        /// <see cref="hasMoreTokens"/> reports false while the cursor sits on a delimiter
        /// even if more characters follow, and <see cref="countTokens"/> counts empty
        /// segments between adjacent delimiters as tokens.
        /// </remarks>
        internal class StringTokenizer {
            // Whitespace fallback used when the caller supplies no delimiters.
            private static readonly char[] defaultDelim = { ' ', '\n', '\r', '\t' };
            // The complete string being tokenized; never modified after construction.
            private string input;
            // The active delimiter set; nextToken(params char[]) may replace it.
            private char[] delimiters;
            //private string[] tokens;
            //private int tokenIdx;
            // Number of characters consumed so far; countTokens() examines the
            // input from this offset onward.
            private int stringIdx;
            // Character-by-character cursor over the input string.
            CharEnumerator iterator;
            // True while the cursor still has an unread character.
            bool hasMore;

            /// <summary>
            /// Constructs a string tokenizer for the specified string. The characters in the delim argument are the delimiters for separating tokens. Delimiter characters themselves will not be treated as tokens.
            /// </summary>
            /// <param name="input">String to be parsed, not null</param>
            /// <param name="delimiters">An array of Unicode characters that delimit the substrings in this instance,
            /// an empty array that contains no delimiters, or null. If the separator parameter is null or contains no characters, white-space characters are assumed to be the delimiters.</param>
            public StringTokenizer(string input, params char[] delimiters) {
                // TODO: Complete member initialization
                this.input = input;
                if (delimiters == null || delimiters.Length == 0)
                    this.delimiters = defaultDelim;
                else
                    this.delimiters = delimiters;
                //tokens = input.Split(delimiters);
                // Position the cursor on the first character (if any).
                iterator = this.input.GetEnumerator();
                hasMore = iterator.MoveNext();
            }

            /// <summary>
            /// Returns the next token from this string tokenizer.
            /// </summary>
            /// <returns>the next token from this string tokenizer; the empty string when
            /// the cursor is currently positioned on a delimiter</returns>
            /// <exception cref="NoSuchElementException">if there are no more tokens in this tokenizer's string.</exception>
            internal string nextToken() {
                if (!hasMore)
                    throw new NoSuchElementException();
                StringBuilder sb = new StringBuilder();
                char c;
                do {
                    c = iterator.Current;
                    hasMore = iterator.MoveNext();
                    stringIdx++; //needed for countTokens()
                    // The terminating delimiter is consumed but not returned.
                    if (delimiters.Contains(c))
                        break;
                    sb.Append(c);
                } while (hasMore);
                return sb.ToString();
            }

            /// <summary>
            /// Tests if there are more tokens available from this tokenizer's string.<para>
            /// If this method returns true, then a subsequent call to nextToken with no argument will successfully return a token.</para>
            /// </summary>
            /// <returns>true if and only if there is at least one token in the string after the current position; false otherwise.</returns>
            internal bool hasMoreTokens() {
                // NOTE(review): unlike Java's StringTokenizer, this does not skip over a
                // run of delimiters -- it reports false whenever the cursor sits on a
                // delimiter, even if non-delimiter characters follow.
                return hasMore && !delimiters.Contains(iterator.Current);
            }

            /// <summary>
            /// Calculates the number of times that this tokenizer's nextToken method can be called before it generates an exception.<para>
            /// The current position is not advanced.</para>
            /// </summary>
            /// <returns>the number of tokens remaining in the string using the current delimiter set.</returns>
            internal int countTokens() {
                // Counts via String.Split on the unconsumed remainder, so empty
                // segments between adjacent delimiters are included in the count
                // (unlike Java, which skips them).
                String temp = input.Substring(stringIdx);
                return temp.Split(delimiters).Length;
                //return tokens.Length;
            }

            /// <summary>
            /// Switches the active delimiter set, then returns the next token.
            /// </summary>
            /// <param name="delimiter">The new delimiters; null falls back to the
            /// whitespace defaults. NOTE(review): unlike the constructor, an EMPTY array
            /// is kept as-is here rather than being mapped to the defaults -- confirm
            /// whether that asymmetry is intentional.</param>
            /// <returns>the next token, read using the new delimiter set</returns>
            /// <exception cref="NoSuchElementException">if there are no more tokens in this tokenizer's string.</exception>
            internal string nextToken(params char[] delimiter) {
                this.delimiters = delimiter == null ? defaultDelim : delimiter;
                return nextToken();
            }
        }

        #endregion
    }



}
