﻿using System;
using System.Collections.Generic;
using System.IO;
using Bettzueche.RLGlue.RLComponent;
using Bettzueche.RLGlue.Codec;
using Bettzueche.RLGlue.RLTypes;

namespace RunMinesSarsaExperiment {
  /**
   * Experiment program that does some of the things that might be important when
   * running an experiment.  It runs an agent on the environment and periodically
   * asks the agent to "freeze learning": to stop updating its policy for a number
   * of episodes in order to get an estimate of the quality of what has been learned
   * so far.
   *
   * The experiment estimates statistics such as the mean and standard deviation of
   * the return gathered by the policy and writes those to a comma-separated value file
   * called results.csv.
   *
   * This experiment also shows off some other features that can be achieved easily
   * through the RL-Glue env/agent messaging system by freezing learning (described
   * above), having the environment start in specific starting states, and saving
   * and loading the agent's value function to/from a binary data file.
   * @author Brian Tanner
   */
    class SampleExperiment {

        /// <summary>Glue connection used to run episodes and exchange messages with the agent/environment.</summary>
        IExperiment rlGlue = new NetworkGlue();

        /// <summary>Entry point: constructs the experiment and runs the full demo.</summary>
        internal static void Main(string[] args) {
            SampleExperiment theExperiment = new SampleExperiment();
            theExperiment.runExperiment();
        }

        /// <summary>
        /// Writes the collected evaluation results to <paramref name="fileName"/>:
        /// a '#' comment header, then one line with all means and one line with
        /// all standard deviations.
        /// NOTE(review): fields are delimited by ';' even though the surrounding
        /// messages describe the file as comma-separated; the delimiter is kept
        /// as-is so existing consumers of results.csv keep working.
        /// </summary>
        /// <param name="results">Evaluation points gathered during the experiment, in order.</param>
        /// <param name="fileName">Destination file; overwritten if it already exists.</param>
        private void saveResultsToCSVFile(List<EvaluationPoint> results, string fileName) {
            try {
                // The try now wraps File.Open as well: previously an IOException
                // thrown while opening the file escaped uncaught, even though the
                // catch below clearly intends to report file problems.
                using (StreamWriter writer = new StreamWriter(File.Open(fileName, FileMode.Create))) {
                    writer.WriteLine("#Results from SampleExperiment.cs.  First line is means, second line is standard deviations.");
                    foreach (EvaluationPoint point in results) {
                        writer.Write(point.mean + ";");
                    }
                    writer.WriteLine();
                    foreach (EvaluationPoint point in results) {
                        writer.Write(point.standardDeviation + ";");
                    }
                    writer.WriteLine();
                }
            }
            catch (IOException ex) {
                Console.WriteLine("Problem writing results out to file: " + fileName + " :: " + ex.Message);
            }
        }

        /// <summary>
        /// Mean and standard deviation of the return gathered over one
        /// evaluation run of the frozen policy.
        /// </summary>
        internal class EvaluationPoint {

            // Mean episode return over the evaluation episodes.
            public double mean;
            // Sample standard deviation of the episode returns.
            public double standardDeviation;

            public EvaluationPoint(double mean, double standardDeviation) {
                this.mean = mean;
                this.standardDeviation = standardDeviation;
            }
        }

        /// <summary>
        /// Tells the agent to stop learning, executes n episodes with its current
        /// policy, tells it to resume learning, and estimates the mean and
        /// standard deviation of the return over those episodes.
        /// </summary>
        /// <returns>Sample mean and standard deviation of the episode returns.</returns>
        internal EvaluationPoint evaluateAgent() {
            const int n = 10;
            double sum = 0;
            double sumOfSquares = 0;

            rlGlue.MessageAgent("freeze learning");
            for (int i = 0; i < n; i++) {
                // Cut episodes off at 5000 steps in case the policy is bad
                // and would never end an episode on its own.
                rlGlue.RunEpisode(5000);
                double thisReturn = rlGlue.Return;
                sum += thisReturn;
                sumOfSquares += thisReturn * thisReturn;
            }
            rlGlue.MessageAgent("unfreeze learning");

            double mean = sum / n;
            // Unbiased sample variance.  The original divided by (n - 1.0f) -- a
            // stray float literal in otherwise double arithmetic -- now 1.0.
            double variance = (sumOfSquares - n * mean * mean) / (n - 1.0);
            return new EvaluationPoint(mean, Math.Sqrt(variance));
        }

        /// <summary>
        /// Prints one row of the results table: episode count, mean return, and
        /// standard deviation.  (The comment formerly here described
        /// offlineDemo's freeze-every-25-episodes schedule, not this method.)
        /// </summary>
        void printScore(int afterEpisodes, EvaluationPoint theScore) {
            Console.WriteLine("{0}\t\t{1:F2}\t\t{2:F2}", afterEpisodes, theScore.mean, theScore.standardDeviation);
        }

        /// <summary>
        /// Alternates learning and evaluation: after an initial evaluation of the
        /// untrained agent, runs 20 rounds of 25 learning episodes each, freezing
        /// the policy and evaluating it after every round, then saves all
        /// evaluation results to results.csv.
        /// </summary>
        void offlineDemo() {
            List<EvaluationPoint> results = new List<EvaluationPoint>();
            EvaluationPoint initialScore = evaluateAgent();
            printScore(0, initialScore);
            for (int i = 0; i < 20; i++) {
                for (int j = 0; j < 25; j++) {
                    // RunEpisode(0) presumably means "no step cutoff" for the
                    // learning episodes -- TODO confirm against the codec docs.
                    rlGlue.RunEpisode(0);
                }
                EvaluationPoint currentScore = evaluateAgent();
                printScore((i + 1) * 25, currentScore);
                results.Add(currentScore);
            }

            Console.WriteLine("The results of this experiment have been saved to a" +
                    " comma-separated value file called results.csv that you may open with Matlab, Octave, Excel, etc.");

            saveResultsToCSVFile(results, "results.csv");

        }

        /// <summary>
        /// Runs the full demo: the offline learning/evaluation loop, then saving,
        /// clearing, and reloading the agent's value function (via agent
        /// messages), then evaluations from a fixed start state (2,3) and again
        /// from a random start state (via environment messages).
        /// </summary>
        public void runExperiment() {
            rlGlue.Init();
            Console.WriteLine("Starting offline demo\n----------------------------\nWill alternate learning for 25 episodes, then freeze policy and evaluate for 10 episodes.\n");
            Console.WriteLine("After Episode\tMean Return\tStandard Deviation\n-------------------------------------------------------------------------");
            offlineDemo();

            Console.WriteLine("\nNow we will save the agent's learned value function to a file....");

            rlGlue.MessageAgent("save_policy valuefunction.dat");

            Console.WriteLine("\nCalling RL_cleanup and RL_init to clear the agent's memory...");

            rlGlue.CleanUp();
            rlGlue.Init();

            // With memory cleared, this shows the quality of the default
            // (unlearned) policy as a baseline.
            Console.WriteLine("Evaluating the agent's default policy:\n\t\tMean Return\tStandardDeviation\n------------------------------------------------------");
            EvaluationPoint thisScore = evaluateAgent();
            printScore(0, thisScore);

            Console.WriteLine("\nLoading up the value function we saved earlier.");
            rlGlue.MessageAgent("load_policy valuefunction.dat");

            Console.WriteLine("Evaluating the agent after loading the value function:\n\t\tMean Return\tStandardDeviation\n------------------------------------------------------");
            thisScore = evaluateAgent();
            printScore(0, thisScore);

            Console.WriteLine("Telling the environment to use fixed start state of 2,3.");
            rlGlue.EnvMessage("set-start-state 2 3");

            // Start an episode so the environment actually enters the fixed
            // start state before we ask it to print the current state.
            GeneralState obs; GeneralAction action;
            rlGlue.Start(out obs, out action);

            Console.WriteLine("Telling the environment to print the current state to the screen.");
            rlGlue.EnvMessage("print-state");

            Console.WriteLine("Evaluating the agent a few times from a fixed start state of 2,3:\n\t\tMean Return\tStandardDeviation\n-------------------------------------------");
            thisScore = evaluateAgent();
            printScore(0, thisScore);

            Console.WriteLine("Evaluating the agent again with the random start state:\n\t\tMean Return\tStandardDeviation\n------------------------------------------------------");
            rlGlue.EnvMessage("set-random-start-state");
            thisScore = evaluateAgent();
            printScore(0, thisScore);

            Console.WriteLine("\nProgram Complete.");
            rlGlue.CleanUp();


        }
    }
}
