/*
clsquare - closed loop simulation system
Copyright (c) 2004, 2008 Neuroinformatics Group, Prof. Dr. Martin Riedmiller,
University of Osnabrueck

Authors: Martin Riedmiller, Sascha Lange

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in
     the documentation and/or other materials provided with the
     distribution.
   * Neither the name of the <ORGANIZATION> nor the names of its
     contributors may be used to endorse or promote products derived
     from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */

#ifndef _STATISTICS_H_
#define _STATISTICS_H_
#define MAX_COSTS 1000
#include "setdef.h"
#include "global.h"

  /** Provides useful statistical information about the performance of the used learning algorithm (controller).
   * See the documentation of the different statistics modes (statistics_mode). */
class Statistics{
 public:
  /** The statistics module is notified every control cycle about the current state of the process.
   * Calls the update method for the statistics of the current episode.
   * \param state: present state
   * \param observation: observation of current state
   * \param action: executed action in current state
   * \param reward: reward given by the plant for executing action in current state
   * \param cycle: control cycle in current episode
   * \param episode: current episode
   * \param total_time: time elapsed since start of simulation loop
   * \param episode_time: time elapsed since start of current episode
   * \param total_num_of_cycles: number of control cycles since start of simulation loop
   * \return true, for success. */
     bool notify(const double* state, const double* observation, const double* action, 
                 const double reward, const long cycle, const long episode,
                 const double total_time, const double episode_time, const long total_num_of_cycles);

  /** Initializes statistics.
   * \param _state_dim: dimension of the state space
   * \param _observation_dim: dimension of the observation space
   * \param _action_dim: dimension of the action space
   * \param _delta_t: duration of one control cycle
   * \param _max_cycles_per_episode: maximal number of control cycles per episode
   * \param fname: file which contains the configuration (0: use defaults)
   * \return true, for success. */ 
  bool init(int _state_dim, int _observation_dim, int _action_dim,  double _delta_t, long _max_cycles_per_episode, const char *fname=0);

  /** Closes statistics.
   * \return true, for success. */
  bool deinit();

  /** Notifies that an episode has started.
   * \param initial_state: initial state of episode
   * \param initial_observation: initial observation of episode
   * \param episode_ctr: started episode */   
  void notify_episode_starts(const double* initial_state, const double* initial_observation, const long episode_ctr);

  /** Notifies that an episode has stopped.
   * Collects all remaining statistical information of the current episode.
   * \param final_state: final state
   * \param final_observation: final observed state
   * \param final_reward: final reward of terminal state
   * \param is_terminal_state: true, if final state of episode is a terminal state
   * \param episode_ctr: current episode. */   
  void notify_episode_stops(const double* final_state, const double* final_observation, const double final_reward, 
                            const bool is_terminal_state, const long episode_ctr);

 protected:
  /** If true, no legend (header) is printed into the statistics file. */
  bool noheader;

  /** Subset of the state space, called the working area. */
  SetDef working_area;

  /** Subset of the state space to be reached by the learning agent. */
  SetDef goal_area;

  /** Subset of the state space to be avoided by the learning agent. */
  SetDef avoid_area;

  /** Statistics mode.
   * \li 0: standardized benchmark
   * \li 1: raw  
   * \li -1: default (only rewards)
   */
  int statistics_mode;
  
  enum {OBSERVATION_BASED=0, STATE_BASED};
  int type_of_definitions; ///< The space used for the definitions of goal and working area: either OBSERVATION_BASED or STATE_BASED.
  int state_dim;           ///< Dimension of the state space

  /** Dimension of observation space. */
  int observation_dim;
  
  /** Dimension of action space. */
  int action_dim;

  /** Duration of one control cycle in seconds. */
  float delta_t;

  /** Maximal length of an episode in control cycles. */
  long max_cycles_per_episode;

  /** Frequency of an output (print to file) of the statistics module. */
  long average_over_n_episodes; 

  /** Filename of statistics file. */
  char prot_fname[MAX_STR_LEN];
    
  /** Output stream (stream to file) of statistics. */
  std::ostream *out;
 
  /** Opens the file for the statistics output. */
  bool open_file();

  /** Internal episode counter. */
  long episode_ctr_internal;
  

  /** Reads configuration of the statistics module.
   * \param fname: file which contains the configuration of the statistics module
   * \return true for success 
   *  
   * Possible options:    
   * \li goal_area: area of goal states
   * \li working_area: working area
   * \li avoid_area: area of states to avoid by the controller
   * \li average_over_n_episodes: average over a number of episodes
   * \li statistics_mode: mode
   * \li statistics_file: file to print the statistics
   * \li noheader: prints no legend */  
  bool read_options(const char * fname);

  /** Initializes the statistics of the current episode (curr_perf). */
  void init_curr_perf();

  /** Initializes the accumulated statistics (acc_perf). */
  void init_acc_perf();

  /** Collects some statistical information at the end of an episode.
   * \param final_state: final state of the episode
   * \param final_observation: final observation of the episode
   * \param final_reward: final reward of the terminal state
   * \param is_terminal_state: true, if the final state of the episode is a terminal state */
  void finish_episode(const double* final_state, const double* final_observation, const double final_reward, const bool is_terminal_state);

  /** Updates statistics for the current episode.
   * \param final_state: state in the current cycle
   * \param observation: observation in current state 
   * \param action: action executed in current state 
   * \param reward: reward earned for executing action in current state */
  void update_curr_perf(const double* final_state, const double* observation, const double* action, const double reward);

  /** Updates the accumulated statistics. */
  void update_acc_perf();

  /** Outputs accumulated statistics and resets all statistics. */
  void write2file(const long episode);

  /** Outputs accumulated statistics (mode: standard benchmark) and resets all statistics. */
  void write2file_mode0(const long episode);

  /** Outputs accumulated statistics (mode: raw) and resets all statistics. */
  void write2file_mode1(const long episode);
  
  /** Outputs accumulated statistics (mode: default) and resets all statistics. */
  void write2file_mode_default(const long episode);
 

  /** Statistics of the current episode. */
  struct{
    /** Current episode ended in a terminal state. */
    bool touched_terminal;

    /** Touched a goal state in current episode. */
    bool touched_goal;
    
    /** Touched an "avoid" state in current episode. */
    bool touched_avoid;

    /** Touched a state outside of the working area in current episode. */
    bool touched_out_of_working;

    /** Current episode ended in a goal state. */
    bool ended_in_goal;

    /** Touched an "avoid" state or a state outside the working area in current episode. */
    bool crashed;

    /** Number of control cycles outside the goal area in current episode. */
    long cycles_out_of_goal;

    /** Number of control cycles until the current episode crashed. */
    long cycles_until_crashed;   

    /** Number of control cycles until a goal state is reached in current episode. */
    long cycles_until_reached_goal;

    /** Number of control cycles in current episode. */
    long num_cycles;

    /** Number of control cycles until the process stays permanently in the goal area in current episode. */
    long cycles_until_permanent_goal;
    
    /** Accumulated rewards of the current episode. */
    double reward;

    /** Final reward received at the end of the current episode. */
    double final_reward;

    
  } curr_perf; 
  
  
  /** Statistics accumulated over episodes (averaged/written out every average_over_n_episodes episodes). */
  struct{
    /** Number of episodes that touched a goal state. */
    long touched_goal;
    
    /** Number of episodes that touched an "avoid" state. */
    long touched_avoid;

    /** Number of episodes that touched a state outside the working area. */
    long touched_out_of_working;

    /** Number of episodes that ended in a goal state. */
    long ended_in_goal;
   
    /** Number of episodes that crashed. */
    long crashed;

    /** Average (over all episodes) number of control cycles out of the goal area. */
    double average_cycles_out_of_goal;

    /** Average (over all episodes) number of control cycles until an episode crashed. */
    double average_cycles_until_crashed;   

    /** Average (over all episodes) number of control cycles until an episode reached a goal state. */
    double average_cycles_until_reached_goal;

    /** Average (over all episodes) number of control cycles until the process stays permanently in the goal area. */
    double average_cycles_until_permanent_goal;
    
    /** Average accumulated reward over all episodes. */
    double average_reward;

    /** Average final reward over all episodes. */
    double average_final_reward;

    /** Accumulated number of control cycles over all episodes. */
    long cycles_total;

    /** Number of episodes. */
    long num_episodes;
  } acc_perf; 

};

#endif

