#!/usr/bin/env python
import sys
import os
import random
random.seed(0)

import pylab

import mmlf
                 
# Configure MMLF before any world is loaded: verbose console logging and the
# read/write area (directory MMLF uses for logs/persisted data).
# NOTE(review): both calls have framework-level side effects — TODO confirm
# the RW area location is acceptable for this experiment.
mmlf.setupConsoleLogging(level="debug")
mmlf.initializeRWArea()

def runEpisode():
    """Run one MMLF world and return the reward sequence of the first episode.

    Loads a linear Markov chain environment (length 51) driven by a random
    agent, runs it for 1.0 episode, and returns ``rewardDict[0]`` — the list
    of per-step rewards recorded by the agent for episode 0.
    """
    config = {
        'worldPackage': 'linear_markov_chain',
        'environment': {
            'moduleName': 'linear_markov_chain_environment',
            'configDict': {'length': 51},
        },
        'monitor': {'policyLogFrequency': 10},
        'agent': {
            'moduleName': 'random_agent',
            'configDict': {'Reward_log_frequency': 100},
        },
    }

    world = mmlf.loadWorld(config, useGUI=False)
    world.createMonitor()
    world.run(1.0)
    world.stop()

    # Rewards logged by the agent for episode index 0.
    return world.agent.rewardDict[0]

def finiteHorizon(rewardList, horizon):
    """Return the finite-horizon return at every time step.

    For each index ``t`` the result holds ``sum(rewardList[t:t+horizon])``,
    i.e. the undiscounted sum of the next ``horizon`` rewards. Near the end
    of the list the window is simply truncated (Python slicing semantics).

    :param rewardList: sequence of per-step rewards
    :param horizon: number of future steps to sum (window length)
    :return: list of the same length as ``rewardList``
    """
    # Comprehension replaces the manual append loop; slicing past the end
    # of the list yields a shorter (possibly empty) window automatically.
    return [sum(rewardList[t:t + horizon]) for t in range(len(rewardList))]

def infiniteHorizonDiscounted(rewardList, gamma):
    """Return the discounted return at every time step.

    For each index ``t`` the result holds
    ``sum(gamma**j * rewardList[t+j] for j >= 0)``, truncated at the end of
    the reward list ("infinite horizon" up to the available data).

    :param rewardList: sequence of per-step rewards
    :param gamma: discount factor applied per step into the future
    :return: list of the same length as ``rewardList``
    """
    # enumerate over the suffix gives the same accumulation order
    # (j = 0, 1, ...) as the original while loop, so float results match.
    return [sum(gamma ** j * reward
                for j, reward in enumerate(rewardList[t:]))
            for t in range(len(rewardList))]

def averageReward(rewardList):
    """Return the average of the remaining rewards at every time step.

    Element ``t`` of the result is ``mean(rewardList[t:])`` — the mean of
    all rewards from step ``t`` to the end of the episode.

    :param rewardList: sequence of per-step rewards
    :return: list of floats, same length as ``rewardList``
    """
    n = len(rewardList)
    # The suffix starting at t always has n - t elements, so the divisor
    # is never zero for any valid index t.
    return [float(sum(rewardList[t:])) / (n - t) for t in range(n)]

# Run one episode and plot the three return formulations over the same
# reward sequence for visual comparison.
rewards = runEpisode()

pylab.plot(finiteHorizon(rewards, 10), label="finite horizon")
pylab.plot(infiniteHorizonDiscounted(rewards, 0.9), label="infinite horizon discounted")
pylab.plot(averageReward(rewards), label="average")
# loc=0 lets matplotlib pick the "best" legend position automatically.
pylab.legend(loc=0)
pylab.show()

