'''

Module containing Q-learning for the domination game. 

Update function loosely based on Sutton & Barto's Q-Learning:
http://www.cse.iitm.ac.in/~cs670/book/node65.html

'''

from OpenNero import *
import action_script
from constants import *
from custom_functions import *

import os, shutil
import cPickle
import math
import random, time
from operator import itemgetter
from copy import copy
from pprint import pprint
import time

time.sleep(1)
file = open("pid-file", "r")
pid = file.read()
file.close()
os.remove("pid-file")

print "process-id:", pid

COUNTER_FILE = "counter" + pid 

class QBrain(AgentBrain):
    '''
    Class that holds the Q-dictionary and the necessary update functions.
    
    Holds reference to the parameters and the pickle file.

    NOTE: the learning state (q_space, belief spaces, episode counters) is
    stored as attributes on the AgentBrain base class, so every QBrain
    instance in this process shares a single Q-table and belief space.
    '''
    
    # Class-level defaults; shadowed by instance attributes set in start().
    action_set = []
    previous_state = None
    last_cell = 0
    
    def __init__(self):
        '''
        Initializes the Q-Dictionary and sets all parameters.

        The try/except acts as a once-per-process guard: only the first
        agent constructed initializes the shared class-level state.
        '''

        AgentBrain.__init__(self) # do not remove!
        try:
            AgentBrain.q_space
        except:
            # NOTE(review): bare except; AttributeError is the case intended here.
            AgentBrain.started = False
            
            # Shared Q-table: state tuple -> {action: q-value}, plus
            # bookkeeping keys ('episodes', 'seed').
            AgentBrain.q_space = {}
            AgentBrain.q_space['episodes'] = 0
            
            AgentBrain.evaluate = EVALUATE
            if EVALUATE:
                print "EVALUATION MODE"
            AgentBrain.eval_count = 0
            if FIXED_SEED:
                AgentBrain.q_space['seed'] = SEED
            else:
                AgentBrain.q_space['seed'] = None
            
            # Best (lowest) episode step count seen so far; save_qspace()
            # writes the '-BEST' pickle whenever this record is beaten.
            AgentBrain.record_count = 999999999
            self.set_file_pointers()
            self.load_files()
    
    ##################### AgentBrain methods #######################
    def initialize(self, init_info):
        """
        init_info contains a description of the observations the agent will see
        and the actions the agent will be able to perform
        """
        self.init_info = init_info
        self._init_actionset()
        
        return True
    
    def start(self, t, sensors):
        """
        Take in the initial sensors and return the first action.
        Update state to initialize the belief and state spaces immediately
        Get an action, and lock it
        Update previous state and action for future value updates
        """
        
        """
        belief space:
        {grid cell no:initial number of pellets, or -1 if not yet observed}
        """
        # First agent to start an episode resets all shared episode state.
        if not AgentBrain.started:
            AgentBrain.belief_space = []
            for i in range(N_GRID):
                AgentBrain.belief_space.append(-1)
            if RELATIVE_BELIEF:
                # rel_bel_space tracks the CURRENT crumb count per cell
                # (always overwritten), unlike belief_space (first sight only).
                AgentBrain.rel_bel_space = []
                for i in range(N_GRID):
                    AgentBrain.rel_bel_space.append(-1)
            
            # Outside pure-evaluation runs, evaluate every EVAL_INTERVAL-th
            # episode (greedy policy, no learning) to measure progress.
            if not EVALUATE:
                AgentBrain.eval_count += 1
                if EVAL_INTERVAL > 1 and AgentBrain.eval_count%EVAL_INTERVAL == 0:
                    AgentBrain.evaluate = True                    
                else:
                    AgentBrain.evaluate = False
            # print "eval_count:",AgentBrain.eval_count
            print "evaluate:",AgentBrain.evaluate
            
            AgentBrain.positions = dict()      # agent -> current grid cell
            AgentBrain.grid_targets = dict()   # agent -> targeted grid cell (for coordination)
            AgentBrain.time = time.time()
            AgentBrain.finished = False
            AgentBrain.cum_reward = 0
            AgentBrain.total_crumbs = 0
            AgentBrain.options_taken = 0
            AgentBrain.q_space['episodes'] += 1
            AgentBrain.started = True
            # -3 presumably discounts the bookkeeping keys in q_space
            # ('episodes', 'seed', ...) -- TODO confirm the exact count.
            print "State space size: %d"%(max(len(AgentBrain.q_space)-3, 0))
        self.options_taken = 0
        self.options_taken_a = [0] * len(self.action_set)  # per-action counts
        writeCounter(AgentBrain.q_space['episodes'])
        
        self.step_count = 0
        self.cum_reward = 0
        AgentBrain.grid_targets[self] = -1  # -1 = no target chosen yet
        self.update_belief_space(sensors)
        self.update(sensors, None)
        action = self.get_action(sensors)
        
        # Remember the state/action pair for the next Q-update.
        self.previous_action = action
        self.previous_state = copy(self.guessed_state)
        self.previous_state_list = copy(self.guessed_state_list)
        self.last_cell = 0
        self.n_steps_taken_option = 0
        self.reward_this_option = 0
        self.final_crumb = False
        
        return action
    
    def act(self, t, sensors, reward):
        """
        Take in new sensors and reward from previous action and return the next action
        Update state and belief
        if action lock can be released, choose a new action, otherwise, stick to the old one.
        """
        self.cum_reward += reward[0]
        AgentBrain.cum_reward += reward[0]
        # total_crumbs counts crumbs only, so the per-step movement penalty
        # is subtracted back out of the raw reward.
        AgentBrain.total_crumbs += reward[0] -MOVE_PENALTY
        # Accumulate discounted reward within the current option (SMDP-style).
        self.reward_this_option += reward[0] * GAMMA ** self.n_steps_taken_option
        self.n_steps_taken_option += 1
        self.step_count += 1
        
        self.update_belief_space(sensors)
  
        # sensors[2] == 1 signals the current option has terminated
        # (objective reached or agent stuck); time to pick a new action.
        if sensors[2] == 1: #Objective reached or stuck, choose new action
            self.update(sensors, reward)
            action = self.get_action(sensors)
            self.n_steps_taken_option = 0
            self.reward_this_option = 0
            self.previous_state = copy(self.guessed_state)
            self.previous_state_list = copy(self.guessed_state_list)
            
            self.previous_action = action
            self.options_taken += 1  
            AgentBrain.options_taken += 1
            self.options_taken_a[int(action[0])] += 1     
        else:
            # Option still running: keep the locked action; optionally do a
            # one-step Q-update every tick (ANYTIME_UPDATE).
            action = self.previous_action
            if(ANYTIME_UPDATE):
                self.update(sensors, reward)
                self.previous_state = copy(self.guessed_state)
                self.previous_state_list = copy(self.guessed_state_list)
                
                self.previous_action = action                
        #if sum(AgentBrain.rel_bel_space) == 0:
            #print "Believed to be finished!"
        
        return action
    
    def end(self, t, reward):
        """
        take in final reward. Update once more
        """
        
        # Bonus for the agent that grabbed the final crumb of the episode.
        if(self.final_crumb):
            reward[0] += FINAL_REWARD 
                
        self.cum_reward += reward[0]
        AgentBrain.cum_reward += reward[0]
        #AgentBrain.total_crumbs += 1        
        self.reward_this_option += reward[0]  * GAMMA ** self.n_steps_taken_option
        
        # print "Final reward: ", reward[0]
        
        # Terminal update: sensors=None makes update() use q_max = 0.
        self.update_belief_space(None)        
        self.update(None, reward)
        
        # Only the first agent to reach end() does the per-episode saving
        # and logging; the rest just print their own stats.
        if AgentBrain.finished == False:
            AgentBrain.finished = True

            if not EVALUATE and EVAL_INTERVAL <= 1:
                self.save_results()
                self.save_qspace()
            elif AgentBrain.evaluate:
                self.save_results()
            else:
                self.save_qspace()
            
            print "Episode %d ended in %d steps in %d seconds" % (AgentBrain.q_space['episodes'], self.step_count, time.time()-AgentBrain.time)
            print "Cumulative reward for all agents this episode: %.1f, crumbs: %d"%(AgentBrain.cum_reward, AgentBrain.total_crumbs)
            AgentBrain.started = False
            # print 'AgentBrain.rel_bel_space: ', AgentBrain.rel_bel_space
        # print 'self.guessed_state: ', self.guessed_state
        print "Reward for agent %d this episode: %d"%(self.sid, self.cum_reward)
        print 'Total options taken: ',self.options_taken_a
        # print 'AgentBrain.belief_space: \t', AgentBrain.belief_space
        # print 'self.lastActionv:        \t', self.lastActionv
        return True
    
    def destroy(self):
        """
        called when the agent is destroyed
        """
        return True
    
    ##################### Q learning methods #######################
    
    def _init_actionset(self):
        '''
        Create actionset
        
        Separate function for easy modification
        '''
        # Actions are a contiguous integer range [min, max] taken from the
        # environment's action description.
        mina = self.init_info.actions.min(0)
        maxa = self.init_info.actions.max(0)
        self.action_set = range(int(mina), int(maxa)+1)
    
    def _get_actionset_from_qspace(self):
        '''
        Extracts set of actions from loaded file
        '''
        # NOTE(review): relies on an arbitrary q_space key being a state
        # entry; if keys()[0] happens to be the bookkeeping key 'episodes'
        # (an int) this raises -- confirm callers guard against that.
        self.action_set = AgentBrain.q_space[AgentBrain.q_space.keys()[0]].keys()
    
    def _init_state(self, state):
        '''
        Create initial q_space dictionary
        '''
        # Initialize the state AND all of its symmetric equivalents so the
        # symmetry-shared updates in update() always find their entries.
        states = self.get_same_states(state)
        for st in states:
            AgentBrain.q_space[st] = {}
        
            if(NSEW_ACTIONS):
                # Restrict to the moves actually possible from this cell.
                grid = get_grid_from_state(st)
                nsew_action_set = give_nsew_options(grid)
                for a in nsew_action_set:
                    AgentBrain.q_space[st][int(a)] = 0.0
            else:
                for a in self.action_set:
                    AgentBrain.q_space[st][int(a)] = 0.0
    
    def get_same_states(self,state, action=None):
        """
        rotate and mirror the state to get all possible states identical to the one concerned
        8 possibilities: original, horizontal mirror, vertical mirror, diagonal:bl-tr, diagonal:tl-br, 90deg, 180deg, 270deg

        If an action is given, returns (state, transformed action) pairs so
        a Q-update can be mirrored onto every symmetric state.
        """
        # original
        states = [copy(state)]
        
        if not SAME_STATES:
            # NOTE(review): 'tupelize_states' (plural) is not defined in this
            # class -- this branch would raise AttributeError when
            # SAME_STATES is False; presumably 'tupelize_state' was meant.
            if action is None:
                return self.tupelize_states(states)
            else:
                return zip(self.tupelize_states(states), [action])
        # If not square, diagonal, 90 and 270 degrees cannot be done
        if GRID_HEIGHT == GRID_WIDTH == 3:
            # Currently hard coded for 3x3 and 5x5
            # horizontal mirror:
            states.append(copy(state[6:]) + copy(state[3:6])+ copy(state[0:3]))
            # vertical mirror:            
            s = copy(state)
            states.append(  s[2:3]+ s[1:2]+ s[0:1]+\
                            s[5:6]+ s[4:5]+ s[3:4]+\
                            s[8:9]+ s[7:8]+ s[6:7])
            # diagonal bl-tr mirror:            
            s = copy(state)
            states.append(  s[8:9]+ s[5:6]+ s[2:3]+\
                            s[7:8]+ s[4:5]+ s[1:2]+\
                            s[6:7]+ s[3:4]+ s[0:1] )
            # diagonal tl-br mirror:            
            s = copy(state)
            states.append(  s[0:1]+ s[3:4]+ s[6:7]+\
                            s[1:2]+ s[4:5]+ s[7:8]+\
                            s[2:3]+ s[5:6]+ s[8:9] )
            # 90:            
            s = copy(state)
            states.append(  s[6:7]+ s[3:4]+ s[0:1]+\
                            s[7:8]+ s[4:5]+ s[1:2]+\
                            s[8:9]+ s[5:6]+ s[2:3] )
            #180:
            s = copy(state)
            states.append(  s[8:9]+ s[7:8]+ s[6:7]+\
                            s[5:6]+ s[4:5]+ s[3:4]+\
                            s[2:3]+ s[1:2]+ s[0:1] )
            #270:
            s = copy(state)
            states.append(  s[2:3]+ s[5:6]+ s[8:9]+\
                            s[1:2]+ s[4:5]+ s[7:8]+\
                            s[0:1]+ s[3:4]+ s[6:7] )
                            
        elif GRID_HEIGHT == GRID_WIDTH == 5:
            # horizontal mirror:
            states.append(copy(state[20:]) + copy(state[15:20]) + copy(state[10:15]) +\
                            copy(state[5:10]) + copy(state[0:5]))
            # vertical:
            s = copy(state)
            states.append(    s[4:5]+  s[3:4]+  s[2:3]+  s[1:2]+  s[0:1]+\
                             s[9:10]+  s[8:9]+  s[7:8]+  s[6:7]+  s[5:6]+\
                            s[14:15]+s[13:14]+s[12:13]+s[11:12]+s[10:11]+\
                            s[19:20]+s[18:19]+s[17:18]+s[16:17]+s[15:16]+\
                            s[24:25]+s[23:24]+s[22:23]+s[21:22]+s[20:21])
            # bl-tr:
            s = copy(state)
            states.append(  s[24:25]+s[19:20]+s[14:15]+ s[9:10]+  s[4:5]+\
                            s[23:24]+s[18:19]+s[13:14]+  s[8:9]+  s[3:4]+\
                            s[22:23]+s[17:18]+s[12:13]+  s[7:8]+  s[2:3]+\
                            s[21:22]+s[16:17]+s[11:12]+  s[6:7]+  s[1:2]+\
                            s[20:21]+s[15:16]+s[10:11]+  s[5:6]+  s[0:1])
            # tl-br:
            s = copy(state)
            states.append(    s[0:1]+  s[5:6]+s[10:11]+s[15:16]+s[20:21]+\
                              s[1:2]+  s[6:7]+s[11:12]+s[16:17]+s[21:22]+\
                              s[2:3]+  s[7:8]+s[12:13]+s[17:18]+s[22:23]+\
                              s[3:4]+  s[8:9]+s[13:14]+s[18:19]+s[23:24]+\
                              s[4:5]+ s[9:10]+s[14:15]+s[19:20]+s[24:25])
            # 90:
            s = copy(state)
            states.append(  s[20:21]+s[15:16]+s[10:11]+  s[5:6]+  s[0:1]+\
                            s[21:22]+s[16:17]+s[11:12]+  s[6:7]+  s[1:2]+\
                            s[22:23]+s[17:18]+s[12:13]+  s[7:8]+  s[2:3]+\
                            s[23:24]+s[18:19]+s[13:14]+  s[8:9]+  s[3:4]+\
                            s[24:25]+s[19:20]+s[14:15]+ s[9:10]+  s[4:5])
            # 180:
            s = copy(state)
            states.append(  s[24:25]+s[23:24]+s[22:23]+s[21:22]+s[20:21]+\
                            s[19:20]+s[18:19]+s[17:18]+s[16:17]+s[15:16]+\
                            s[14:15]+s[13:14]+s[12:13]+s[11:12]+s[10:11]+\
                             s[9:10]+  s[8:9]+  s[7:8]+  s[6:7]+  s[5:6]+\
                              s[4:5]+  s[3:4]+  s[2:3]+  s[1:2]+  s[0:1])
            # 270:
            s = copy(state)
            states.append(    s[4:5]+ s[9:10]+s[14:15]+s[19:20]+s[24:25]+\
                              s[3:4]+  s[8:9]+s[13:14]+s[18:19]+s[23:24]+\
                              s[2:3]+  s[7:8]+s[12:13]+s[17:18]+s[22:23]+\
                              s[1:2]+  s[6:7]+s[11:12]+s[16:17]+s[21:22]+\
                              s[0:1]+  s[5:6]+s[10:11]+s[15:16]+s[20:21])
        else:
            print "THIS GRID SIZE IS NOT YET IMPLEMENTED, NO STATE SIMPLIFICATION MADE"
        tup = []
        for ss in states:
            tup.append(self.tupelize_state(ss))
        
        if action is not None:
            # The action must be transformed along with each symmetry;
            # lookup tables come from constants (ACTION_TRANSFORM_3/_5).
            if GRID_HEIGHT == GRID_WIDTH == 3:
                actions = copy(ACTION_TRANSFORM_3[action])
            elif GRID_HEIGHT == GRID_WIDTH == 5:
                actions = copy(ACTION_TRANSFORM_5[action])
            return zip(tup, actions)
        
        return tup
    
    def tupelize_state(self,state):
        '''
        Flatten a list of per-cell [a, b, c] lists into one hashable tuple
        usable as a q_space key.
        '''
        t= tuple(item for sublist in state for item in sublist)
        return t
    
    def _get_a_v_max(self, sensor):
        '''
        Returns action-value that has highest q-value in <state>

        Also (re)computes self.guessed_state / self.guessed_state_list as a
        side effect, initializing the state in q_space if it is new.
        '''
        sumv = 0
        n = 0

        # Update belief space: take average over known cells, to determine above or under average
        for val in AgentBrain.rel_bel_space:
            if val > -1:
                sumv += val
                n += 1
        # NOTE(review): Python 2 integer division here, and this raises
        # ZeroDivisionError if no cell has been observed yet -- presumably
        # the agent always senses its own cell first; confirm.
        average = sumv/n

        guessed_state = []
        # guess a state: list of [a,b,c]=[above/under average crumbs, current_agent is there, other_agents are there] per grid cell
        for i, v in enumerate(AgentBrain.rel_bel_space):
            state_cell = [1, 0, 0]
            if i == sensor[0]:
                state_cell[1] = 1
            for agent, cell in AgentBrain.positions.items():
                if i == cell and agent != self:
                    state_cell[2] = 1

            # Crumb-level encoding: 0 = empty, 1 = below average,
            # 2 = at/above average or unobserved (-1).
            if v == 0:
                state_cell[0] = 0
            elif v >= average or v == -1:
                state_cell[0] = 2
            else:
                state_cell[0] = 1
            
            guessed_state.append(state_cell)

        self.guessed_state_list = guessed_state
        # Convert guessed_state to flat tuple
        self.guessed_state = self.tupelize_state(guessed_state)
        # If necessary, initialize state
        if self.guessed_state not in AgentBrain.q_space:
            self._init_state(self.guessed_state_list)
        # Pick the action with highest value in current guessed state
        actionvalues = []
        # Coordinated: drop actions (target cells) already claimed by
        # other agents, as long as at least one action remains.
        if(COORDINATE_TARGETS):
            possible_actions = copy(AgentBrain.q_space[self.guessed_state])
            for (agent, grid) in AgentBrain.grid_targets.items():
                if grid in possible_actions and agent != self and len(possible_actions) > 1:
                    del possible_actions[grid]
                    #print "Removing action %d" % (grid)
                    
            actionvalues = sorted(possible_actions.items(), key=itemgetter(1), reverse=True)
            
        # non-coordinated
        else:        
            actionvalues = sorted(AgentBrain.q_space[self.guessed_state].items(), key=itemgetter(1), reverse=True)
        # Choose randomly from the best actions:
        high = actionvalues[0][1]
        choose = [actionvalues[0]]
        for a,v in actionvalues[1:]:
            if v == high:
                choose.append((a,v))
            else:
                break
        s = random.sample(choose,1)[0]
        # Add guessed state for debugging purposes
        s=(s[0], s[1], self.guessed_state)
        self.lastActionv = actionvalues
        return s
    
    
    def get_action(self, sensors):
        '''
        Return the action for the agent, this is random epsilon percent of the time,
        otherwise decide on the current belief state

        Exploration is disabled while AgentBrain.evaluate is set.
        '''
        action = self.init_info.actions.get_instance()
        
        
        if not AgentBrain.evaluate and (random.random() < EPSILON):
            # Epsilon-greedy exploration: random choice among the moves
            # possible from the current cell.
            options = give_nsew_options(sensors[0])
            # print "RANDOM"
            #             print EPSILON
            #             
            if(COORDINATE_TARGETS):
                # Avoid cells already targeted by other agents (keep >= 1).
                for (agent, grid) in AgentBrain.grid_targets.items():                    
                    if grid in options and agent != self and len(options) > 1:
                        options.remove(grid)
                        #print "removing option %d" % (grid)
                            
            nr_actions = len(options)
            action[0] = options[int(math.floor(nr_actions*random.random()))]
            #print 'action: %d'%(action[0])
        else:
            # Greedy choice from the Q-table.
            s = self._get_a_v_max(sensors)
            #print s
            #print "\t", s[0:2]
            action[0] = s[0]
            #print action
            
        
        # Publish the chosen target so other agents can coordinate.
        AgentBrain.grid_targets[self] = action[0]
        
        return action
    
    def update_belief_space(self, sensors):
        '''
        Merge crumb observations from the sensors into the shared belief
        spaces. belief_space records only the FIRST observation per cell;
        rel_bel_space (if RELATIVE_BELIEF) always holds the latest count.

        sensors layout (as used here): [0]=own cell, [1]=crumbs in own cell,
        [2]=option-finished flag, then (cell, crumbs) pairs from index 3 on.
        '''
        if sensors is not None:
            cell = int(sensors[0]) # Cell in which the agent is
            n = int(sensors[1]) # no of crumbs in that cell
            
            # track position
            AgentBrain.positions[self] = cell            
            if (AgentBrain.belief_space[cell] == -1):
                # Update number of crumbs in cell upon first sense of this cell
                AgentBrain.belief_space[cell] = n
            if RELATIVE_BELIEF:
                # always update crumbs n in cell, for the relative average
                AgentBrain.rel_bel_space[cell] = n
            if SENSE_RADIUS > 0:
                for i in range(3, len(sensors), 2): # process crumb information from other cells
                    cell = int(sensors[i]) # Cell 
                    if cell > -1: # cell is not 'outside' field
                        n = int(sensors[i + 1]) # no of crumbs in that cell
                    
                        if (AgentBrain.belief_space[cell] == -1):
                            # Update number of crumbs in cell upon first sense of this cell
                            AgentBrain.belief_space[cell] = n
                        if RELATIVE_BELIEF:
                            # always update crumbs n in cell, for the relative average
                            AgentBrain.rel_bel_space[cell] = n
            else:
                # Zero sense radius: sensors report ALL cells, no -1 markers.
                for i in range(3, len(sensors), 2): # process crumb information from ALL other cells
                    cell = int(sensors[i]) # Cell 
                    n = int(sensors[i+1])
                    if (AgentBrain.belief_space[cell] == -1):
                        # Update number of crumbs in cell upon first sense of this cell
                        AgentBrain.belief_space[cell] = n
                    if RELATIVE_BELIEF:
                        # always update crumbs n in cell, for the relative average
                        AgentBrain.rel_bel_space[cell] = n
                        
            #pprint(AgentBrain.rel_bel_space)

    def update(self, sensors, reward):
        '''
        Update current state-action Q value Q(s,a) and set previous state
        First update belief space.
        Then update state space if action is finished

        sensors is None at episode end (terminal update, q_max = 0);
        reward is None on start() (no update, just state initialization).
        Skipped entirely while evaluating.
        '''
        if AgentBrain.evaluate:
            # When evaluating: don't update
            return
        if reward is not None:

            # Get previous state and action, and their values
            p_action = self.previous_action[0]
            p_state = tuple(self.previous_state)
            p_state_list = tuple(self.previous_state_list)
            
        
            if sensors is not None:
                # Get maximal value over the action of the current state
                if p_state not in AgentBrain.q_space:
                    self._init_state(p_state_list)
                if p_action not in AgentBrain.q_space[p_state]:
                    return
                q_sa = AgentBrain.q_space[p_state][p_action] 
                    
                q_max = self._get_a_v_max(sensors)[1]
               
            else:
                # Terminal step: no successor state, so q_max is 0.
                if p_state not in AgentBrain.q_space:
                    # NOTE(review): passes p_state (flat tuple) where the
                    # sensors branch above passes p_state_list -- likely
                    # meant to be p_state_list here too; confirm.
                    self._init_state(p_state)
                if p_action not in AgentBrain.q_space[p_state]:
                    return
                q_sa = AgentBrain.q_space[p_state][p_action] 
                q_max = 0
            # Update the previous state action pair value
            
            # One-step update per tick vs. discounted SMDP-style update over
            # the whole option, depending on ANYTIME_UPDATE.
            if(ANYTIME_UPDATE):
                q = q_sa + ALPHA* (reward[0] + GAMMA * q_max - q_sa )
            else:
                q = q_sa + ALPHA* (self.reward_this_option + GAMMA**self.n_steps_taken_option * q_max - q_sa ) 
            # Write the same value into every symmetric (state, action) pair.
            for st, act in self.get_same_states(p_state_list, p_action):
                AgentBrain.q_space[st][act] = q
            #print GAMMA
            #print "Updating state %s with action %s with %d steps with gamma of %f to state-action value of %f" % (p_state, p_action,  self.n_steps_taken_option, GAMMA**self.n_steps_taken_option, q_sa + ALPHA* (self.reward_this_option + GAMMA**self.n_steps_taken_option * q_max - q_sa ))                
            
        else:
            # If no reward (on start): make sure the guessed state is initialized
            self._get_a_v_max(sensors)
            

    ##################### File I/O methods #######################
    
    def set_file_pointers(self):
        '''
        Function that correctly sets up the path and name of the file in which 
        the instance will be saved
        '''
        this_path = os.path.abspath(os.path.dirname(__file__))
        # NOTE(review): string comparison against "<type 'str'>" is a clunky
        # Python 2 isinstance(SAVEFILE, str) check.
        if str(type(SAVEFILE)) == "<type 'str'>":
            AgentBrain.fullpath = this_path + os.sep + SAVEFILE
        else:
            # No savefile configured: derive a name from the learning
            # parameters, e.g. 'QSpace-e0_1a0_5g0_9'.
            AgentBrain.fullpath = this_path + os.sep + 'QSpace' + \
                '-e' + str(EPSILON).replace('.','_') + \
                'a' + str(ALPHA).replace('.','_') + 'g' +\
                str(GAMMA).replace('.', '_')
    
    def load_files(self):
        '''
        Get q_space from file, if it exists.

        In EVALUATE mode the pickle MUST exist (writes -1 to the counter
        file and re-raises otherwise). In training mode an existing file is
        never overwritten: a '-<i>' suffix is appended instead.
        '''
        if EVALUATE:
            try:
                f_q = open(AgentBrain.fullpath + Q_EXT, 'rb')
            except IOError as e:
                print "UNKNOWN FILENAME! WILL NOT EVALUATE"
                writeCounter(-1)
                raise e
            else:
                print "EVALUATING: ", AgentBrain.fullpath, " -> ",f_q
                AgentBrain.q_space = cPickle.load(f_q)
                f_q.close()
                self.init_resultsfile('eval')
            print 'episodes in q_space:', AgentBrain.q_space['episodes']
            AgentBrain.q_space['episodes'] = 0
                
        else:
            if os.path.isfile(AgentBrain.fullpath+Q_EXT):
                # Iterate until a filename is found that doesn't exist yet
                i=1
                while os.path.isfile(AgentBrain.fullpath + '-'+str(i)+Q_EXT):
                    i+=1
                AgentBrain.fullpath += '-'+str(i)
                # SAVEFILE is also patched so set_settingsfile() records the
                # actual name used.
                global SAVEFILE
                SAVEFILE = SAVEFILE + '-'+str(i)
                print "FILE REQUESTED EXISTS ALREADY, A NEW FILE WILL BE CREATED AT %s"%(AgentBrain.fullpath)
            else:
                print "FILE REQUESTED WILL BE INITIALIZED"
            self.set_settingsfile()
            self.init_resultsfile()
    
    def save_qspace(self, verbose=True):
        '''
        Saves pickle with qvalues

        Also refreshes the '-BEST' pickle whenever this episode used fewer
        steps than any previous one.
        '''
        mode = 'wb'
        qspacefile = open(AgentBrain.fullpath+Q_EXT, mode)
        cPickle.dump(AgentBrain.q_space, qspacefile, cPickle.HIGHEST_PROTOCOL)
        qspacefile.close()
        if verbose:
            print "Instance saved to", AgentBrain.fullpath+Q_EXT
        if self.step_count < AgentBrain.record_count:
            bestfile = open(AgentBrain.fullpath+'-BEST'+Q_EXT, mode)
            cPickle.dump(AgentBrain.q_space, bestfile, cPickle.HIGHEST_PROTOCOL)
            bestfile.close()
            AgentBrain.record_count = self.step_count
 
    def init_resultsfile(self, evalstr=''):
        '''
        Creates a fresh results CSV (truncating any existing one) with the
        header row only; save_results() appends the per-episode rows.
        '''
        f_r = open(AgentBrain.fullpath+RES_EXT+evalstr, 'w')
        st = "Episode, total_reward, time_taken, crumb_amount, Number of steps, state space size, options_taken\n"
        f_r.write(st)
        f_r.close()
        print "New resultsfile made at %s"%AgentBrain.fullpath+RES_EXT+evalstr
    
    def save_results(self):
        '''
        Saves cumulative reward and time taken in results.csv, added to the current one.
        '''
        if EVALUATE:
            evalstr = 'eval'
        else:
            evalstr = ''
        f = open(AgentBrain.fullpath+RES_EXT+evalstr, 'a')
        # state space size uses the same '-3 bookkeeping keys' convention
        # as the print in start().
        st = "%d, %d, %.3f, %d, %d, %d, %d\n"%(AgentBrain.q_space['episodes'], AgentBrain.cum_reward, time.time()-AgentBrain.time, AgentBrain.total_crumbs, self.step_count, max(len(AgentBrain.q_space)-3, 0), AgentBrain.options_taken)
        f.write(st)
        f.close()
        print "RESULTS SAVED"
    
    def set_settingsfile(self):
        '''
        Saves the settingsfile under the given filename, for evaluating purposes

        NOTE(review): ConfigParser is not imported in this file's visible
        import block -- presumably provided via a star import; confirm.
        '''
        cf = ConfigParser.RawConfigParser()
        cf.read('./Roomba/temp-settings.cfg')
        cf.set('experiment', 'savefile', SAVEFILE) # fullpath might have a -1/2/3 added to it
        f = open(AgentBrain.fullpath+SETTINGS_EXT, "w")
        cf.write(f)
        f.close()
    
    def get_rel_bel_space(self):
        """
        Written for debugging purposes, used in module (is_episode_over())
        """
        return AgentBrain.rel_bel_space
        
def writeCounter(count):
    try:
        file = open(COUNTER_FILE, 'w')
        file.write(str(count))
        file.close()
        if count == -1:
            time.sleep(10) # to ensure run_roomba picks it up and exits.. 
    except SyntaxError: 
        print "SyntaxError writeCounter.."
    except IOError:
        print "IOError writeCounter.."

    #print 'counter %i written to file %s and closed!' % (count, COUNTER_FILE)
    
