'''
Layout for QLearning, not implemented/functional

This is an approach for the movement function that would use qlearning
it involves a simplified state space and a pair of
QLearners:
 QL1 -> take image (True/False)
 QL2 -> move, eat
States Spaces -> 2, 1 for each learner.
Simplify energy to be a binary
 Energy -> low or high
Image Label to be 4 possibilities ('no plant', 'none','P','N')
 State Space 1 = last move * Energy -> gives 8 possible states
 State Space 2 = last move * Energy * Image Label -> gives 32 possible states
For actions we have 2 sets (of course in line with the QLearners above)
 Action 1 = take image -> 2 possible actions
 Action 2 = move, eat -> 4 * 2 possible actions
Rewards -> use change in energy between rounds
This would give qtables of the following dimensions
 QL Table 1 = 8 * 2 = 16 -> nice and small
 QL Table 2 = 32 * 8 = 256 -> not as small...but significant reduction from using all energy values
 Restrict actions in case that there is no plant
The QLearners should use e-greedy, but we'll need a relatively long epoch in training
to get a reasonable exploration of the space so train on a lot of games:
 1000 actions per epoch

Here's the pythonic pseudo code
'''
'''module level support functions, dicts and variables'''
Num_States_1 = 8   # |Move| * |Energy| = 4 * 2
Num_States_2 = 32  # Num_States_1 * |ImageLabel| = 8 * 4
# Enumerations mapping symbolic names onto Q-table indices.
Energy = {'high': 0, 'low': 1}
Move = {name: idx for idx, name in enumerate(('up', 'down', 'left', 'right'))}
ImageLabel = {name: idx for idx, name in enumerate(('no plant', 'none', 'N', 'P'))}
# Action set 1: whether to take an image.
Actions_1 = [False, True]
# Action set 2: every (move, eat) pair — the four eat=False pairs first, then eat=True.
Actions_2 = [(m, e) for e in (False, True) for m in ('up', 'down', 'left', 'right')]
'''this will store state and action between rounds, assume we start in 0 -> up, high energy, no image label'''
SA_old = {'state_1_old': 0, 'action_1_old': None, 'state_2_old': 0, 'action_2_old': None}

classifier = get_our_classifier()

def getEnergy(energy):
    """Bucket a numeric energy level into 'high' or 'low', the keys used by the Energy dict."""
    # threshold of 16 separates the two coarse energy states
    return 'high' if energy > 16 else 'low'

def getState1(lastmove, energylevel):
    """Return the index into state space 1 (8 states) for a (last move, energy) pair."""
    # Move supplies the position within a 4-entry section (0-3); the energy
    # bucket selects which section, so it is scaled by the number of moves.
    return Move[lastmove] + len(Move) * Energy[getEnergy(energylevel)]

def getState2(lastmove, energylevel, imagelabel):
    """Return the index into state space 2 (32 states) for (last move, energy, image label)."""
    # the state-1 index occupies 0-7 within each section, so the image label
    # is scaled by Num_States_1 to select the section
    return getState1(lastmove, energylevel) + ImageLabel[imagelabel] * Num_States_1

'''the code to go in getMove()'''
def getMove(inputs):
    """One decision round.

    QLearner1 decides whether to classify an image (only consulted when a
    plant is present); QLearner2 decides (move, eat).  Both learn from the
    change in energy since the previous round, used as the reward.
    Returns the (move, eat) pair to execute.
    """
    last_reward = inputs.change_in_energy

    # --- QLearner 1: decide whether we need an image ---
    current_state_1 = getState1(inputs.lastmove, inputs.energylevel)
    image = 'no plant'
    takes_image = 0  # default action index: don't take image
    # only consult the learner when an image could actually be taken
    if (inputs.there_is_a_plant):
        takes_image = QLearner1(current_state_1, SA_old['state_1_old'],
                                SA_old['action_1_old'], last_reward)
        if (Actions_1[takes_image]):
            # NOTE(review): get_image may need to be a call, inputs.get_image() — confirm API
            image = classifier.classify(inputs.get_image)
        else:
            image = 'none'
    # remember (state, action) so QLearner1 can be updated next round
    SA_old['state_1_old'] = current_state_1
    SA_old['action_1_old'] = takes_image

    # --- QLearner 2: decide where we move and whether we eat ---
    current_state_2 = getState2(inputs.lastmove, inputs.energylevel, image)
    move, eat = QLearner2(current_state_2, SA_old['state_2_old'],
                          SA_old['action_2_old'], last_reward)
    # restrict actions: eating is impossible when there is no plant
    if (image == 'no plant'):
        eat = False
    # remember (state, action) for QLearner2's next update.
    # BUG FIX: the original stored takes_image (QLearner1's action) here;
    # QLearner2 must be credited with the (move, eat) action it executed.
    SA_old['state_2_old'] = current_state_2
    SA_old['action_2_old'] = (move, eat)

    return move, eat
    