""" Library containing some useful functions for
    reinforcement learning
"""

### IMPORTS ###
import math
import os
import random
import socket
import domination

from utilities import *

# Shortcuts
inf = float('inf')
pi  = math.pi

### FUNCTIONS ###

def learning_run(episodes,field,handicap=0.0,action_type=1,collide_mode=0,capture_mode=0,include_others=True, reset_table=True, rendered=False):
    """Run a batch of Q-learning games on `field` and log the results.

    A descriptive run name is built from the parameters; it keys both the
    persisted Q-table (suffixed with the hostname so runs on different
    machines do not clobber each other's tables) and the output CSV.

    episodes       -- number of games to play
    field          -- map name, loaded from maps/<field>.txt
    handicap       -- blue-team handicap fraction (0.0 .. 1.0)
    action_type    -- 0 tags the run as '_grid'; 1 is the default action set
    collide_mode   -- 1 tags the run as '_sticky' collisions
    capture_mode   -- 0 default, 1 = '_first', otherwise '_majority' capture
    include_others -- False tags the run '_individual' (per-agent learning)
    reset_table    -- delete any previously saved Q-table before starting
    rendered       -- render the games on screen while learning
    """
    name = 'learn_%s_%deps'%(field,episodes)
    if handicap > 0:
        # round() is required: '%d' truncates, and e.g. 0.3*100 is
        # 29.999999999999996 in IEEE floats, which would yield '_h29'.
        name += '_h%d'%round(handicap*100)
    if action_type == 0:
        name += '_grid'
    if collide_mode == 1:
        name += '_sticky'
    if capture_mode != 0:
        name += '_first' if (capture_mode == 1) else '_majority'
    if not include_others:
        name += '_individual'
    # Hostname suffix keeps Q-tables machine-local when the results
    # directory is shared; the CSV name deliberately omits it.
    q_file = os.path.join('q_tables',name + socket.gethostname())
    if os.path.exists(q_file) and reset_table:
        os.remove(q_file)
    settings = domination.Settings(max_turn = 2*pi, max_score=100, max_steps=250, max_speed=16, max_range=32, max_see=1000)
    domination.run_games(red_brain='agent_q.py', blue_brain='agent_adjustable.py',
                         field=os.path.join('maps',field+'.txt'), new_maps=False,
                         games=episodes,
                         red_init={'action_type':action_type,'include_others':include_others,'q_table_file':q_file},
                         blue_init={'handicap':handicap},
                         settings=settings, rendered=rendered,
                         output=os.path.join('results',name+'.csv'))


### PROCEDURE ###

def do_some_task():
    """Pick one of 16 predefined experiments uniformly at random and run it.

    The table below mirrors the experiment groups:
      1-2   : 1-player maze, grid actions, with/without sticky collisions
      3-6   : 1v1 against increasing handicaps
      7-8   : 1v1 grid vs. default actions at handicap 0.20
      9-13  : 2v2 against increasing handicaps
      14    : 2v2 with grid actions
      15    : 2v2 with majority capture
      16    : full game, individual learning
    """
    experiments = [
        # * 1 player maze with/without sliding. Shows how much slower
        #   learning is if collisions stop the agent completely.
        ((200, 'tiny_1p'), dict(action_type=0, collide_mode=0)),
        ((200, 'tiny_1p'), dict(action_type=0, collide_mode=1)),
        # * 1v1 with/without pathfinding (+ without handicap)
        ((30000, 'tiny_1v1'), dict(handicap=0.00)),
        ((30000, 'tiny_1v1'), dict(handicap=0.05)),
        ((30000, 'tiny_1v1'), dict(handicap=0.10)),
        ((30000, 'tiny_1v1'), dict(handicap=0.15)),
        ((30000, 'tiny_1v1'), dict(action_type=0, handicap=0.20)),
        ((30000, 'tiny_1v1'), dict(action_type=1, handicap=0.20)),
        # * 2v2 vs different handicaps
        ((15000, 'tiny_2v2'), dict(handicap=0.00)),
        ((15000, 'tiny_2v2'), dict(handicap=0.05)),
        ((15000, 'tiny_2v2'), dict(handicap=0.10)),
        ((15000, 'tiny_2v2'), dict(handicap=0.15)),
        ((15000, 'tiny_2v2'), dict(handicap=0.20)),
        # * 2v2 with/without pathfinding
        ((15000, 'tiny_2v2'), dict(handicap=0.20, action_type=0)),
        # * 2v2 with majority cp
        ((15000, 'tiny_2v2'), dict(handicap=0.10, capture_mode=2)),
        # * Full game
        ((10000, 'full'), dict(handicap=0.30, include_others=False)),
    ]
    args, kwargs = experiments[random.randint(1, 16) - 1]
    learning_run(*args, **kwargs)


learning_run(15000, 'tiny_1p', handicap=0.25, rendered=True, reset_table=False)