{--


Q-Bot 0.0.1 alpha

This module consists of all the important functions for the
Q-Learning algorithm

@Version 0.0.1
@Author Benjamin Schubert

--}

import Representation as R
import Data.Array.Repa as Repa
import Data.List.Extras.Argmax
import qualified Data.Vector.Unboxed as V

--Constants to mess around--

-- | Numerator of the learning-rate schedule alpha_k = a / (b - k).
a :: Float
a = 90

-- | Offset of the learning-rate schedule alpha_k = a / (b - k).
b :: Float
b = 80

-- | Trade-off between one's own life and the opponent's life, z in [0,1].
z :: Float
z = 0.5

-- | Discount factor for future rewards in the Q-update rule.
gamma :: Float
gamma = 0.8

--learn phase algo

--learns something during a round
-- | Performs one learning step: looks up Q(s,a) for the action just taken,
-- picks the next action via Boltzmann exploration, and writes the updated
-- Q-value back into the table.
--
-- Takes the current temperature, the executed action, the game state, the
-- Q table and the prob/op life difference of the previous state; returns
-- the new temperature, the chosen action and the updated Q table.
learn:: Int->Action->Game->Q->Int->(Int,Action,Q)
learn temp action game q oldLife= (newTemp,newAction,newQ)
                    where 
                        -- Q(s,a) of the action that was just executed
                        currQSA = Repa.unsafeIndex q $ R.getIndex (R.turns game) (R.card action) (R.field action) (R.app action)
                        -- next action, chosen by Boltzmann/simulated-annealing exploration
                        newAction = chooseAction temp q game
                        -- Q(s',a') of the freshly chosen action in the next turn
                        newQSA = Repa.unsafeIndex q $ R.getIndex ((R.turns game)+1) (R.card newAction) (R.field newAction) (R.app newAction)
                        -- BUG FIX: call the function that actually exists ('update',
                        -- not the undefined 'updateQ'), and pass the turn count k --
                        -- the update rule's alpha_k = a/(b-k) is documented to depend
                        -- on the number of turns, not on the annealing temperature.
                        newQ = update q currQSA newQSA (R.turns game) oldLife
                        -- simulated-annealing schedule: temperature shrinks as turns grow
                        newTemp = 10000 - (R.turns game)
                        

{--
A new action is chosen with a Boltzmann (softmax) and simulated-annealing
approach, so at the beginning it is more likely to explore new, unknown
states than at the end.

@TODO possAction has to be implemented, but not in this module
--}
-- | Chooses the next action with a Boltzmann (softmax) weighting combined
-- with simulated annealing: the higher the temperature, the flatter the
-- weights, so early turns explore more than late ones.
--
-- BUG FIX: the original lambda bound 'x' but weighted every candidate with
-- the global constant 'a' (= 90), so argmax compared identical values and
-- the choice was degenerate. The bound candidate action is used now, and
-- the Int temperature is coerced with 'fromIntegral' so the Float division
-- is well-typed.
chooseAction::Int->Q->Game->Action
chooseAction temp q game = argmax weight $ possAction game
                    where
                        -- Boltzmann weight exp(Q(s',a') / T) of a candidate action
                        weight act = exp (qNext act / fromIntegral temp)
                        -- Q-value of taking 'act' in the next turn
                        qNext act = Repa.unsafeIndex q $
                            R.getIndex ((R.turns game)+1) (R.card act) (R.field act) (R.app act)
                                  
{--
Q is updated according to a simple function. 
Q(s,a)<-Q(s,a)+alpha_k(reward(s,a)+gammaQ(s',a')-Q(s,a))

where Q(s,a) denotes the current state and Q(s',a') the next.
alpha_k depends on the number of turns: alpha_k = a/(b-k)

A better update function would be the discount rule,
but finding a max in a high-dimensional matrix is probably too expensive.

maybe later then ..

Transform Q into a Vector in order to update with mutation.
Be aware of the linear transformation from Repa.Array to Vector!
Index accordingly!

--}
-- | Applies the update rule
-- Q(s,a) <- Q(s,a) + alpha_k*(reward(s,a) + gamma*Q(s',a') - Q(s,a))
-- by flattening the Repa array to an unboxed vector and replacing one cell.
--
-- NOTE(review): several unresolved defects in this block:
--   * 'learn' calls this as 'updateQ', but it is defined as 'update' --
--     one of the two names must change before the module compiles.
--   * 'pos' is a placeholder; the real 4D->1D index of (s,a) is needed,
--     but the state/action indices are not passed in, so the current
--     signature cannot compute it.
--   * 'cuurQSA' is a typo for the parameter 'currQSA' ('cuurQSA' is unbound).
--   * 'game' is not in scope, so 'reward game oldLife' cannot be evaluated;
--     this function would need a Game argument (or a precomputed reward).
--   * 'k' is an Int mixed into Float arithmetic in 'b - k'; a 'fromIntegral'
--     is required.
--   * Repa's 'fromVector' usually also takes the target shape ('extent q')
--     -- confirm against the Repa version in use.
update:: Q->Float->Float->Int->Int->Q
update q currQSA newQSA k oldLife= Repa.fromVector ( (Repa.toVector q)  V.// [(pos,newValue)] )
                            where 
                                 pos = 0 ---please enter here the correct 4D->1D transformation
                                 -- learning rate schedule: decays as the turn count k grows
                                 alpha = a / (b - k)
                                 newValue = cuurQSA + alpha * ( (reward game oldLife ) + gamma * newQSA - currQSA)
                                 

{--
The reward of an action is the change in op/prob life status

z is a parameter to balance the tradeoff between prob life and op life

oldLife is the difference of prob/op life of the last state.
--}
-- | The reward of an action is the change in prob/op life status relative
-- to the previous state; z balances one's own life against the opponent's.
--
-- BUG FIX: the return type 'Flat' was a typo for 'Float', and
-- 'R.prob.life game' parsed as the composition 'R.prob . (life game)'
-- instead of the intended nested field accesses. 'fromIntegral' coercions
-- lift the life values into Float arithmetic (assumes the life fields are
-- integral, like the Int 'oldLife' difference -- confirm in Representation).
reward::Game->Int->Float
reward game oldLife =
    (z * fromIntegral (R.prob (R.life game))
      - (1 - z) * fromIntegral (R.op (R.life game)))
    - fromIntegral oldLife


  
                                 
                                 

                             
                                
                             
    
              