"""
Gaussian Process Reinforcement Learning

Author: Joseph Reisinger

Implementation based on:

Engel, Y., Mannor, S., and Meir, R. (2005). Reinforcement learning with
Gaussian processes. Proceedings of ICML 2005.

This package implements the online Monte-Carlo Gaussian Process TD (MC-GPTD).
Deterministic and non-deterministic transition models with and without matrix
sparsification.
"""

from plastk.base import *
from plastk.params import *
from plastk.rl import is_terminal
from plastk.rl.td import TDAgent
import plastk.utils as utils
#from c_kernels import GaussianKernel

from Numeric import reshape,array,identity,zeros,ones,matrixmultiply,transpose,dot,argmax,concatenate
from LinearAlgebra import inverse
from math import exp,log

import PyInline

EPSILON = 0.00001 # tolerance added to the ALD test statistic to absorb floating-point rounding error

class DummyKernel:
    """
    Trivial stand-in kernel: the covariance between any two (sensation,
    action) pairs is the constant 1.  Useful for exercising the GPTD
    machinery without a real covariance function.
    """
    def __init__(self):
        # The dictionary of stored (sensation, action) pairs.
        self.D = []
    def k(self, s, a):
        """Return the kernel vector of (s,a) against every stored pair (all ones)."""
        return [1] * len(self.D)
    def insert(self, s, a):
        """Add the pair (s,a) to the dictionary."""
        self.D.append((s, a))
    def dictionary_size(self):
        """Return the number of stored pairs."""
        return len(self.D)
    def __call__(self, s, a, s2, a2):
        """Kernel evaluation between two arbitrary pairs; always 1."""
        return 1

class GaussianProcessTDAgent(TDAgent):
    """
    A TD-agent using nonparametric gaussian process function approximation. Two
    subclasses exist for the deterministic and nondeterministic transition
    cases.

    Parameters:

    nu -- The sparsity threshold for rejecting new entries into the dictionary
    when using nonparametric updates. A value of 0 is equivalent to using a
    non-sparsifying representation (and is very computationally expensive).

    sigma -- The prior reward variance. Represents the intrinsic
    "noise" of the reward process.

    covariance_function -- The covariance function (kernel) to use for nondeterministic
    gaussian processes.
    """
    nu            = Magnitude(default=0.0)
    #nu            = Number(default=0.0)
    sigma         = Number(default=1.0)
    covariance_function = Parameter(default=None)#GaussianKernel(1.0, 1.0))

    def __init__(self,**params):
        super(GaussianProcessTDAgent,self).__init__(**params)

        assert( self.lambda_ == 1.0 ) # For now force GPTD(1)

        # Don't allow any other step methods because they could muck up the machinery here
        # TODO: eventually fix this to allow q-learning as well
        self.step_method = 'gpsarsa'

        # This caching results in a significant speedup
        self.kernel = self.covariance_function

        self.sigma2 = self.sigma**2

        # Rename alpha as "a", in order to avoid overloading with learning rate
        self.alpha_ = zeros((0,1),'f')  # GP mean weight vector over the dictionary
        self.a = ones((1,1),'f')        # projection coefficients of the current point onto the dictionary
        self.C = zeros((0,0),'f')       # covariance correction matrix
        self.Kinv = None                # inverse Gram matrix; lazily created on first dictionary insert

        # An array of zeros convenient for computing a
        self.z = zeros((0,1),'f')

        # Kernel vector k_t of the current (sensation, action) pair.
        self.kt = zeros((0,1),'f')

        # Debug stuff
        self.dbg_dictionary_size = 0
        self.dbg_total_steps = 0
        self.dbg_steps_in_episode = 0
        self.dbg_ald_badness = 0 # Count how many times the ALD test criterion is negative (this should only happen with non-positive definite kernels)

    def _gpsarsa_training(self,sensation,action,reward,next_sensation,next_action):
        """
        Perform a single GP-SARSA training step given (s,a,r,s',a').
        """
        rho = self.rho(reward)

        if is_terminal(next_sensation):
            value = 0
            self.verbose('TERMINATE at %d rho %f (badness: %d) D: %d compression: %f' % 
                    (self.dbg_steps_in_episode, rho, self.dbg_ald_badness, self.dbg_dictionary_size, float(self.dbg_dictionary_size) / self.dbg_total_steps))
        else:
            value = self.Q(next_sensation,next_action)

        last_value = self.Q(sensation,action)

        self.verbose("controller step = %d, rho = %.2f" % (self.total_steps,rho))
        self.verbose(("Q(t-1) = %.5f, Q(t) = %.5f, diff = %.5f,"+ "terminal? = %d")
                      % (last_value,value,value-last_value, is_terminal(next_sensation)))        

        # Call the appropriate update function given the model we are using.
        # update_Q is supplied by the transition-model-specific subclass.
        self.update_Q(sensation,action,rho,next_sensation,next_action)

    def update_Kinv(self, delta):
        """
        Grow the inverse Gram matrix by one row/column using the
        partitioned-inverse identity (see Engel et al. 2005):

        Kinv_t = 1/\delta_t [[\delta_t Kinv_{t-1} + a_t a_t^T, -a_t],[-a_t^T,1]]
        """
        dkaa = delta*self.Kinv + matrixmultiply(self.a, transpose(self.a))
        first = concatenate((dkaa, -transpose(self.a)), axis=0) # Row append a onto the old Kinv
        extend_a = concatenate((-self.a, [[1]]), axis=0) # Row append a 1 to self.a
        self.Kinv = 1/delta*concatenate((first, extend_a),axis=1) # column append the extended a

    def k(self, s, a):
        """ Compute the sensation projected onto the kernelized space spanned by the sensations up until now """
        # \mathbf{k}_t(x) = (k_(x_1,x),...,k(x_t,x))^T
        # Returned as a column vector of shape (dictionary_size, 1).
        return reshape(array(self.kernel.k(s,a),'f'),(self.kernel.dictionary_size(),1))
        
        #b = reshape(array(self.kernel.k(s,a),'f'),(self.kernel.dictionary_size(),1))
        #c = array([[self.kernel(s1,a1,s,a)] for (s1,a1) in self.D],'f')
        #assert abs(sum(b-c)) < 0.00001
        #return b

    def update_D(self,s,a):
        """ Add (s,a) to the dictionary of saved training patterns (and clear the k cache since it is wrong) """
        self.kernel.insert(s,a)
        self.dbg_dictionary_size += 1
        self.verbose('ADD D: %d compression: %f' % (self.dbg_dictionary_size, float(self.dbg_dictionary_size) / (self.dbg_total_steps+1)))
        #self.D.append((s,a))

    def Q(self,state,action=None):
        """
        Compute Q(s,a) from the parametric weight vector.

        Returns the GP posterior mean alpha^T k(s,a), or 0.0 when the
        dictionary is still empty.
        """
        assert( action is not None )
        if self.kernel.dictionary_size() > 0:
            # Standard action selection: return the mean:
            return sum(matrixmultiply(transpose(self.alpha_),self.k(state,action)))[0]
        else:
            return 0.0

    def _start_episode(self,sensation):
        """
        On the very first episode, we need to inject the first sensation into D, before Q is called.
        """
        # HACK to allow us to not have to go through the parameter interface, but to also allow the 
        # covariance function to be changed in a reasonable way
        self.kernel = self.covariance_function 
        action =  super(GaussianProcessTDAgent,self)._start_episode(sensation)
        list_sensation = sensation

        assert( not is_terminal(sensation) )
        self.dbg_steps_in_episode = 0
        self.c = zeros((self.kernel.dictionary_size(),1),'f')
        self.d = 0
        self.s = float('inf') # 1/s = 0
        ktt = self.kernel(list_sensation,action,list_sensation,action)

        if self.kernel.dictionary_size() > 0:
            self.kt = self.k(list_sensation,action)
            self.a  = matrixmultiply(self.Kinv,self.kt)
            # ALD test statistic: delta = k(x,x) - k_t^T K^{-1} k_t
            delta   = ktt - matrixmultiply(transpose(self.a),self.kt)
        else: # We haven't accumulated anything in the dictionary, so set delta to ktt
            delta   = ktt

        if self.kernel.dictionary_size() == 0 or delta > self.nu:
            # Initialize K_0^{-1} = 1/k(x_0,x_0)
            # BUGFIX: identity comparison with None. The previous '== None'
            # is unreliable once Kinv holds an array (elementwise comparison
            # semantics) and violates PEP 8; 'is None' is always correct.
            if self.Kinv is None: # a is empty, so this reduces to 1/delta
                self.Kinv = array([[1.0/ktt]],'f')
            else: # normal Kinv update
                self.update_Kinv(delta)
            # Grow h, a, alpha_, C, z, c by one entry for the new dictionary point.
            self.h = concatenate((self.a,[[-self.gamma]]), axis=0)
            self.a = concatenate((self.z,[[1]]),axis=0)
            self.alpha_ = concatenate((self.alpha_,[[0]]),axis=0)
            first = concatenate((self.C, transpose(self.z)), axis=0)
            self.C = concatenate((first, zeros((self.C.shape[0]+1,1),'f')),axis=1) 
            self.z = concatenate((self.z,[[0]]),axis=0)
            self.c = concatenate((self.c,[[0]]),axis=0)
            self.update_D(list_sensation,action)
            self.kt = concatenate((self.kt,[[ktt]]),axis=0)

        return action

class StochasticGPTDAgent(GaussianProcessTDAgent):
    """ 
    Contains the specific Q update routines for performing Gaussian Process
    regression on the rewards, using a nonparametric representation and assuming
    stochastic transitions.

    The recursions implemented in update_Q follow the online sparse
    MC-GPTD algorithm of Engel, Mannor & Meir (ICML 2005) -- TODO: confirm
    the exact equation numbering against the paper.
    """

    def update_Q(self,ox,action,rho,x,next_action):
        """ Perform a single GP-SARSA training step given (s,a,r,s',a') using a
        number of parameters equal to the length of the sensation vector.

        ox/action  -- previous sensation/action pair
        rho        -- (transformed) reward received
        x/next_action -- current sensation/action pair

        Updates alpha_ (GP mean weights) and C (covariance correction) in
        place, growing the dictionary when the ALD test exceeds nu.
        """
        self.dbg_total_steps += 1
        self.dbg_steps_in_episode += 1
        # Snapshot last step's quantities ('o' prefix = old / time t-1).
        self.okt = self.kt
        self.oa  = self.a
        self.oc  = self.c
        self.os  = self.s
        self.od  = self.d

        # coef = gamma * sigma^2 / s_{t-1}; on the first step of an episode
        # s is +inf (set in _start_episode) so coef is 0.
        coef  = self.gamma * self.sigma2 / self.os

        # Terminal sensations have a special form and need to be dealt with accordingly
        if is_terminal(x):
            # Terminal case: no successor value, so dk reduces to the old
            # kernel vector and h to the old projection coefficients.
            dk = self.okt 
            dktt = matrixmultiply(transpose(self.oa),self.okt)
            self.h = self.oa
            self.c = coef*self.oc + matrixmultiply(self.C,dk) - self.h
            self.s = self.sigma2 - self.gamma*self.sigma2*coef - matrixmultiply(transpose(dk),self.c+coef*self.oc)
            self.d = coef*self.od + matrixmultiply(transpose(dk),self.alpha_) - rho
            # Re-project the *previous* pair for the next episode's bookkeeping.
            self.kt  = self.k(ox,action)
            self.a = matrixmultiply(self.Kinv, self.kt)
        else:
            # Kernel self-similarity and projection of the new pair (x, a').
            ktt = self.kernel(x,next_action,x,next_action)
            self.kt  = self.k(x,next_action)
            self.a = matrixmultiply(self.Kinv, self.kt)
            # ALD test statistic: delta = k(x,x) - k_t^T K^{-1} k_t.
            delta = (ktt - matrixmultiply(transpose(self.a),self.kt))[0][0]  + EPSILON # counteract rounding error?

            if delta < 0:
                # Should only occur with a non-positive-definite kernel.
                self.dbg_ald_badness += 1

            # Temporal-difference kernel vector and TD "residual" d.
            dk = self.okt - self.gamma*self.kt
            self.d = coef*self.od + matrixmultiply(transpose(dk),self.alpha_) - rho

            if delta - self.nu > 0:
                # ALD test failed: (x, a') is not well-approximated by the
                # dictionary, so add it and grow every per-dictionary vector.
                self.h = concatenate((self.oa,[[-self.gamma]]),axis=0)
                dktt = matrixmultiply(transpose(self.oa),self.okt - 2*self.gamma*self.kt)+self.gamma**2*ktt
                self.c = coef*self.oc + matrixmultiply(self.C,dk)-self.oa
                self.s = (1+self.gamma**2)*self.sigma2 + dktt - self.gamma*self.sigma2*coef - matrixmultiply(transpose(dk),self.c+coef*self.oc+self.oa)
                self.c = concatenate((self.c,[[self.gamma]]),axis=0)
                self.alpha_ = concatenate((self.alpha_,[[0]]),axis=0)
                # Pad C with a zero row and column for the new entry.
                first = concatenate((self.C, zeros((1,self.C.shape[1]),'f')), axis=0)
                self.C = concatenate((first, zeros((self.C.shape[0]+1,1),'f')),axis=1) 
                self.update_D(x,next_action)
                self.update_Kinv(delta)
                # The new point projects exactly onto itself: a = (0,...,0,1)^T.
                self.a = concatenate((self.z,[[1]]),axis=0)
                self.z = concatenate((self.z,[[0]]),axis=0)
                self.kt = concatenate((self.kt,[[ktt]]),axis=0)
            else:
                # Dictionary unchanged: sparse update using projected coefficients.
                self.h = self.oa - self.gamma*self.a
                dktt = matrixmultiply(transpose(self.h),dk)
                self.c = coef*self.oc + matrixmultiply(self.C,dk) - self.h
                self.s = (1+self.gamma**2)*self.sigma2 - self.gamma*self.sigma2*coef - matrixmultiply(transpose(dk),self.c+coef*self.oc)
            self.verbose('delta: %r (badness: %d) D: %d compression: %f' % (delta, self.dbg_ald_badness, self.dbg_dictionary_size, float(self.dbg_dictionary_size) / self.dbg_total_steps))

        # Final mean/covariance updates shared by all branches:
        # alpha <- alpha + c d / s,  C <- C + c c^T / s.
        self.alpha_ = self.alpha_ + self.c*self.d/self.s
        self.C        = self.C + 1/self.s * matrixmultiply(self.c, transpose(self.c))
