import random
import gym
import numpy
from numpy import array
from numpy import ndarray

import gym.envs.classic_control


class SAImp:
    """Random-search ("shake") hill-climbing agent for CartPole-v0.

    Maintains a linear policy over the 4-component CartPole observation:
    action = 1 if dot(observation, weights) > 0 else 0.  Each episode the
    best-so-far weight vector is perturbed ("shaken") by a linearly decaying
    random step; the perturbed weights are promoted to best when they score
    at least as well.
    """

    # Weights currently being evaluated, and best weights found so far.
    testWeight = array([1.0, 1.0, 1.0, 1.0])
    bestWeight = array([0.0, 0.0, 0.0, 0.0])

    # Accumulated episode reward for the test / best weights.
    testReward = 0.0
    bestReward = 0.0

    # The action (0 or 1) that the agent will take on the next env.step().
    nextAction = 0

    # Perturbation magnitude multiplier; decays linearly 1.0 -> 0.0 over training.
    shakeStep = 1.0

    # Scale applied to each random perturbation component.
    learningRate = 0.2

    # Total number of training episodes, and how many have run already.
    times = 100
    trainedTimes = 0

    def Shake(self):
        """Promote testWeight if it scored >= best, then perturb best into a new candidate.

        Called at the end of each training episode.  Resets testReward to 0
        for the next episode.
        """
        # Linear decay: full-size shakes early in training, tiny shakes near the end.
        self.shakeStep = float(self.times - self.trainedTimes) / float(self.times)
        print('Episode')
        if self.testReward >= self.bestReward:
            # Copy so later in-place edits of testWeight can never alias bestWeight.
            self.bestWeight = self.testWeight.copy()
            self.bestReward = self.testReward
        # New candidate: best weights plus a uniform perturbation in
        # [-shakeStep*learningRate, +shakeStep*learningRate] per component.
        self.testWeight = array([
            b + self.shakeStep * (random.random() - 0.5) * 2.0 * self.learningRate
            for b in self.bestWeight
        ])
        self.testReward = 0.0

    def GO(self):
        """Train for `times` episodes on CartPole-v0, then replay bestWeight forever.

        Side effects: creates and renders a gym environment and records it
        with the (legacy) gym monitor under /tmp/gym-results4.
        """
        env = gym.make('CartPole-v0')
        env.monitor.start("/tmp/gym-results4", None, True)
        assert isinstance(env, gym.envs.classic_control.CartPoleEnv)
        env.reset()
        # --- training phase: one Shake() per finished episode ---
        while True:
            env.render()
            observation, reward, done, info = env.step(int(self.nextAction))
            self.Process(observation, reward, done, info)
            if done:
                print("bestReward:\t" + str(self.bestReward) + "\t" + str(self.bestWeight))
                print("testReward:\t" + str(self.testReward) + "\t" + str(self.testWeight))
                self.trainedTimes += 1
                if self.trainedTimes >= self.times:
                    break
                self.Shake()
                env.reset()

        print("Done!\n")
        print("BestReward:" + str(self.bestReward))
        print("BestWeight:" + str(self.bestWeight))

        print("BestWeightDisplay:\n")
        env.reset()
        self.testReward = 0.0
        # --- demo phase: replay the best policy indefinitely ---
        # NOTE(review): this loop never breaks, so env.monitor.close() below
        # is unreachable; presumably the endless demo is intentional.
        while True:
            env.render()
            observation, reward, done, info = env.step(int(self.nextAction))
            self.testReward += reward
            # Linear policy on the best weights found during training.
            if numpy.dot(observation, self.bestWeight) > 0:
                self.nextAction = 1
            else:
                self.nextAction = 0
            if done:
                print("reset")
                print("testReward:" + str(self.testReward))
                self.testReward = 0.0
                env.reset()

        env.monitor.close()

    def Process(self, dobservation, dreward, ddone, dinfo):
        """Accumulate the step reward and choose the next action from testWeight.

        :param dobservation: 4-component CartPole observation (numpy.ndarray)
        :param dreward: reward from the last step (float)
        :param ddone: episode-finished flag (bool; unused here)
        :param dinfo: diagnostic info (dict; unused here)
        """
        self.testReward += dreward
        # Linear policy: push right (1) when the weighted observation is positive.
        if numpy.dot(dobservation, self.testWeight) > 0:
            self.nextAction = 1
        else:
            self.nextAction = 0

# Run training/demo only when executed as a script, not when imported
# (GO() opens a gym environment, renders, and records to /tmp immediately).
if __name__ == "__main__":
    ggg = SAImp()
    ggg.GO()

