import numpy as np
# Scratch/demo values.
# NOTE(review): this module-level array `a` is immediately shadowed by the
# class `a` defined below, so it is unreachable after the class definition;
# consider renaming one of them.
a = np.array((1, 2, 3))
b = np.argmax(a)  # index of the largest element -> 2
class a:
    """Q-learning agent with a linear value approximation on a 128x128 grid.

    States are encoded as ``sx * 128 + sy``; actions are indices into
    ``self.actions`` -- (dx, dy) offsets for down, up, left, right.

    NOTE(review): several helpers referenced below (``int_to_point``,
    ``getLegalActions``, ``getFeatures``) are not defined in this file;
    presumably they live elsewhere -- confirm before use.
    """

    def __init__(self, Maxiter, alpha=0.01, wind=0.9, iterNUM=100,
                 grid_size=127):
        """Store hyperparameters.

        Parameters
        ----------
        Maxiter : int -- outer iteration budget (stored, not used here).
        alpha : float -- learning rate for the TD update (was read but
            never initialised in the original).
        wind : float -- probability of taking the greedy action instead of
            a random one (was read but never initialised).
        iterNUM : int -- steps per ``value_iteration`` call (was read but
            never initialised).
        grid_size : int -- largest valid coordinate index, inclusive; 127
            keeps the encoded state within 0 .. 128*128-1 (was read but
            never initialised).
        """
        self.feature_size = 64
        # (dx, dy) moves: down, up, left, right.
        self.actions = [(1, 0), (-1, 0), (0, -1), (0, 1)]
        # BUG FIX: getQvalue reads ``self.weights``; the original only set
        # ``self.weight``.  Keep the old name too for backward compatibility.
        self.weights = None
        self.weight = None
        self.count = 0
        self.Maxiter = Maxiter
        self.alpha = alpha
        self.wind = wind
        self.iterNUM = iterNUM
        self.grid_size = grid_size
        self.Qvalue = None  # running TD-update accumulator

    def move_nextstate(self, state, action):
        """Apply ``action`` to ``state`` and return the clipped next state.

        BUG FIX: ``self.actions`` is a list, so it must be indexed with
        ``[action]`` -- the original called it like a function.
        """
        sx, sy = self.int_to_point(state)
        dx, dy = self.actions[action]
        # Clamp to the board; grid_size is the largest valid index.
        next_sx = min(max(sx + dx, 0), self.grid_size)
        next_sy = min(max(sy + dy, 0), self.grid_size)
        return next_sx * 128 + next_sy

    def state_to_appro(self, state):
        """Map a raw state id to its approximation input (unimplemented).

        Signature fixed to accept ``state`` -- call sites pass one argument.
        """
        pass

    def getstatefeature(self, appro):
        """Return the feature vector for an approximated state (unimplemented).

        Signature fixed to accept one argument, matching the call sites.
        """
        pass

    def get_action(self, state):
        """Greedy action for ``state``: argmax of Q over legal actions.

        Returns 0.0 when there are no legal actions (mirrors ``getValue``).
        NOTE(review): the returned value is an index into the array from
        ``getLegalActions``, not necessarily into ``self.actions`` --
        confirm the two agree.
        """
        actions = self.getLegalActions(state)
        if not actions.size:
            return 0.0
        # BUG FIX: method-name casing -- the Q-value method is ``getQvalue``.
        return np.argmax([self.getQvalue(state, action) for action in actions])

    def _windy_action(self, state):
        """Greedy action with probability ``self.wind``, else a random one."""
        if np.random.uniform() < self.wind:
            return self.get_action(state)
        # BUG FIX: ``np.random.radiant`` does not exist -> ``randint``.
        return np.random.randint(4)

    def _update_qvalue(self, correction, feature):
        """Fold one scaled TD error into the Q-value accumulator."""
        if self.Qvalue is None:
            self.Qvalue = self.alpha * correction * feature
        else:
            self.Qvalue += self.alpha * correction * feature

    def value_iteration(self, w):
        """Run ``iterNUM`` TD updates from a random start state.

        ``w`` are the reward weights: reward = w . feature(next_state).
        """
        state = np.random.randint(128 * 128)
        ac = self._windy_action(state)
        for _ in range(self.iterNUM):
            next_state = self.move_nextstate(state, ac)
            feature = self.getstatefeature(self.state_to_appro(next_state))
            reward = w.T.dot(feature)
            # TD error: r + V(s') - Q(s, a).
            correction = reward + self.getValue(next_state) - self.getQvalue(state, ac)
            self._update_qvalue(correction, feature)
            state = next_state
            ac = self._windy_action(state)

    def getValue(self, state):
        """V(state) = max over legal actions of Q(state, action); 0.0 if none."""
        actions = self.getLegalActions(state)
        if not actions.size:
            return 0.0
        # BUG FIX: method-name casing -- the Q-value method is ``getQvalue``.
        return max(self.getQvalue(state, action) for action in actions)

    def getQvalue(self, state, action):
        """Linear Q-value: weights . features(state, action); 0.0 untrained."""
        if self.weights is not None:
            features = self.getFeatures(state, action)
            return self.weights.T.dot(features)
        return 0.0

    def generate_trajectories(self, w, n_trajectories, trajectory_length):
        """Train, then roll out trajectories; return the mean feature vector.

        Runs 100 warm-up ``value_iteration`` passes, then
        ``n_trajectories`` rollouts of ``trajectory_length`` steps each,
        returning sum(features) / (n_trajectories * trajectory_length) --
        the empirical feature expectation under the learned policy.
        """
        for _ in range(100):  # warm-up training passes
            self.value_iteration(w)

        features = []
        for _ in range(n_trajectories):
            state = np.random.randint(128 * 128)
            ac = self._windy_action(state)
            for _ in range(trajectory_length):
                next_state = self.move_nextstate(state, ac)
                feature = self.getstatefeature(self.state_to_appro(next_state))
                reward = w.T.dot(feature)
                correction = reward + self.getValue(next_state) - self.getQvalue(state, ac)
                self._update_qvalue(correction, feature)
                state = next_state
                ac = self._windy_action(state)
                # BUG FIX: list.append returns None -- the original rebound
                # ``features`` to it, losing the whole list.
                features.append(feature)

        # BUG FIX: a Python list cannot be divided; sum element-wise first.
        return np.sum(features, axis=0) / (n_trajectories * trajectory_length)
