import time

import numpy as np


class Q_learn:
    """Tabular Q-learning agent for a 1-D corridor maze.

    The agent starts at cell 0 and walks left/right until it reaches
    ``target``. Bumping into an edge costs -10, reaching the target
    pays +100; the Q-table is updated with the standard one-step
    Q-learning rule.
    """

    # Action index 0 moves left, index 1 moves right.
    acts = ["left", "right"]
    esps = 100   # number of training episodes
    L = 0.9      # discount factor (gamma)
    lr = 0.1     # learning rate (alpha)

    def __init__(self, size, blockx, blocky, target):
        """size: number of cells; target: index of the goal cell.

        blockx/blocky are kept for backward compatibility with the
        disabled blocked-cell feature (see the commented lines below
        and in createmaze).
        """
        self.size = size
        # One row per cell, one column per action (left, right).
        self.qtable = np.zeros((size, 2), dtype=float)
        # self.block = [blockx, blocky]
        self.target = target
        # Cells visited during the current episode. Bug fix: this was a
        # mutable CLASS attribute shared across all instances; it is now
        # per-instance state.
        self.roads = []

    def createmaze(self):
        """Build the 1-D maze array: 0 = free cell, 1 = target cell."""
        self.maze = np.zeros((self.size), dtype=int)
        # self.maze[self.block[0], self.block[1]] = -1
        self.maze[self.target] = 1

    def choiceact(self, E, stage):
        """Epsilon-greedy action selection.

        With probability E pick a random action; otherwise pick a
        greedy action, breaking ties among maximal Q-values uniformly
        at random. Returns the action index (0 or 1).
        """
        if np.random.random() >= E:
            actions = self.qtable[stage, :]
            # All actions tied for the maximum Q-value.
            best = np.where(actions == actions.max())[0]
            return np.random.choice(best)
        return np.random.randint(0, high=2)

    def updatemaze(self, stage, ep, step):
        """Record the visited cell and report whether the episode goes on.

        Returns True while the agent has not reached the target; when it
        has, prints the episode summary and the visited path, resets the
        path, and returns False.
        """
        self.roads.append(stage)
        if stage == self.target:
            print(f"Eps:{ep:<5}step:{step:<5}")
            print(self.roads)
            self.roads = []
            # time.sleep(1)
            return False
        return True

    def mazefeedback(self, action, stage):
        """Apply ``action`` (index) at ``stage``; return (next_stage, reward).

        Rewards: +100 for arriving at the target, -10 for bumping into
        either edge, 0 otherwise.
        """
        nextmove = stage
        reward = 0
        if self.acts[action] == "right":
            nextmove = stage + 1
            # Bug fix: clamp the right edge. Previously a "right" move
            # past the last cell produced an out-of-range index into
            # qtable whenever target != size-1.
            if nextmove >= self.size:
                nextmove = stage
                reward = -10
        else:  # "left"
            nextmove = stage - 1
            if nextmove < 0:
                nextmove = 0
                reward = -10
        if nextmove == self.target:
            reward = 100
        return nextmove, reward

    def training(self):
        """Run ``esps`` episodes of Q-learning and return the Q-table."""
        E = 0.1  # exploration rate, decayed by 10% each episode
        for ep in range(self.esps):
            is_end = self.updatemaze(0, ep, 0)  # record the start cell
            Stage = 0
            step = 0
            E = 0.9 * E
            while is_end:
                Action = self.choiceact(E, Stage)
                Stagenew, reward = self.mazefeedback(Action, Stage)
                q_pre = self.qtable[Stage, Action]
                if Stagenew != self.target:
                    q_target = reward + self.L * self.qtable[Stagenew, :].max()
                else:
                    # Terminal state: no future value to bootstrap from.
                    q_target = reward
                self.qtable[Stage, Action] += self.lr * (q_target - q_pre)
                Stage = Stagenew
                step += 1
                # Returns False once the target cell is recorded.
                is_end = self.updatemaze(Stage, ep, step)
        return self.qtable
if __name__ == '__main__':
    # Demo: a 50-cell corridor whose goal is the last cell (index 49).
    maze_agent = Q_learn(50, 2, 2, 49)
    maze_agent.createmaze()
    learned_table = maze_agent.training()
    print(learned_table)