from WorldModel import *
from agnet import *
from tqdm import tqdm

from matplotlib import pyplot as plt

# Timer pacing: intervals passed to QTimer.start are computed as 1.0/fps,
# so this very large value makes the training timer fire as fast as possible.
fps=10000000
# Rolling-window cap for the plotted reward/epoch histories (oldest entries
# are dropped once a list reaches this length).
plotmax=10000
import os




class Env(WorldModel):
    """QTimer-driven harness that couples a WorldModel simulation with a
    DQN-style Agent for training (`start_train`) and playback (`start_play`).

    Instead of a blocking loop, one world step runs per single-shot timer
    tick (`train_a_step` / `play`), which re-arms the timer itself until the
    run finishes — this keeps the Qt event loop responsive.

    NOTE(review): relies on names star-imported from WorldModel/agnet
    (Agent, QTimer, ENV, Seq_max, SKIP, UPDATE_STEP, SAVE_STEP,
    NUM_EPISODES, MAX_STEPS, sleep_time, torch, np, sys) — confirm they are
    all exported by those modules.
    """

    def __init__(self,max_epoch=NUM_EPISODES,step_max=MAX_STEPS,reload=None,load_mem=False,batch_size=256):
        """Build the world (via WorldModel) and the learning agent.

        max_epoch  -- number of episodes to train for
        step_max   -- steps per episode before end_a_epoch() is triggered
        reload     -- optional checkpoint path forwarded to Agent
        load_mem   -- whether Agent should also restore its replay memory
        batch_size -- replay batch size forwarded to Agent
        """
        super().__init__()
        self.max_epoch=max_epoch
        self.step_max=step_max

        # The agent is sized from the world's state/action spaces.
        self.agent=Agent(self.get_state_space(),self.get_action_space(),reload=reload,load_mem=load_mem,batch_size=batch_size)
        print(self.width(),self.height())

    def transform(self,state):
        """Normalize a raw state into roughly [-1, 1].

        Assumes raw state components lie around 0..800 (pixel coordinates,
        presumably) — TODO confirm against WorldModel.get_current_state.
        """
        s=state
        s=(s-400)/400
        # s=torch.FloatTensor(s)
        return s


    def start_train(self):
        """Reset all training bookkeeping and start the step timer.

        Each timeout fires train_a_step() exactly once (single-shot timer);
        train_a_step() re-arms the timer until training completes.
        """
        self.trange=tqdm(range(self.max_epoch))  # used only for set_description, never iterated
        self.max_reward=0.0   # best episode return seen so far
        self.tr_list=[]       # per-episode total rewards (rolling, capped at plotmax)
        self.total_r=0.0      # reward accumulated within the current episode
        self.epoch_list=[]    # episode indices matching tr_list for plotting
        self.max_r_ls=[]      # running max_reward history for plotting
        self.first=True       # forces one checkpoint save on the first qualifying episode
        self.epoch=0
        self.epoch_step=0     # steps taken in the current episode
        self.act=0            # last chosen action (held between SKIP frames)
        self.seq_len=0        # how many frames are currently in state_seq
        self.state=None
        self.state_seq=[]     # sliding window of the last Seq_max transformed states
        self.frame_index=0    # frame counter for SKIP-based action repeat
        self.train_timer=QTimer(self)
        self.train_timer.setSingleShot(True)
        # QTimer.singleShot(1.0/fps,self.train_a_step)
        self.train_timer.timeout.connect(self.train_a_step)
        # NOTE(review): QTimer.start expects an int millisecond interval;
        # 1.0/fps is ~1e-7, i.e. effectively a 0 ms interval — confirm the
        # Qt binding in use accepts a float argument here.
        self.train_timer.start(1.0/fps)
        # self.train_timer.singleShot(1.0/fps, self.train_a_step)
        return



    def end_a_epoch(self):
        """Close out the current episode: checkpoint, plot, periodic updates.

        Returns True once max_epoch episodes have completed (caller then
        stops the timer chain), False otherwise.
        """
        self.epoch_step=0
        # Save a "max" checkpoint when this episode beat the record (or on
        # the first qualifying save), but only after a 5-episode warm-up.
        # NOTE(review): because of the warm-up, `first` stays True until
        # epoch > 5, so the first save may record a total_r lower than an
        # earlier (unsaved) episode — confirm this is intended.
        if (self.total_r>self.max_reward or self.first) and self.epoch>5:
            self.first=False
            # Remove the previous best checkpoint so only one max file remains.
            if os.path.exists('max_'+ENV+'_%d_.pth'%(int(self.max_reward))):

                os.remove('max_'+ENV+'_%d_.pth'%(int(self.max_reward)))
            self.max_reward=self.total_r
            self.agent.save(path='max_'+ENV+'_%d_.pth'%(int(self.max_reward)))
            print('save max %.6f'%self.max_reward)
        self.max_r_ls.append(self.max_reward)
        self.tr_list.append(self.total_r)
        print(self.epoch,'tr=',self.total_r)
        self.epoch_list.append(self.epoch)
        # Keep the plotting histories bounded at plotmax entries (drop oldest).
        self.tr_list=self.tr_list if len(self.tr_list)<plotmax else self.tr_list[1:]
        self.epoch_list=self.epoch_list if len(self.epoch_list)<plotmax else self.epoch_list[1:]
        self.max_r_ls=self.max_r_ls if len(self.max_r_ls)<plotmax else self.max_r_ls[1:]
        # Refresh the reward-curve image after every episode.
        plt.cla()
        plt.title('TRAIN')
        plt.plot(self.epoch_list,self.tr_list,color='g',label='total_reward')
        plt.plot(self.epoch_list,self.max_r_ls,color='r',label='max_reward')
        plt.legend()
        plt.savefig('train_2.png')
        # Periodic target-network update and rolling checkpoint.
        if self.epoch %UPDATE_STEP==0 and self.epoch!=0:
            self.agent.update()
            print('update')
        if self.epoch %SAVE_STEP==0 and self.epoch!=0:
            self.agent.save()
        self.trange.set_description('%d epoch: max_reward=%.8f avg-reward=%.8f'%(self.epoch,self.max_reward,self.total_r))
        self.total_r=0.0
        self.epoch+=1
        self.fresh_epoch()
        if self.epoch==self.max_epoch:
            return True
        else:
            return False


    def train_a_step(self):
        """Advance the world one step, learn from it, and re-arm the timer.

        Until Seq_max frames are collected the tick only grows the frame
        window (no learning). Afterwards each tick: pick a new action every
        SKIP frames (action repeat), step the world, store the transition,
        and run one Q-update.
        """
        self.epoch_step+=1
        # NOTE(review): this re-initializes the world on EVERY tick while
        # epoch is still 0, not just on the first tick — confirm intended.
        if self.epoch==0:
            self.fresh_epoch()
            self.state=self.get_current_state()
            self.state=self.transform(self.state)

        if self.seq_len<Seq_max:
            # Warm-up: fill the frame window before any learning happens.
            self.state_seq.append(self.state)
            self.seq_len+=1
            # NOTE(review): agent1 is presumably created by WorldModel — verify.
            self.state,self.reward=self.agent1.one_step(self.act)
            self.update_world()
            self.state=self.transform(self.state)
            self.train_timer.start(1.0/fps)
            # QTimer.singleShot(1.0/fps,self.train_a_step)
            return
        else:
            # Slide the window: drop the oldest frame, append the newest.
            self.state_seq=self.state_seq[1:]
            self.state_seq.append(self.state)

        self.frame_index+=1
        # Flatten the whole frame window into a single (1, -1) float tensor.
        self.state=torch.from_numpy(np.array(self.state_seq).reshape(1,-1)).type(torch.FloatTensor)
        gpu_state=None
        if torch.cuda.is_available():
            gpu_state=self.state.to(self.agent.brain.device)

        # Choose a fresh action only every SKIP frames; otherwise the
        # previous self.act is repeated.
        if self.frame_index%SKIP==0:
            self.frame_index=0
            action=self.agent.get_action(gpu_state,self.epoch)
            self.act=action.item()

        obs_next,reward=self.agent1.one_step(self.act)
        self.update_world()
        obs_next=self.transform(obs_next)
        r=reward
        reward=torch.FloatTensor([reward])
        # Build the successor window: state_seq shifted by one new frame.
        state_next=self.state_seq[1:]
        state_next.append(obs_next)
        state_next= torch.from_numpy(np.array(state_next).reshape(1,-1)
            ).type(torch.FloatTensor)
        # Store the transition (CPU tensors) and do one learning step.
        self.agent.memorize(self.state,torch.LongTensor([self.act]).view(1,-1),state_next,reward)
        self.agent.update_q()
        self.state=obs_next
        self.total_r+=r
        result=False
        if self.epoch_step>=self.step_max:
            result=self.end_a_epoch()

        if result==True:
            # All episodes done: break the timer chain.
            self.train_timer.stop()
            print('train Finshed')

            return


        else:
            # Re-arm the single-shot timer for the next step.
            # self.train_timer.singleShot(1.0/fps, self.train_a_step)
            self.train_timer.start(1.0/fps)
            # QTimer.singleShot(1.0/fps,self.train_a_step)
            pass


    def start_play(self):
        """Reset play-mode bookkeeping and start the playback timer."""
        self.frame_index=0
        self.state_seq=[]
        self.seq_len=0
        self.chosed_act=0   # action held between SKIP frames during play
        self.fresh_epoch()
        self.state=self.get_current_state()
        self.state=self.transform(self.state)
        self.play_timer=QTimer()
        self.play_timer.setSingleShot(True)
        self.play_timer.timeout.connect(self.play)
        self.play_timer.start(sleep_time)

    def play(self):
            """One playback step: act in the world with no learning.

            Mirrors train_a_step, but get_action is called with max_epoch
            (presumably putting exploration at its final/lowest level —
            confirm against Agent.get_action) and nothing is memorized.
            """
            if self.seq_len<Seq_max:
                # Warm-up: fill the frame window first.
                self.state_seq.append(self.state)
                self.seq_len+=1
                self.state,reward=self.agent1.one_step(self.chosed_act)
                self.update_world()
                self.state=self.transform(self.state)
                self.play_timer.start(sleep_time)
                return
            else:
                # Slide the window: drop the oldest frame, append the newest.
                self.state_seq=self.state_seq[1:]
                self.state_seq.append(self.state)

            self.frame_index+=1
            self.state=torch.from_numpy(np.array(self.state_seq).reshape(1,-1)).type(torch.FloatTensor)
            gpu_state=None
            if torch.cuda.is_available():
                gpu_state=self.state.to(self.agent.brain.device)
            if self.frame_index%SKIP==0:
                self.frame_index=0
                action=self.agent.get_action(gpu_state,self.max_epoch)
                self.chosed_act=action.item()

            obs_next,reward=self.agent1.one_step(self.chosed_act)
            self.update_world()
            obs_next=self.transform(obs_next)
            reward=torch.FloatTensor([reward])
            # NOTE(review): state_next is built here but never used in play
            # mode — appears to be dead code carried over from train_a_step.
            state_next=self.state_seq[1:]
            state_next.append(obs_next)
            state_next= torch.from_numpy(np.array(state_next).reshape(1,-1)
                ).type(torch.FloatTensor)
            self.state=obs_next
            self.play_timer.start(sleep_time)
            

if __name__=='__main__':
    # Entry point: spin up the Qt application, build the environment with a
    # previously saved best checkpoint, and run the agent in play mode.
    application = QApplication(sys.argv)
    environment = Env(reload='./max_Tennis_14_0_.pth',load_mem=False,batch_size=8192,max_epoch=1000000,step_max=2000)
    environment.start_play()
    # Hand control to the Qt event loop; exit with its return code.
    sys.exit(application.exec_())
 