import numpy as np
import os, sys
import random
import torch
import time
import torch.nn as nn
import torch.nn.functional as F
from network.actor_continue import ActorContinue
from network.basic_critic import BasicCritic
from common.replay_buffer import ReplayBUffer


class DDPGAgent():
    """Deep Deterministic Policy Gradient (DDPG) agent.

    Holds a deterministic actor, a Q-value critic, frozen target copies of
    both (updated only by Polyak soft updates), and a replay buffer.
    Exploration uses Gaussian noise whose std ``var`` decays by a factor of
    ``var_delta`` on every exploratory action.
    """

    def __init__(self, state_dim, action_dim, args):
        # NOTE: class name spelling ("ReplayBUffer") comes from the project
        # module and must match it.
        self.replay_buffer = ReplayBUffer(args)
        self.state_size = state_dim
        self.action_size = action_dim
        self.action_bound = args.action_bound  # actions clipped to [-bound, bound]
        self.gamma = args.gamma                # discount factor
        self.tau = args.tau                    # Polyak soft-update rate
        self.lr_a = args.lr_a                  # actor learning rate
        self.lr_c = args.lr_c                  # critic learning rate
        self.batch_size = args.batch_size
        self.device = args.device
        self.var = args.var                    # current exploration-noise std
        self.var_delta = args.var_delta        # multiplicative decay for var

        self.actor = ActorContinue(self.state_size, self.action_size, self.action_bound, args)
        self.target_actor = ActorContinue(self.state_size, self.action_size, self.action_bound, args)
        # Start the targets as exact copies of the online nets; otherwise the
        # first TD targets come from an unrelated random initialization.
        self.target_actor.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.lr_a)

        self.critic = BasicCritic(self.state_size, self.action_size, args)
        self.target_critic = BasicCritic(self.state_size, self.action_size, args)
        self.target_critic.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self.lr_c)

        # Targets are updated only via soft updates, never by backprop.
        for p in self.target_actor.parameters():
            p.requires_grad = False
        for p in self.target_critic.parameters():
            p.requires_grad = False

    def action(self, state):
        """Greedy (noise-free) action for evaluation.

        Returns the actor's output for a single state as a tensor.
        """
        with torch.no_grad():
            state = torch.FloatTensor(state).view(1, -1).to(self.device)
            action = self.actor(state)[0]
        return action

    def random_selection_action(self, state):
        """Exploratory action: policy output plus N(0, var) noise, clipped.

        Decays the noise std by ``var_delta`` on every call and returns a
        numpy array clipped to [-action_bound, action_bound].
        """
        with torch.no_grad():
            state = torch.FloatTensor(state).view(1, -1).to(self.device)
            # .cpu() so the numpy conversion also works when the actor
            # lives on a CUDA device.
            action = self.actor(state)[0].cpu().numpy()
        action = np.clip(np.random.normal(action, self.var),
                         -self.action_bound, self.action_bound)
        self.var = self.var * self.var_delta
        return action

    def learn(self):
        """Run one gradient step on critic and actor from a replay minibatch.

        Returns:
            float: the critic's TD (MSE) loss for this minibatch.
        """
        state_batch, action_batch, reward_batch, next_state_batch, done_batch = \
            self.replay_buffer.sample(self.batch_size)
        state_batch = torch.FloatTensor(state_batch).to(self.device)
        action_batch = torch.FloatTensor(action_batch).to(self.device).view(self.batch_size, 1)
        reward_batch = torch.FloatTensor(reward_batch).to(self.device).view(self.batch_size, 1)
        next_state_batch = torch.FloatTensor(next_state_batch).to(self.device)
        done_batch = torch.FloatTensor(done_batch).to(self.device).view(self.batch_size, 1)

        # TD target from the frozen target networks.  The bootstrap term is
        # masked out on terminal transitions — the original code loaded
        # done_batch but never used it, bootstrapping through episode ends.
        with torch.no_grad():
            next_q_value = self.target_critic(
                next_state_batch, self.target_actor(next_state_batch)
            ).view(self.batch_size, 1)
            target_q_value = reward_batch + self.gamma * next_q_value * (1.0 - done_batch)

        # Critic step: regress Q(s, a) toward the TD target.
        q_value = self.critic(state_batch, action_batch)
        self.critic_optimizer.zero_grad()
        critic_loss = F.mse_loss(q_value, target_q_value)
        critic_loss.backward()
        self.critic_optimizer.step()

        # Actor step: maximize Q(s, pi(s)) (minimize its negation).
        self.actor_optimizer.zero_grad()
        actor_loss = -torch.mean(self.critic(state_batch, self.actor(state_batch)))
        actor_loss.backward()
        self.actor_optimizer.step()

        # Soft-update target network parameters after the gradient steps
        # (standard DDPG ordering).
        for param, target_param in zip(self.critic.parameters(), self.target_critic.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
        for param, target_param in zip(self.actor.parameters(), self.target_actor.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

        return critic_loss.item()

    def save(self, episodes):
        """Save actor/critic weights under model/DDPG/ with a timestamp."""
        time_now = time.strftime('%y%m_%d%H%M')
        model_dir = 'model/{}'.format("DDPG")
        # makedirs creates intermediate directories too; the original
        # os.mkdir failed whenever 'model/' itself did not exist yet.
        os.makedirs(model_dir, exist_ok=True)
        actor_path = model_dir + '/{}_{}episodes_actor.pth'.format(time_now, episodes)
        critic_path = model_dir + '/{}_{}episodes_critic.pth'.format(time_now, episodes)
        torch.save(self.actor.state_dict(), actor_path)
        torch.save(self.critic.state_dict(), critic_path)

    def load(self, path):
        """Load '<path>_actor.pth' and '<path>_critic.pth' into the online nets."""
        actor_dir = '{}_actor.pth'.format(path)
        critic_dir = '{}_critic.pth'.format(path)
        # map_location lets checkpoints saved on GPU load on CPU-only hosts.
        self.actor.load_state_dict(torch.load(actor_dir, map_location=self.device))
        self.critic.load_state_dict(torch.load(critic_dir, map_location=self.device))
        print('actor network load successed')
        print('critic network load successed')