# NOTE(review): this entire module is commented out — it is dead code (a disabled
# DDPG training script). Prefer deleting it and relying on version control to
# recover it, or restore it in full; partially un-commenting will break (the
# imports are disabled too).
# import argparse
# import os, sys, random
# import numpy as np
# import torch
# import torch.nn as nn
# import torch.nn.functional as F
# import torch.optim as optim
# from itertools import count
# from torch.distributions import Normal
# from tensorboardX import SummaryWriter
# from route import RouteEnv

# parser = argparse.ArgumentParser()
# parser.add_argument('--mode', default='train', type=str)                   # mode = 'train' or 'test'
# parser.add_argument('--tau',  default=0.005, type=float)                   # target smoothing coefficient

# parser.add_argument('--actor_learning_rate', default=1e-7, type=float)
# parser.add_argument('--critic_learning_rate', default=1e-6, type=float)
# parser.add_argument('--gamma', default=0.99, type=float)                   # discount factor
# parser.add_argument('--capacity', default=1000000, type=int)               # replay buffer size
# parser.add_argument('--batch_size', default=100, type=int)                 # mini batch size

# parser.add_argument('--log_interval', default=5000, type=int)              # save interval
# parser.add_argument('--load', default=False, type=bool)                    # load model (NOTE(review): argparse type=bool is a pitfall — any non-empty string, including "False", parses as True; use action='store_true' if restored)
# parser.add_argument('--exploration_noise', default=25, type=float)
# parser.add_argument('--max_episode', default=10000, type=int)              # num of games
# parser.add_argument('--test_iteration', default=10, type=int)              # test iteration
# parser.add_argument('--train_iteration', default=10000, type=int)          # train iteration
# parser.add_argument('--update_iteration', default=200, type=int)           # train update iteration
# args = parser.parse_args()

# script_name = os.path.basename(__file__)
# script_name = os.path.splitext(script_name)[0]
# log_directory = './Log/' + script_name +'/'
# pak_directory = './Pak/' + script_name +'/'
# device = 'cuda' if torch.cuda.is_available() else 'cpu'

# class ReplayBuffer():
#     def __init__(self, max_size=args.capacity):
#         self.storage = []
#         self.max_size = max_size
#         self.ptr = 0

#     def push(self, data):
#         if len(self.storage) == self.max_size:
#             self.storage[int(self.ptr)] = data
#             self.ptr = (self.ptr + 1) % self.max_size
#         else:
#             self.storage.append(data)

#     def sample(self, batch_size):
#         ind = np.random.randint(0, len(self.storage), size=batch_size)
#         x, y, u, r, d = [], [], [], [], []

#         for i in ind:
#             X, Y, U, R, D = self.storage[i]
#             x.append(np.array(X, copy=False))
#             y.append(np.array(Y, copy=False))
#             u.append(np.array(U, copy=False))
#             r.append(np.array(R, copy=False))
#             d.append(np.array(D, copy=False))

#         return np.array(x), np.array(y), np.array(u), np.array(r).reshape(-1, 1), np.array(d).reshape(-1, 1)


# class Actor(nn.Module):
#     def __init__(self, state_dim, action_dim, max_action):
#         super(Actor, self).__init__()

#         self.l1 = nn.Linear(state_dim, 400)
#         self.l2 = nn.Linear(400, 300)
#         self.l3 = nn.Linear(300, action_dim)

#         self.max_action = max_action

#     def forward(self, x):
#         x = F.relu(self.l1(x))
#         x = F.relu(self.l2(x))
#         x = self.max_action * torch.tanh(self.l3(x))
#         return x


# class Critic(nn.Module):
#     def __init__(self, state_dim, action_dim):
#         super(Critic, self).__init__()

#         self.l1 = nn.Linear(state_dim + action_dim, 400)
#         self.l2 = nn.Linear(400 , 300)
#         self.l3 = nn.Linear(300, 1)

#     def forward(self, x, u):
#         x = F.relu(self.l1(torch.cat([x, u], 1)))
#         x = F.relu(self.l2(x))
#         x = self.l3(x)
#         return x


# class DDPG(object):
#     def __init__(self, state_dim, action_dim, max_action):
#         self.actor = Actor(state_dim, action_dim, max_action).to(device)
#         self.actor_target = Actor(state_dim, action_dim, max_action).to(device)
#         self.actor_target.load_state_dict(self.actor.state_dict())
#         self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=args.actor_learning_rate)

#         self.critic = Critic(state_dim, action_dim).to(device)
#         self.critic_target = Critic(state_dim, action_dim).to(device)
#         self.critic_target.load_state_dict(self.critic.state_dict())
#         self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=args.critic_learning_rate)
#         self.replay_buffer = ReplayBuffer()
#         os.makedirs(log_directory, exist_ok=True)
#         os.makedirs(pak_directory, exist_ok=True)
#         self.writer = SummaryWriter(log_directory)

#         self.num_critic_update_iteration = 0
#         self.num_actor_update_iteration = 0
#         self.num_training = 0

#     def random_action(self, action, env:RouteEnv):
#         action = (action + np.random.normal(0, args.exploration_noise, size=env.action_space.n)).clip(
#                                 env.action_low, env.action_high)
#         return np.round(action).astype(int)

#     def select_action(self, state, env:RouteEnv):
#         state = torch.FloatTensor(state.reshape(1, -1)).to(device)
#         action = self.random_action(self.actor(state).cpu().data.numpy().flatten(), env)

#         while not env.is_action_available(action):
#             action = self.random_action(action, env)
#         return action

#     def update(self):
#         for it in range(args.update_iteration):
#             # Sample replay buffer
#             x, y, u, r, d = self.replay_buffer.sample(args.batch_size)
#             state = torch.FloatTensor(x).to(device)
#             action = torch.FloatTensor(u).to(device)
#             next_state = torch.FloatTensor(y).to(device)
#             done = torch.FloatTensor(1-d).to(device)
#             reward = torch.FloatTensor(r).to(device)

#             # Compute the target Q value
#             target_Q = self.critic_target(next_state, self.actor_target(next_state))
#             target_Q = reward + (done * args.gamma * target_Q).detach()

#             # Get current Q estimate
#             current_Q = self.critic(state, action)

#             # Compute critic loss
#             critic_loss = F.mse_loss(current_Q, target_Q)
#             self.writer.add_scalar('Loss/critic_loss', critic_loss, global_step=self.num_critic_update_iteration)
#             # Optimize the critic
#             self.critic_optimizer.zero_grad()
#             critic_loss.backward()
#             self.critic_optimizer.step()

#             # Compute actor loss
#             actor_loss = -self.critic(state, self.actor(state)).mean()
#             self.writer.add_scalar('Loss/actor_loss', actor_loss, global_step=self.num_actor_update_iteration)

#             # Optimize the actor
#             self.actor_optimizer.zero_grad()
#             actor_loss.backward()
#             self.actor_optimizer.step()

#             # Update the frozen target models
#             for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
#                 target_param.data.copy_(args.tau * param.data + (1 - args.tau) * target_param.data)

#             for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
#                 target_param.data.copy_(args.tau * param.data + (1 - args.tau) * target_param.data)

#             self.num_actor_update_iteration += 1
#             self.num_critic_update_iteration += 1

#     def save(self, episode=None):
#         if episode is not None:
#             torch.save(self.actor.state_dict(), pak_directory + '{}_actor.pth'.format(episode))
#             torch.save(self.critic.state_dict(), pak_directory + '{}_critic.pth'.format(episode))
#         else:
#             torch.save(self.actor.state_dict(), pak_directory + 'actor.pth')
#             torch.save(self.critic.state_dict(), pak_directory + 'critic.pth')
#         print("====================================")
#         print("Model has been saved...")
#         print("====================================")

#     def load(self):
#         self.actor.load_state_dict(torch.load(pak_directory + 'actor.pth', weights_only=True))
#         self.critic.load_state_dict(torch.load(pak_directory + 'critic.pth', weights_only=True))
#         print("====================================")
#         print("model has been loaded...")
#         print("====================================")

#     def game_loop(self, env:RouteEnv, car_index):

#         if args.mode == 'test':
#             for i in range(args.test_iteration):
#                 total_reward = 0
#                 resolved = False
#                 path = []
#                 state = env.reset(car_index)
#                 path.append(state[0])

#                 #for t in range(args.max_episode):
#                 while not resolved:
#                     action = self.select_action(state, env)
#                     next_state, reward, done, resolved, _ = env.step(action[0])
#                     total_reward += reward
                    
#                     state = next_state
#                     path.append(state[0])

#                     if resolved:
#                         print("car {} found a solution!".format(car_index))
#                         print('reward: \t{:.4f}, path is {}, Still have Energy = {} Time = {}'.format(reward, path, state[2], state[1]))
#                         #print("Iteration: \t{}, Episode: \t{}, Total Reward: \t{}".format(i, t, total_reward))
#                     if done:
#                         path.clear()
#                         state = env.reset(car_index)
#                         path.append(state[0])
#                         continue

#         elif args.mode == 'train':
#             for i in range(args.train_iteration):
#                 total_reward = 0
#                 path = []
#                 state = env.reset(car_index)
#                 path.append(state[0])

#                 for t in range(args.max_episode):
#                     action = self.select_action(state, env)

#                     next_state, reward, done, resolved, _ = env.step(action[0])
#                     self.replay_buffer.push((state, next_state, action, reward, np.float64(done)))

#                     state = next_state
#                     total_reward += reward
#                     path.append(state[0])
                        
#                     if resolved:
#                         print("car {} found a solution!".format(car_index))
#                         print('reward: \t{:.4f}, path is {}, Still have Energy = {} Time = {}'.format(reward, path, state[2], state[1]))
#                         self.writer.add_scalar('reward/resolved_reward', reward, global_step=i)

#                         if i % (args.log_interval) == 0:
#                             self.save(i)
#                         break
#                     if done:
#                         path.clear()
#                         break

#                 self.update()
#                 print("Iteration: \t{} Total Reward: \t{:.4f}".format(i, total_reward))
#                 self.writer.add_scalar('reward/total_reward', total_reward, global_step=i)

#         else:
#             raise NameError("mode wrong!!!")
#         env.close()
#         self.writer.close()

# def run():
#     env = RouteEnv()
#     state_dim = env.observation_space.shape[0]
#     action_dim = env.action_space.n
#     max_action = float(env.action_high)

#     agent = DDPG(state_dim, action_dim, max_action)

#     if args.mode == 'test' or args.load:
#         agent.load()

#     for id in range(env.car_num):
#         agent.game_loop(env, id)

# if __name__ == '__main__':
#     run()
