# import os, sys, random
# import numpy as np
# import torch
# import torch.nn as nn
# import torch.nn.functional as F
# import torch.optim as optim
# from itertools import count
# from torch.distributions import Normal
# from tensorboardX import SummaryWriter
# from route import RouteEnv

# script_name = os.path.basename(__file__)
# script_name = os.path.splitext(script_name)[0]
# log_directory = './Log/' + script_name +'/'
# pak_directory = './Pak/' + script_name +'/'
# device = 'cuda' if torch.cuda.is_available() else 'cpu'

# class Replay_buffer():
#     def __init__(self, max_size=0):
#         self.storage = []
#         self.max_size = max_size
#         self.ptr = 0

#     def push(self, data):
#         if len(self.storage) == self.max_size:
#             self.storage[int(self.ptr)] = data
#             self.ptr = (self.ptr + 1) % self.max_size
#         else:
#             self.storage.append(data)

#     def sample(self, batch_size):
#         ind = np.random.randint(0, len(self.storage), size=batch_size)
#         x, y, u, r, d = [], [], [], [], []

#         for i in ind:
#             X, Y, U, R, D = self.storage[i]
#             x.append(np.array(X, copy=False))
#             y.append(np.array(Y, copy=False))
#             u.append(np.array(U, copy=False))
#             r.append(np.array(R, copy=False))
#             d.append(np.array(D, copy=False))

#         return np.array(x), np.array(y), np.array(u), np.array(r).reshape(-1, 1), np.array(d).reshape(-1, 1)

# class Actor(nn.Module):

#     def __init__(self, state_dim, action_dim):
#         super(Actor, self).__init__()

#         self.fc1 = nn.Linear(state_dim, 400)
#         self.fc2 = nn.Linear(400, 300)
#         self.fc3 = nn.Linear(300, 1)

#         self.max_action = action_dim

#     def forward(self, state):
#         a = F.relu(self.fc1(state))
#         a = F.relu(self.fc2(a))
#         a = torch.tanh(self.fc3(a)) + 1
#         a = a * self.max_action / 2
#         return a


# class Critic(nn.Module):

#     def __init__(self, state_dim, action_dim):
#         super(Critic, self).__init__()

#         self.fc1 = nn.Linear(state_dim + 1, 400)
#         self.fc2 = nn.Linear(400, 300)
#         self.fc3 = nn.Linear(300, 1)

#     def forward(self, state, action):
#         state_action = torch.cat([state, action], 1)

#         q = F.relu(self.fc1(state_action))
#         q = F.relu(self.fc2(q))
#         q = self.fc3(q)
#         return q


# class NetWorkProxy():
#     def __init__(self, state_dim, action_dim, action_range, args):

#         self.args = args
#         self.actor = Actor(state_dim, action_dim).to(device)
#         self.actor_target = Actor(state_dim, action_dim).to(device)
#         self.critic_1 = Critic(state_dim, action_dim).to(device)
#         self.critic_1_target = Critic(state_dim, action_dim).to(device)
#         self.critic_2 = Critic(state_dim, action_dim).to(device)
#         self.critic_2_target = Critic(state_dim, action_dim).to(device)

#         if self.args.mode == 'test' or self.args.load:
#             self.load()
#         else:
#             self.actor_target.load_state_dict(self.actor.state_dict())
#             self.critic_1_target.load_state_dict(self.critic_1.state_dict())
#             self.critic_2_target.load_state_dict(self.critic_2.state_dict())

#         self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=args.learning_rate)
#         self.critic_1_optimizer = optim.Adam(self.critic_1.parameters(), lr=args.critic1_learning_rate)
#         self.critic_2_optimizer = optim.Adam(self.critic_2.parameters(), lr=args.critic2_learning_rate)

#         self.max_action = action_dim
#         self.action_range = action_range
#         self.memory = Replay_buffer(args.capacity)
#         self.num_critic_update_iteration = 0
#         self.num_actor_update_iteration = 0
#         self.num_training = 0

#         os.makedirs(log_directory, exist_ok=True)
#         os.makedirs(pak_directory, exist_ok=True)
#         self.writer = SummaryWriter(log_directory)

#     def random_action(self, action):
#         action = action + np.random.normal(0, self.args.exploration_noise)
#         return action.clip(self.action_range[0], self.action_range[1])

#     def select_action(self, state):
#         state = torch.FloatTensor(state.reshape(1, -1)).to(device)

#         action_data = self.actor(state).cpu().data.numpy().flatten()
#         action = self.random_action(action_data)
#         return np.round(action).astype(int)[0]

#     def update(self, num_iteration):

#         if self.num_training % 500 == 0:
#             print("====================================")
#             print("model has been trained for {} times...".format(self.num_training))
#             print("====================================")
#         for i in range(num_iteration):
#             x, y, u, r, d = self.memory.sample(self.args.batch_size)
#             state = torch.FloatTensor(x).to(device)
#             action = torch.FloatTensor(u).to(device).unsqueeze(1)
#             next_state = torch.FloatTensor(y).to(device)
#             done = torch.FloatTensor(d).to(device)
#             reward = torch.FloatTensor(r).to(device)

#             # Select next action according to target policy:
#             #noise = torch.ones_like(action).data.normal_(0, self.args.policy_noise).to(device)
#             noise = np.array(np.random.normal(0, self.args.policy_noise))
#             # TD3 target-policy smoothing clips the noise symmetrically; clipping to
#             # [0, noise_clip] would bias the target action upward.
#             noise = np.clip(noise, -self.args.noise_clip, self.args.noise_clip)
#             next_action = (self.actor_target(next_state) + noise)
#             next_action = next_action.clamp(self.action_range[0], self.action_range[1])

#             # Compute target Q-value:
#             target_Q1 = self.critic_1_target(next_state, next_action)
#             target_Q2 = self.critic_2_target(next_state, next_action)
#             target_Q = torch.min(target_Q1, target_Q2)
#             target_Q = reward + ((1 - done) * self.args.gamma * target_Q).detach()

#             # Optimize Critic 1:
#             current_Q1 = self.critic_1(state, action)
#             loss_Q1 = F.mse_loss(current_Q1, target_Q)
#             self.critic_1_optimizer.zero_grad()
#             loss_Q1.backward()
#             self.critic_1_optimizer.step()
#             self.writer.add_scalar('Loss/Q1_loss', loss_Q1, global_step=self.num_critic_update_iteration)

#             # Optimize Critic 2:
#             current_Q2 = self.critic_2(state, action)
#             loss_Q2 = F.mse_loss(current_Q2, target_Q)
#             self.critic_2_optimizer.zero_grad()
#             loss_Q2.backward()
#             self.critic_2_optimizer.step()
#             self.writer.add_scalar('Loss/Q2_loss', loss_Q2, global_step=self.num_critic_update_iteration)
#             # Delayed policy updates:
#             if i % self.args.policy_delay == 0:
#                 # Compute actor loss:
#                 actor_loss = - self.critic_1(state, self.actor(state)).mean()

#                 # Optimize the actor
#                 self.actor_optimizer.zero_grad()
#                 actor_loss.backward()
#                 self.actor_optimizer.step()
#                 self.writer.add_scalar('Loss/actor_loss', actor_loss, global_step=self.num_actor_update_iteration)
#                 for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
#                     target_param.data.copy_(((1- self.args.tau) * target_param.data) + self.args.tau * param.data)

#                 for param, target_param in zip(self.critic_1.parameters(), self.critic_1_target.parameters()):
#                     target_param.data.copy_(((1 - self.args.tau) * target_param.data) + self.args.tau * param.data)

#                 for param, target_param in zip(self.critic_2.parameters(), self.critic_2_target.parameters()):
#                     target_param.data.copy_(((1 - self.args.tau) * target_param.data) + self.args.tau * param.data)

#                 self.num_actor_update_iteration += 1
#         # NOTE(review): these counters advance once per update() call, not once per
#         # inner iteration, so all num_iteration scalar points above are logged at the
#         # same global_step — confirm whether they should move inside the loop.
#         self.num_critic_update_iteration += 1
#         self.num_training += 1

#     def save(self, episode=None):
#         if episode is not None:
#             torch.save(self.actor.state_dict(), pak_directory + '{}_actor.pth'.format(episode))
#             torch.save(self.actor_target.state_dict(), pak_directory + '{}_actor_target.pth'.format(episode))
#             torch.save(self.critic_1.state_dict(), pak_directory + '{}_critic_1.pth'.format(episode))
#             torch.save(self.critic_1_target.state_dict(), pak_directory + '{}_critic_1_target.pth'.format(episode))
#             torch.save(self.critic_2.state_dict(), pak_directory + '{}_critic_2.pth'.format(episode))
#             torch.save(self.critic_2_target.state_dict(), pak_directory + '{}_critic_2_target.pth'.format(episode))
#         else:
#             torch.save(self.actor.state_dict(), pak_directory + 'actor.pth')
#             torch.save(self.actor_target.state_dict(), pak_directory + 'actor_target.pth')
#             torch.save(self.critic_1.state_dict(), pak_directory + 'critic_1.pth')
#             torch.save(self.critic_1_target.state_dict(), pak_directory + 'critic_1_target.pth')
#             torch.save(self.critic_2.state_dict(), pak_directory + 'critic_2.pth')
#             torch.save(self.critic_2_target.state_dict(), pak_directory + 'critic_2_target.pth')
#         print("====================================")
#         print("Model has been saved...")
#         print("====================================")

#     def load(self):
#         self.actor.load_state_dict(torch.load(pak_directory + 'actor.pth', weights_only=True))
#         self.actor_target.load_state_dict(torch.load(pak_directory + 'actor_target.pth',  weights_only=True))
#         self.critic_1.load_state_dict(torch.load(pak_directory + 'critic_1.pth',  weights_only=True))
#         self.critic_1_target.load_state_dict(torch.load(pak_directory + 'critic_1_target.pth',  weights_only=True))
#         self.critic_2.load_state_dict(torch.load(pak_directory + 'critic_2.pth',  weights_only=True))
#         self.critic_2_target.load_state_dict(torch.load(pak_directory + 'critic_2_target.pth',  weights_only=True))
#         print("====================================")
#         print("model has been loaded...")
#         print("====================================")

#     def game_loop(self, env:RouteEnv, car_index):

#         if self.args.mode == 'test':
#             for i in range(self.args.test_iteration):
#                 total_reward = 0
#                 resolved = False
#                 path = []
#                 state = env.reset(car_index)
#                 path.append(state[0])

#                 #for t in range(self.args.max_episode):
#                 while not resolved:
#                     action = self.select_action(state)
#                     next_state, reward, done, resolved, _ = env.step(action)
#                     total_reward += reward
                    
#                     state = next_state
#                     if not state[0] in path:
#                         path.append(state[0])
#                     else:
#                         done = True

#                     if resolved:
#                         print("car {} found a solution!".format(car_index))
#                         print('reward: \t{:.4f}, path is {}, Still have Energy = {} Time = {}'.format(reward, path, state[2], state[1]))
#                         #print("Iteration: \t{}, Episode: \t{}, Total Reward: \t{}".format(i, t, total_reward))
#                     if done:
#                         path.clear()
#                         state = env.reset(car_index)
#                         path.append(state[0])
#                         continue

#         elif self.args.mode == 'train':
#             for i in range(self.args.train_iteration):
#                 total_reward = 0
#                 path = []
#                 state = env.reset(car_index)
#                 path.append(state[0])

#                 for t in range(self.args.max_episode):
#                     action = self.select_action(state)

#                     next_state, reward, done, resolved, _ = env.step(action)

#                     if not next_state[0] in path:
#                         path.append(next_state[0])
#                     else:
#                         done = True

#                     self.memory.push((state, next_state, action, reward, np.float64(done)))

#                     # Parenthesized: `i+1 % 10` parses as `i + (1 % 10)` == i + 1,
#                     # so the original condition could never be true for i >= 0.
#                     if (i + 1) % 10 == 0:
#                         print('Episode {},  The memory size is {} '.format(i, len(self.memory.storage)))
#                     if len(self.memory.storage) >= self.args.capacity-1:
#                         self.update(10)

#                     state = next_state
#                     total_reward += reward

#                     if done:
#                         if resolved:
#                             print("car {} found a solution!".format(car_index))
#                             print('reward: \t{:.4f}, path is {}, Still have Energy = {} Time = {}'.format(reward, path, state[2], state[1]))
#                             self.writer.add_scalar('reward/resolved_reward', reward, global_step=i)

#                             if i % (self.args.log_interval) == 0:
#                                 self.save(i)
#                         path.clear()
#                         break

#                 print("Iteration: \t{} Total Reward: \t{:.4f}".format(i, total_reward))
#                 self.writer.add_scalar('reward/total_reward', total_reward, global_step=i)

#         else:
#             raise NameError("mode wrong!!!")
#         env.close()
#         self.writer.close()

# def run():
#     env = RouteEnv()
#     state_dim = env.observation_space.shape[0]
#     action_dim = env.action_space.n
#     max_action = float(env.grid_size)

#     # NOTE(review): NetWorkProxy.__init__ signature is (state_dim, action_dim,
#     # action_range, args) — this call omits `args` and passes max_action where
#     # action_range is expected, so it would raise TypeError if re-enabled.
#     agent = NetWorkProxy(state_dim, action_dim, max_action)

#     # NOTE(review): redundant — __init__ already calls self.load() under this
#     # exact condition; loading twice is harmless but unnecessary.
#     if agent.args.mode == 'test' or agent.args.load:
#         agent.load()

#     for id in range(env.car_num):
#         agent.game_loop(env, id)

# if __name__ == '__main__':
#     run()

