# import os
# import numpy as np
# import torch as T
# import torch.nn as nn
# import torch.optim as optim
# import torch.nn.functional as F
# import numpy as np
# from tensorboardX import SummaryWriter
# from route import RouteEnv

# device = 'cuda' if T.cuda.is_available() else 'cpu'

# class ReplayBuffer:
#     """Fixed-size cyclic replay buffer for (s, a, r, s', done) transitions.
#
#     NOTE(review): `action_dim` is accepted but unused -- actions are stored as
#     scalars in a (mem_size,) array, which matches the discrete-action agent
#     below. Kept in the signature for backward compatibility.
#     """
#     def __init__(self, state_dim, action_dim, max_size, batch_size):
#         self.mem_size = int(max_size)
#         self.batch_size = batch_size
#         self.mem_cnt = 0  # total transitions ever stored (not capped)

#         self.state_memory = np.zeros((self.mem_size, state_dim))
#         self.action_memory = np.zeros((self.mem_size, ))
#         self.reward_memory = np.zeros((self.mem_size, ))
#         self.next_state_memory = np.zeros((self.mem_size, state_dim))
#         self.terminal_memory = np.zeros((self.mem_size, ), dtype=np.bool_)

#     def store_transition(self, state, action, reward, state_, done):
#         """Insert one transition, overwriting the oldest slot when full."""
#         mem_idx = self.mem_cnt % self.mem_size

#         self.state_memory[mem_idx] = state
#         self.action_memory[mem_idx] = action
#         self.reward_memory[mem_idx] = reward
#         self.next_state_memory[mem_idx] = state_
#         self.terminal_memory[mem_idx] = done

#         self.mem_cnt += 1

#     def sample_buffer(self):
#         """Return a uniformly sampled batch (without replacement)."""
#         mem_len = min(self.mem_size, self.mem_cnt)

#         batch = np.random.choice(mem_len, self.batch_size, replace=False)

#         states = self.state_memory[batch]
#         actions = self.action_memory[batch]
#         rewards = self.reward_memory[batch]
#         states_ = self.next_state_memory[batch]
#         terminals = self.terminal_memory[batch]

#         return states, actions, rewards, states_, terminals

#     def ready(self):
#         # BUGFIX: was `>` -- sampling without replacement only requires
#         # mem_cnt >= batch_size, so `>` needlessly delayed the first learn
#         # step by one transition.
#         return self.mem_cnt >= self.batch_size

# class DuelingDeepQNetwork(nn.Module):
#     """Dueling DQN: shared two-layer MLP trunk, then separate state-value (V)
#     and advantage (A) streams combined into Q-values."""
#     def __init__(self, alpha, state_dim, max_action, fc1_dim, fc2_dim):
#         super(DuelingDeepQNetwork, self).__init__()

#         self.fc1 = nn.Linear(state_dim, fc1_dim)
#         self.fc2 = nn.Linear(fc1_dim, fc2_dim)
#         self.V = nn.Linear(fc2_dim, 1)           # state-value stream
#         self.A = nn.Linear(fc2_dim, max_action)  # advantage stream

#         self.optimizer = optim.Adam(self.parameters(), lr=alpha)
#         self.to(device)

#     def forward(self, state):
#         x = T.relu(self.fc1(state))
#         x = T.relu(self.fc2(x))

#         V = self.V(x)
#         A = self.A(x)
#         # Q = V + (A - mean(A)): subtracting the advantage mean makes the
#         # V/A decomposition identifiable (standard dueling-DQN aggregation).
#         Q = V + A - T.mean(A, dim=-1, keepdim=True)

#         return Q

#     def save_checkpoint(self, checkpoint_file):
#         # BUGFIX: torch.save() has no `weights_only` parameter (that keyword
#         # belongs to torch.load); passing it raised a TypeError at save time.
#         T.save(self.state_dict(), checkpoint_file)

#     def load_checkpoint(self, checkpoint_file):
#         # BUGFIX: `weights_only=True` was passed to load_state_dict(), which
#         # does not accept it; it belongs on torch.load() (safe unpickling).
#         self.load_state_dict(T.load(checkpoint_file, weights_only=True))


# class NetWorkProxy:
#     """Dueling double-DQN agent: owns the eval/target networks, the replay
#     buffer, the epsilon-greedy policy, checkpointing, TensorBoard logging,
#     and the train/test interaction loop against a RouteEnv."""
#     def __init__(self, state_dim, action_dim, max_action, args):
#         # Log/checkpoint directories are derived from this script's filename.
#         script_name = os.path.basename(__file__)
#         script_name = os.path.splitext(script_name)[0]
#         self.log_directory = './Log/' + script_name +'/'
#         self.pak_directory = './Pak/' + script_name +'/'
#         self.args = args
#         self.gamma = args.gamma          # discount factor
#         self.tau = args.tau              # Polyak soft-update coefficient
#         self.epsilon = args.epsilon      # current exploration rate
#         self.eps_min = args.eps_end
#         self.eps_dec = args.eps_dec
#         self.batch_size = args.batch_size
#         self.action_space = round(max_action)  # number of discrete actions
#         self.reward_step_counter = 0
#         self.learn_step_counter = 0

#         self.q_eval = DuelingDeepQNetwork(args.learning_rate, state_dim, self.action_space, args.fc1_dim, args.fc2_dim)
#         self.q_target = DuelingDeepQNetwork(args.learning_rate, state_dim, self.action_space, args.fc1_dim, args.fc2_dim)
#         self.memory = ReplayBuffer(state_dim=state_dim, action_dim=action_dim,
#                                    max_size=args.capacity, batch_size=args.batch_size)

#         if self.args.mode == 'test' or self.args.load:
#             self.load()

#         # tau=1.0 makes the target an exact copy of the eval net at start-up.
#         self.update_network_parameters(tau=1.0)

#         os.makedirs(self.log_directory, exist_ok=True)
#         os.makedirs(self.pak_directory, exist_ok=True)
#         self.writer = SummaryWriter(self.log_directory)

#     def update_network_parameters(self, tau=None):
#         """Polyak soft update: target <- tau*eval + (1-tau)*target."""
#         if tau is None:
#             tau = self.tau

#         for q_target_params, q_eval_params in zip(self.q_target.parameters(), self.q_eval.parameters()):
#             q_target_params.data.copy_(tau * q_eval_params + (1 - tau) * q_target_params)

#     def remember(self, state, action, reward, state_, done):
#         self.memory.store_transition(state, action, reward, state_, done)

#     def decrement_epsilon(self):
#         """Linear epsilon decay, clamped at eps_min."""
#         self.epsilon = self.epsilon - self.eps_dec \
#             if self.epsilon > self.eps_min else self.eps_min

#     def select_action(self, state, isTrain=True):
#         """Epsilon-greedy action selection; pure greedy when isTrain=False."""
#         # PERF: test the exploration branch first so the (discarded) forward
#         # pass through q_eval is skipped when a random action is taken anyway.
#         if isTrain and (np.random.random() < self.epsilon):
#             return np.random.choice(self.action_space)

#         state = T.unsqueeze(T.FloatTensor(state), 0).to(device)
#         q_vals = self.q_eval.forward(state)
#         return T.argmax(q_vals).item()

#     def learn(self):
#         """One double-DQN gradient step from a sampled replay batch."""
#         if not self.memory.ready():
#             return
        
#         self.learn_step_counter += 1

#         states, actions, rewards, next_states, terminals = self.memory.sample_buffer()
#         batch_idx = T.arange(self.batch_size, dtype=T.long).to(device)
#         states_tensor = T.tensor(states, dtype=T.float).to(device)
#         actions_tensor = T.tensor(actions, dtype=T.long).to(device)
#         rewards_tensor = T.tensor(rewards, dtype=T.float).to(device)
#         next_states_tensor = T.tensor(next_states, dtype=T.float).to(device)
#         terminals_tensor = T.tensor(terminals).to(device)

#         with T.no_grad():
#             # Double DQN: q_eval chooses the next action, q_target evaluates it.
#             q_ = self.q_target.forward(next_states_tensor)
#             max_actions = T.argmax(self.q_eval.forward(next_states_tensor), dim=-1)
#             q_[terminals_tensor] = 0.0  # no bootstrap value past terminal states
#             target = rewards_tensor + self.gamma * q_[batch_idx, max_actions]
#         q = self.q_eval.forward(states_tensor)[batch_idx, actions_tensor]

#         # `target` was built under no_grad, so a .detach() here is redundant.
#         loss = F.mse_loss(q, target)
#         self.q_eval.optimizer.zero_grad()

#         self.writer.add_scalar('Loss/loss_func', loss, global_step=self.learn_step_counter)

#         loss.backward()
#         self.q_eval.optimizer.step()

#         self.update_network_parameters()
#         self.decrement_epsilon()

#     def save(self, episode=None):
#         # BUGFIX: the episode-less branch called .format(episode) on literals
#         # with no placeholder -- a silent no-op; folded both branches together.
#         suffix = '' if episode is None else '_{}'.format(episode)
#         self.q_eval.save_checkpoint(self.pak_directory + 'Q_eval{}.pth'.format(suffix))
#         self.q_target.save_checkpoint(self.pak_directory + 'Q_target{}.pth'.format(suffix))
#         print("====================================")
#         print("Model has been saved...")
#         print("====================================")

#     def load(self):
#         self.q_eval.load_checkpoint(self.pak_directory + 'Q_eval.pth')
#         self.q_target.load_checkpoint(self.pak_directory + 'Q_target.pth')
#         print("====================================")
#         print("model has been loaded...")
#         print("====================================")

#     def game_loop(self, env:RouteEnv, car_index):
#         """Run the agent against `env` in either 'test' or 'train' mode.
#
#         NOTE(review): env.step is assumed to return
#         (next_state, reward, done, resolved, info) and state[1]/state[2] are
#         read as remaining time/energy -- confirm against RouteEnv.
#         """
#         if self.args.mode == 'test':
#             for i in range(self.args.test_iteration):
#                 total_reward = 0
#                 resolved = False
#                 state = env.reset(car_index)

#                 # Greedy rollout; keep resetting on `done` until resolved.
#                 while not resolved:
#                     action = self.select_action(state, False)
#                     next_state, reward, done, resolved, _ = env.step(action)
#                     total_reward += reward

#                     state = next_state

#                     if resolved:
#                         print("car {} find a solution!".format(car_index))
#                         # Typo fix: "Steill" -> "Still" in the report string.
#                         print('reward: \t{}, path is {}, Still have Energy = {} Time = {}'.format(reward, env.path, state[2], state[1]))
#                     if done:
#                         state = env.reset(car_index)
#                         total_reward = 0
#                         continue

#         elif self.args.mode == 'train':
#             for i in range(self.args.train_iteration):
#                 total_reward = 0
#                 state = env.reset(car_index)

#                 for t in range(self.args.max_episode):
#                     self.reward_step_counter += 1
#                     action = self.select_action(state, True)

#                     next_state, reward, done, resolved, _ = env.step(action)

#                     self.remember(state, action, reward, next_state, done)
#                     self.learn()

#                     state = next_state
#                     total_reward += reward

#                     if done:
#                         if resolved:
#                             print("car {} find a solution!".format(car_index))
#                             # Typo fix: "Steill" -> "Still" in the report string.
#                             print('reward: \t{:.4f}, path is {}, Still have Energy = {} Time = {}'.format(reward, env.path, state[2], state[1]))
#                             self.writer.add_scalar('reward/resolved_reward', reward, global_step=self.reward_step_counter)

#                             # Checkpoint only at the very last training iteration.
#                             if i == (self.args.train_iteration - 1):
#                                 self.save()
#                         break

#                 print("Iteration: \t{} Total Reward: \t{:.4f}".format(i, total_reward))
#                 self.writer.add_scalar('reward/total_reward', total_reward, global_step=self.reward_step_counter)

#         else:
#             raise NameError("mode wrong!!!")
#         env.close()
#         self.writer.close()