# import os, sys, random
# import numpy as np
# import torch
# import torch.nn as nn
# import torch.nn.functional as F
# from tensorboardX import SummaryWriter
# from route import RouteEnv


# script_name = os.path.basename(__file__)
# script_name = os.path.splitext(script_name)[0]
# log_directory = './Log/' + script_name +'/'
# pak_directory = './Pak/' + script_name +'/'
# device = 'cuda' if torch.cuda.is_available() else 'cpu'

# class Net(nn.Module):
#     def __init__(self, state_dim, action_dim):
#         super(Net, self).__init__()
#         self.fc1 = nn.Linear(round(state_dim), 128)
#         self.fc1.weight.data.normal_(0,0.1)
#         self.fc2 = nn.Linear(128, 64)
#         self.fc2.weight.data.normal_(0,0.1)
#         self.out = nn.Linear(64, round(action_dim))
#         self.out.weight.data.normal_(0,0.1)

#     def forward(self,x):
#         x = self.fc1(x)
#         x = F.relu(x)
#         x = self.fc2(x)
#         x = F.relu(x)
#         action_prob = self.out(x)
#         return action_prob

# class NetWorkProxy():
#     def __init__(self, state_dim, action_dim, action_range, args):
#         super(NetWorkProxy, self).__init__()
#         self.args = args
#         self.state_dim = state_dim
#         self.action_dim = action_dim
#         self.action_range = action_range
#         self.capacity = round(self.args.capacity)
#         self.eval_net = Net(state_dim, action_dim).to(device)
#         self.target_net = Net(state_dim, action_dim).to(device)

#         if self.args.mode == 'test' or self.args.load:
#             self.load()

#         self.target_net.load_state_dict(self.eval_net.state_dict())
#         self.learn_step_counter = 0
#         self.reward_step_counter = 0
#         self.memory_counter = 0
#         self.memory = np.zeros((self.capacity, state_dim * 2 + 2))
#         self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=self.args.learning_rate)
#         self.loss_func = nn.MSELoss().to(device)

#         os.makedirs(log_directory, exist_ok=True)
#         os.makedirs(pak_directory, exist_ok=True)
#         self.writer = SummaryWriter(log_directory)

#     def select_action(self, state):
#         state = torch.unsqueeze(torch.FloatTensor(state), 0).to(device)
#         if np.random.randn() >= self.args.epsilon:
#         # NOTE(review): np.random.randn() draws from a standard normal, not a
#         # uniform on [0, 1), so comparing it to epsilon is not a true
#         # epsilon-greedy policy; np.random.rand() is the usual choice —
#         # confirm intent before re-enabling this code.
#             action_value = self.eval_net.forward(state).cpu()
#             action = torch.max(action_value, 1)[1].data.numpy()
#             action = action[0]
#         else: # random policy
#             action = np.random.randint(0, self.action_dim)
#             action = action
#         return action

#     def store_transition(self, state, action, reward, next_state):
#         transition = np.hstack((state, [action, reward], next_state))
#         index = self.memory_counter % self.capacity
#         self.memory[index, :] = transition
#         self.memory_counter += 1

#     def update(self):
#         if self.learn_step_counter % self.args.max_episode == 0:
#             self.target_net.load_state_dict(self.eval_net.state_dict())
#         self.learn_step_counter+=1

#         sample_index = np.random.choice(self.capacity, self.args.batch_size)
#         # NOTE(review): this samples indices over the full capacity even
#         # before the buffer has filled, so early batches can contain
#         # all-zero rows — consider min(self.memory_counter, self.capacity)
#         # as the upper bound when re-enabling.
#         batch_memory = self.memory[sample_index, :]
#         batch_state = torch.FloatTensor(batch_memory[:, :self.state_dim]).to(device)
#         batch_action = torch.LongTensor(batch_memory[:, self.state_dim:self.state_dim+1].astype(int)).to(device)
#         batch_reward = torch.FloatTensor(batch_memory[:, self.state_dim+1:self.state_dim+2]).to(device)
#         batch_next_state = torch.FloatTensor(batch_memory[:, -self.state_dim:]).to(device)

#         q_eval = self.eval_net(batch_state).gather(1, batch_action)
#         q_next = self.target_net(batch_next_state).detach()
#         q_target = batch_reward + self.args.gamma * q_next.max(1)[0].view(self.args.batch_size, 1)
#         loss = self.loss_func(q_eval, q_target)

#         self.writer.add_scalar('Loss/loss_func', loss, global_step=self.learn_step_counter)

#         self.optimizer.zero_grad()
#         loss.backward()
#         self.optimizer.step()

#     def save(self, episode=None):
#         if episode is not None:
#             torch.save(self.eval_net.state_dict(), pak_directory + '{}_eval_net.pth'.format(episode))
#         else:
#             torch.save(self.eval_net.state_dict(), pak_directory + 'eval_net.pth')
#         print("====================================")
#         print("Model has been saved...")
#         print("====================================")

#     def load(self):
#         self.eval_net.load_state_dict(torch.load(pak_directory + 'eval_net.pth', weights_only=True))
#         print("====================================")
#         print("Model has been loaded...")
#         print("====================================")

#     def game_loop(self, env:RouteEnv, car_index):

#         if self.args.mode == 'test':
#             for i in range(self.args.test_iteration):
#                 total_reward = 0
#                 resolved = False
#                 state = env.reset(car_index)

#                 #for t in range(self.args.max_episode):
#                 while not resolved:
#                     action = self.select_action(state)
#                     next_state, reward, done, resolved, _ = env.step(action)
#                     total_reward += reward
                    
#                     state = next_state

#                     if resolved:
#                         print("car {} found a solution!".format(car_index))
#                         print('reward: \t{}, path is {}, still has Energy = {} Time = {}'.format(total_reward, env.path, state[2], state[1]))
#                         total_reward = 0
#                     if done:
#                         state = env.reset(car_index)
#                         total_reward = 0
#                         continue

#         elif self.args.mode == 'train':
#             for i in range(self.args.train_iteration):
#                 total_reward = 0
#                 state = env.reset(car_index)

#                 for t in range(self.args.max_episode):
#                     self.reward_step_counter += 1
#                     action = self.select_action(state)

#                     next_state, reward, done, resolved, _ = env.step(action)
#                     self.store_transition(state, action, reward, next_state)

#                     state = next_state
#                     total_reward += reward

#                     if done:
#                         if resolved:
#                             print("car {} found a solution!".format(car_index))
#                             print('reward: \t{:.4f}, path is {}, still has Energy = {} Time = {}'.format(reward, env.path, state[2], state[1]))
#                             print("Iteration: \t{} Total Reward: \t{:.4f}".format(i, total_reward))
#                             self.writer.add_scalar('reward/resolved_reward', reward, global_step=self.reward_step_counter)

#                             if i == (self.args.train_iteration - 1):
#                                 #self.save(i)
#                                 self.save()
#                         break

#                 self.update()
#                 #print("Iteration: \t{} Total Reward: \t{:.4f}".format(i, total_reward))
#                 self.writer.add_scalar('reward/total_reward', total_reward, global_step=self.reward_step_counter)

#         else:
#             raise NameError("mode wrong!!!")
#         env.close()
#         self.writer.close()