import os, sys, random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from collections import namedtuple
from itertools import count
from torch.distributions import Normal, Categorical
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
from tensorboardX import SummaryWriter
from route import RouteEnv

# Derive per-script log/checkpoint directories from this file's name so that
# several experiment scripts can run side by side without clobbering output.
script_name = os.path.basename(__file__)
script_name = os.path.splitext(script_name)[0]
log_directory = './Log/' + script_name +'/'
pak_directory = './Pak/' + script_name +'/'
# NOTE(review): `device` is computed but never used below — networks and
# tensors stay on CPU; confirm whether .to(device) calls were intended.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# One step of experience. CAUTION: the 'a_log_prob' field actually holds the
# action's raw probability (see NetWorkProxy.select_action), not a log-prob.
Transition = namedtuple('Transition', ['state', 'action',  'a_log_prob', 'reward', 'next_state'])

# Algorithm/architecture switch: True selects the deeper 64->8 networks and
# the one-step TD-target ("PPO2") update path; False selects the 100-unit
# networks and Monte-Carlo returns.
PPO2 = False

class Actor(nn.Module):
    """Policy network: maps a state vector to a categorical distribution
    over `max_action` discrete actions.

    The architecture is chosen once at construction by the module-level
    ``PPO2`` flag: a deeper ``num_state -> 64 -> 8 -> max_action`` stack with
    leaky-ReLU activations when True, otherwise a single
    ``num_state -> 100 -> max_action`` hidden layer with ReLU.
    """

    def __init__(self, num_state, max_action):
        # FIX: the original called super().__init__() separately in both
        # branches; hoist the single required call out of the conditional.
        super(Actor, self).__init__()
        if PPO2:
            self.fc1 = nn.Linear(num_state, 64)
            self.fc2 = nn.Linear(64, 8)
            self.action_head = nn.Linear(8, max_action)
        else:
            self.fc1 = nn.Linear(num_state, 100)
            self.action_head = nn.Linear(100, max_action)

    def forward(self, x):
        """Return per-action probabilities, shape (batch, max_action).

        dim=1 softmax assumes a batched 2-D input; callers unsqueeze single
        states before calling (see NetWorkProxy.select_action).
        """
        if PPO2:
            x = F.leaky_relu(self.fc1(x))
            x = F.leaky_relu(self.fc2(x))
        else:
            x = F.relu(self.fc1(x))
        return F.softmax(self.action_head(x), dim=1)

class Critic(nn.Module):
    """Value network: maps a state vector to a scalar state-value estimate.

    Mirrors Actor's architecture switch on the module-level ``PPO2`` flag:
    ``num_state -> 64 -> 8 -> 1`` with leaky-ReLU when True, otherwise
    ``num_state -> 100 -> 1`` with ReLU.
    """

    def __init__(self, num_state):
        # FIX: the original called super().__init__() separately in both
        # branches; hoist the single required call out of the conditional.
        super(Critic, self).__init__()
        if PPO2:
            self.fc1 = nn.Linear(num_state, 64)
            self.fc2 = nn.Linear(64, 8)
            self.state_value = nn.Linear(8, 1)
        else:
            self.fc1 = nn.Linear(num_state, 100)
            self.state_value = nn.Linear(100, 1)

    def forward(self, x):
        """Return V(s), shape (batch, 1)."""
        if PPO2:
            x = F.leaky_relu(self.fc1(x))
            x = F.leaky_relu(self.fc2(x))
        else:
            x = F.relu(self.fc1(x))
        return self.state_value(x)

class NetWorkProxy():
    def __init__(self, state_dim, max_action, action_range, args):
        self.args = args
        self.actor_net = Actor(state_dim, max_action).float()
        self.critic_net = Critic(state_dim).float()
        self.buffer = []
        self.max_action = max_action
        self.counter = 0
        self.training_step = 0
        self.reward_step_counter = 0

        if self.args.mode == 'test' or self.args.load:
            self.load()

        self.actor_optimizer = optim.Adam(self.actor_net.parameters(), lr=args.learning_rate)
        self.critic_net_optimizer = optim.Adam(self.critic_net.parameters(), lr=args.critic_rate)

        os.makedirs(log_directory, exist_ok=True)
        os.makedirs(pak_directory, exist_ok=True)
        self.writer = SummaryWriter(log_directory)

    def select_action(self, state):
        state = torch.from_numpy(state).float().unsqueeze(0)
        with torch.no_grad():
            action_prob = self.actor_net(state)
        c = Categorical(action_prob)
        action = c.sample()

        return action.item(), action_prob[:, action].item()

    def store_transition(self, transition):
        self.buffer.append(transition)
        self.counter += 1
        return self.counter % self.args.capacity == 0

    def update(self):
        if self.training_step % 1000 == 0:
            print('train {} times'.format(self.training_step))
        self.training_step += 1

        if PPO2:
            state = torch.tensor(np.array([t.state for t in self.buffer]), dtype=torch.float)
            action = torch.tensor(np.array([t.action for t in self.buffer]), dtype=torch.float).view(-1, 1)
            reward = torch.tensor(np.array([t.reward for t in self.buffer]), dtype=torch.float).view(-1, 1)
            next_state = torch.tensor(np.array([t.next_state for t in self.buffer]), dtype=torch.float)
            old_action_log_prob = torch.tensor(np.array([t.a_log_prob for t in self.buffer]), dtype=torch.float).view(-1, 1)

            reward = (reward - reward.mean())/(reward.std() + 1e-10)
            with torch.no_grad():
                target_v = reward + self.args.gamma * self.critic_net(next_state)

            advantage = (target_v - self.critic_net(state)).detach()
            for _ in range(self.args.ppo_epoch): # iteration ppo_epoch 
                for index in BatchSampler(SubsetRandomSampler(range(len(self.buffer))), self.args.batch_size, True):
                    # epoch iteration, PPO core!!!
                    action_log_prob = self.actor_net(state[index]).gather(1, action[index].long())
                    ratio = torch.exp(action_log_prob - old_action_log_prob[index])
                    
                    L1 = ratio * advantage[index]
                    L2 = torch.clamp(ratio, 1-self.args.clip_param, 1+self.args.clip_param) * advantage[index]
                    action_loss = -torch.min(L1, L2).mean() # MAX->MIN desent
                    self.writer.add_scalar('Loss/actor_loss', action_loss, global_step=self.training_step)
                    self.actor_optimizer.zero_grad()
                    action_loss.backward()
                    nn.utils.clip_grad_norm_(self.actor_net.parameters(), self.args.max_grad_norm)
                    self.actor_optimizer.step()

                    value_loss = F.smooth_l1_loss(self.critic_net(state[index]), target_v[index])
                    self.writer.add_scalar('Loss/critic_loss', value_loss, global_step=self.training_step)
                    self.critic_net_optimizer.zero_grad()
                    value_loss.backward()
                    nn.utils.clip_grad_norm_(self.critic_net.parameters(), self.args.max_grad_norm)
                    self.critic_net_optimizer.step()
                    del self.buffer[:] # clear experience
        else:
            state = torch.tensor(np.array([t.state for t in self.buffer]), dtype=torch.float)
            action = torch.tensor(np.array([t.action for t in self.buffer]), dtype=torch.long).view(-1, 1)
            reward = [t.reward for t in self.buffer]
            #reward = torch.tensor([t.reward for t in self.buffer], dtype=torch.float).view(-1, 1)
            #next_state = torch.tensor([t.next_state for t in self.buffer], dtype=torch.float)
            old_action_log_prob = torch.tensor([t.a_log_prob for t in self.buffer], dtype=torch.float).view(-1, 1)

            R = 0
            Gt = []
            for r in reward[::-1]:
                R = r + self.args.gamma * R
                Gt.insert(0, R)
            Gt = torch.tensor(Gt, dtype=torch.float)

            for i in range(self.args.ppo_epoch):
                for index in BatchSampler(SubsetRandomSampler(range(len(self.buffer))), self.args.batch_size, False):
                    #with torch.no_grad():
                    Gt_index = Gt[index].view(-1, 1)
                    V = self.critic_net(state[index])
                    delta = Gt_index - V
                    advantage = delta.detach()
                    # epoch iteration, PPO core!!!
                    action_prob = self.actor_net(state[index]).gather(1, action[index]) # new policy

                    ratio = (action_prob/old_action_log_prob[index])
                    surr1 = ratio * advantage
                    surr2 = torch.clamp(ratio, 1 - self.args.clip_param, 1 + self.args.clip_param) * advantage

                    # update actor network
                    action_loss = -torch.min(surr1, surr2).mean()  # MAX->MIN desent
                    self.writer.add_scalar('Loss/actor_loss', action_loss, global_step=self.training_step)
                    self.actor_optimizer.zero_grad()
                    action_loss.backward()
                    nn.utils.clip_grad_norm_(self.actor_net.parameters(), self.args.max_grad_norm)
                    self.actor_optimizer.step()

                    #update critic network
                    value_loss = F.mse_loss(Gt_index, V)
                    self.writer.add_scalar('Loss/critic_loss', value_loss, global_step=self.training_step)
                    self.critic_net_optimizer.zero_grad()
                    value_loss.backward()
                    nn.utils.clip_grad_norm_(self.critic_net.parameters(), self.args.max_grad_norm)
                    self.critic_net_optimizer.step()
                    del self.buffer[:] # clear experience

    def save(self, episode=None):
        if episode is not None:
            torch.save(self.actor_net.state_dict(), pak_directory + '{}_actor.pth'.format(episode))
            torch.save(self.critic_net.state_dict(), pak_directory + '{}_critic.pth'.format(episode))
        else:
            torch.save(self.actor_net.state_dict(), pak_directory + 'actor.pth')
            torch.save(self.critic_net.state_dict(), pak_directory + 'critic.pth')
        print("====================================")
        print("Model has been saved...")
        print("====================================")

    def load(self):
        self.actor_net.load_state_dict(torch.load(pak_directory + 'actor.pth', weights_only=True))
        self.critic_net.load_state_dict(torch.load(pak_directory + 'critic.pth',  weights_only=True))
        print("====================================")
        print("model has been loaded...")
        print("====================================")

    def game_loop(self, env:RouteEnv):

        if self.args.mode == 'test':
            for i in range(self.args.test_iteration):
                total_reward = 0
                resolved = False
                state = env.reset()

                #for t in range(self.args.max_episode):
                while not resolved:
                    action, action_prob = self.select_action(state)
                    action = round(action)
                    next_state, reward, done, _ = env.step(action)
                    resolved = env.resolved
                    total_reward += reward
                    
                    state = next_state

                    if resolved:
                        print('Total Reward: \t{:.4f}, path is {}, Steill have Energy = {} Time = {}'.format(total_reward, env.path, state[2], state[1]))
                        #print("Iteration: \t{}, Episode: \t{}, Total Reward: \t{}".format(i, t, total_reward))
                    if done:
                        state = env.reset()
                        continue

        elif self.args.mode == 'train':
            for i in range(self.args.train_iteration):
                total_reward = 0
                self.reward_step_counter += 1
                resolved = False
                state = env.reset()

                #for t in range(self.args.max_episode):
                while not resolved:
                    action, action_prob = self.select_action(state)
                    action = round(action)

                    next_state, reward, done, _ = env.step(action)
                    resolved = env.resolved

                    trans = Transition(state, action, action_prob, reward, next_state)
                    if self.store_transition(trans):
                        self.update()

                    state = next_state
                    total_reward += reward

                    if done:
                        if resolved:
                            print('reward: \t{:.4f}, path is {}, Steill have Energy = {} Time = {}'.format(reward, env.path, state[2], state[1]))
                            print("Iteration: \t{} Total Reward: \t{:.4f}".format(i, total_reward))
                            self.writer.add_scalar('reward/resolved_reward', reward, global_step=self.reward_step_counter)

                            if i == self.args.train_iteration - 1:
                                self.save()
                        break

                self.writer.add_scalar('reward/total_reward', total_reward, global_step=self.reward_step_counter)
        else:
            raise NameError("mode wrong!!!")
        env.close()
        self.writer.close()

def run():
    """Entry point: build the routing environment and PPO agent, then run
    one game loop per car in the environment.

    FIX: the original called ``NetWorkProxy(state_dim, action_dim,
    max_action)``, which does not match ``__init__(self, state_dim,
    max_action, action_range, args)`` and raised a TypeError (no ``args``),
    and then called ``game_loop(env, id)`` with an extra argument. The
    hyperparameter defaults below are conventional PPO values —
    NOTE(review): confirm them against the project's real configuration.
    """
    import argparse  # local import keeps the file's top-level imports untouched

    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', default='train', type=str)       # 'train' or 'test'
    parser.add_argument('--load', action='store_true')             # resume from checkpoint
    parser.add_argument('--learning_rate', default=1e-3, type=float)
    parser.add_argument('--critic_rate', default=3e-3, type=float)
    parser.add_argument('--gamma', default=0.99, type=float)
    parser.add_argument('--capacity', default=1000, type=int)      # buffer size per update
    parser.add_argument('--batch_size', default=32, type=int)
    parser.add_argument('--ppo_epoch', default=10, type=int)
    parser.add_argument('--clip_param', default=0.2, type=float)
    parser.add_argument('--max_grad_norm', default=0.5, type=float)
    parser.add_argument('--test_iteration', default=10, type=int)
    parser.add_argument('--train_iteration', default=100000, type=int)
    args = parser.parse_args()

    env = RouteEnv()
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.n      # width of the discrete policy head
    max_action = float(env.grid_size)    # forwarded as the action_range argument

    agent = NetWorkProxy(state_dim, action_dim, max_action, args)
    # NetWorkProxy.__init__ already calls load() in test/--load mode, so the
    # original's second explicit agent.load() here was redundant.

    # game_loop takes only the environment; the per-car id the original
    # passed is not part of its signature.
    for _ in range(env.car_num):
        agent.game_loop(env)

if __name__ == '__main__':
    run()

