import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

# Hyperparameters shared by the networks and the PPO update loop.
LR_v = 2e-5              # Learning rate for the value-network optimizer
LR_pi = 2e-5             # Learning rate for the policy-network optimizer
K_epoch = 8              # PPO inner-update epochs per collected batch
GAMMA = 0.99             # Discount factor for future rewards
LAMBDA = 0.95            # GAE (Generalized Advantage Estimation) decay factor
CLIP = 0.2               # PPO surrogate-objective clipping range

# Policy Network for choosing actions
class Pi_net(nn.Module):
    """Gaussian policy: maps a 3-dim state to the mean and std of a
    1-dim action distribution."""

    def __init__(self):
        super(Pi_net, self).__init__()
        # Shared feature extractor: 3 -> 64 -> 128 -> 256, ReLU after each
        # hidden layer. Built in a loop; module indices (net.0, net.2, net.4)
        # match the saved checkpoints.
        layers = []
        width_in = 3
        for width_out in (64, 128, 256):
            layers.append(nn.Linear(width_in, width_out))
            layers.append(nn.ReLU())
            width_in = width_out
        self.net = nn.Sequential(*layers)
        # Distribution heads on top of the shared features.
        self.mu = nn.Linear(256, 1)     # mean head
        self.sigma = nn.Linear(256, 1)  # std head (pre-softplus)
        # Optimizer owned by the network itself.
        self.optim = torch.optim.Adam(self.parameters(), lr=LR_pi)

    def forward(self, x):
        """Return ``(mu, sigma)``: mu squashed into [-1, 1] by tanh,
        sigma made strictly positive via softplus plus a small floor."""
        features = self.net(x)
        mean = torch.tanh(self.mu(features))
        std = F.softplus(self.sigma(features)) + 0.001  # numerical floor
        return mean, std
    

# Value Network for estimating state values
class V_net(nn.Module):
    """State-value network: maps a 3-dim state to a scalar V(s)."""

    def __init__(self):
        super(V_net, self).__init__()
        # MLP: 3 -> 64 -> 128 -> 256 -> 1 with ReLU between hidden layers.
        # Built in a loop; module indices (net.0, net.2, net.4, net.6)
        # match the saved checkpoints.
        widths = (3, 64, 128, 256)
        layers = []
        for d_in, d_out in zip(widths[:-1], widths[1:]):
            layers += [nn.Linear(d_in, d_out), nn.ReLU()]
        layers.append(nn.Linear(256, 1))
        self.net = nn.Sequential(*layers)
        # Optimizer owned by the network itself.
        self.optim = torch.optim.Adam(self.parameters(), lr=LR_v)

    def forward(self, x):
        """Return the estimated value of the state batch ``x``."""
        return self.net(x)
   
    
class Agent(object):
    """PPO agent with separate policy (Pi_net) and value (V_net) networks.

    Keeps frozen "old" copies of both networks: ``old_pi`` generates
    behaviour actions and the denominator of the PPO probability ratio;
    ``old_v`` provides fixed TD targets during the K_epoch inner updates.
    """

    def __init__(self):
        self.v = V_net()                    # current value network (trained)
        self.pi = Pi_net()                  # current policy network (trained)
        self.old_pi = Pi_net()              # frozen behaviour policy
        self.old_v = V_net()                # frozen value net for TD targets

        self.load()                         # restore saved weights if available
        # Sync the frozen copies so behaviour matches the (possibly loaded)
        # current networks from the very first rollout; previously old_pi
        # kept its random init even after a checkpoint was loaded into pi.
        self.old_pi.load_state_dict(self.pi.state_dict())
        self.old_v.load_state_dict(self.v.state_dict())

        self.data = []                      # buffer of (s, a, r, s_, done)
        self.step = 0                       # number of completed updates

    # Selects an action based on the current state
    def choose_action(self, s):
        """Sample an action from the behaviour policy; returns a float."""
        with torch.no_grad():
            mu, sigma = self.old_pi(s)
            dist = torch.distributions.normal.Normal(mu, sigma)
            a = dist.sample()
        return a.item()

    # Adds a new transition to the experience buffer
    def push_data(self, transitions):
        """Append one (s, a, r, s_, done) tuple to the buffer."""
        self.data.append(transitions)

    # Drain the experience buffer into batched tensors
    def sample(self):
        """Return (s, a, r, s_, done) as float tensors; clears the buffer."""
        l_s, l_a, l_r, l_s_, l_done = [], [], [], [], []
        for s, a, r, s_, done in self.data:
            l_s.append(torch.tensor([s], dtype=torch.float))
            l_a.append(torch.tensor([[a]], dtype=torch.float))
            l_r.append(torch.tensor([[r]], dtype=torch.float))
            l_s_.append(torch.tensor([s_], dtype=torch.float))
            l_done.append(torch.tensor([[done]], dtype=torch.float))
        s = torch.cat(l_s, dim=0)
        a = torch.cat(l_a, dim=0)
        r = torch.cat(l_r, dim=0)
        s_ = torch.cat(l_s_, dim=0)
        done = torch.cat(l_done, dim=0)
        self.data = []                     # buffer is consumed once per update
        return s, a, r, s_, done

    # Update policy and value networks
    def update(self):
        """Run K_epoch PPO updates on the buffered batch, then refresh the
        frozen old networks."""
        self.step += 1
        s, a, r, s_, done = self.sample()

        with torch.no_grad():
            # Both quantities depend only on the frozen old networks, so they
            # are invariant across the K_epoch loop — compute them once.
            td_target = r + GAMMA * self.old_v(s_) * (1 - done)
            mu, sigma = self.old_pi(s)
            old_dis = torch.distributions.normal.Normal(mu, sigma)
            log_prob_old = old_dis.log_prob(a)

        for _ in range(K_epoch):
            with torch.no_grad():
                # TD error uses the *current* value net, which changes every
                # epoch, so it must be recomputed inside the loop.
                deltas = (r + GAMMA * self.v(s_) * (1 - done) - self.v(s)).numpy()
                masks = (1.0 - done).numpy()
                # Generalized Advantage Estimation, computed backwards.
                # The (1 - done) mask resets the accumulator at episode
                # boundaries so advantage does not leak across episodes.
                advantages = []
                adv = 0.0
                for t in range(len(deltas) - 1, -1, -1):
                    adv = deltas[t][0] + GAMMA * LAMBDA * masks[t][0] * adv
                    advantages.append(adv)
                advantages.reverse()
                A = torch.tensor(advantages, dtype=torch.float).reshape(-1, 1)

            ''' Policy Loss Calculation '''
            mu, sigma = self.pi(s)
            new_dis = torch.distributions.normal.Normal(mu, sigma)
            log_prob_new = new_dis.log_prob(a)
            ratio = torch.exp(log_prob_new - log_prob_old)  # pi/pi_old
            surr1 = ratio * A
            surr2 = torch.clamp(ratio, 1 - CLIP, 1 + CLIP) * A
            loss_pi = -torch.min(surr1, surr2).mean()       # clipped surrogate
            self.pi.optim.zero_grad()
            loss_pi.backward()
            self.pi.optim.step()

            ''' Value Loss Calculation '''
            loss_v = F.mse_loss(self.v(s), td_target)       # regress V toward TD target
            self.v.optim.zero_grad()
            loss_v.backward()
            self.v.optim.step()

        # Refresh the frozen networks with the newly trained parameters.
        self.old_pi.load_state_dict(self.pi.state_dict())
        self.old_v.load_state_dict(self.v.state_dict())

    # Backward-compatible alias for the original (misspelled) method name.
    updata = update

    # Save model weights for policy and value networks
    def save(self):
        torch.save(self.pi.state_dict(), 'pi.pth')
        torch.save(self.v.state_dict(), 'v.pth')
        print('...save model...')

    # Load model weights if available
    def load(self):
        try:
            self.pi.load_state_dict(torch.load('pi.pth'))
            self.v.load_state_dict(torch.load('v.pth'))
            print('...load...')
        except FileNotFoundError:
            # No checkpoint yet — start from freshly initialized weights.
            pass
