import torch
import torch.nn as nn
import torch.optim as optim
import gym
from env_masterworkser import LHPCSEnv
import matplotlib.pyplot as plt
import numpy as np
# Assistance: ChatGPT

# Hyper Parameter
# env = gym.make('CartPole-v1')
# Custom scheduling environment; 20 is the constructor argument used
# throughout this experiment (presumably the task count — TODO confirm
# against env_masterworkser.LHPCSEnv).
env = LHPCSEnv(20)
# The observation is a 2-D grid; flatten it to one feature vector length.
State_Space = env.observation_space.shape[0] * env.observation_space.shape[1] 
# State_Space = env.observation_space.shape[0]
hidden_size = 32
Action_Space = env.action_space.n
lr = 0.001 #learning rate
gamma = 0.9 #discount 0.99 or 0.95
K = 10 #number of PPO epochs: how many times each collected batch of experience is reused for training
eps = 0.2 #epsilon: PPO clipping range for the probability ratio
TRAIN_CNT = 1000
# EPSILON = 0.9

# ==============record data ========================
# Per-episode / per-update traces used for the plots at the end of the file.
record_loss = []
record_rewards = []
record_time = []
record_energy = []
# Actor Critic Net
# Actor Critic Net
class ActorCritic(nn.Module):
    """Two-headed actor-critic network over a flattened state vector.

    The actor head maps the state to a softmax probability distribution
    over the discrete action space; the critic head maps the same state
    to a single scalar state-value estimate.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(ActorCritic, self).__init__()

        # Policy head: two hidden ReLU layers, softmax over actions.
        self.actor = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, output_size),
            nn.Softmax(dim=-1)
        )

        # Value head: same topology, single unbounded scalar output.
        self.critic = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, 1)
        )
        # (Removed the leftover `self.flag` debug counter and the
        # commented-out debug prints in forward; they were dead code.)

    def forward(self, x):
        """Return (action probabilities, state-value estimate) for x."""
        policy = self.actor(x)
        value = self.critic(x)
        return policy, value

#PPO algorithm
class PPO:
    """Proximal Policy Optimization with the clipped surrogate objective."""

    def __init__(self, input_size, hidden_size, output_size, lr, gamma, K, eps):
        self.net = ActorCritic(input_size, hidden_size, output_size)
        self.optimizer = optim.Adam(self.net.parameters(), lr=lr)
        self.gamma = gamma  # discount factor
        self.K = K          # optimization epochs per collected batch
        self.eps = eps      # clipping range for the probability ratio

    def get_action(self, state):
        """Sample an action index from the current policy for *state*."""
        state = torch.FloatTensor(state)
        policy, _ = self.net(state)
        # Sample from the categorical distribution defined by the policy head.
        action_probs = torch.distributions.Categorical(policy)
        return action_probs.sample().item()

    def get_value(self, state):
        """Return the critic's scalar value estimate for *state*."""
        state = torch.FloatTensor(state)
        _, value = self.net(state)
        return value.item()

    def train(self, buffer):
        """Run K epochs of clipped-PPO updates on one episode's buffer.

        Expects buffer keys: 'states', 'actions', 'returns', 'advantages'
        (array-likes), and 'policy' (list of detached per-step action
        distributions from the behavior policy).
        """
        states = torch.FloatTensor(buffer['states'])
        actions = torch.LongTensor(buffer['actions'])
        returns = torch.FloatTensor(buffer['returns'])
        advantages = torch.FloatTensor(buffer['advantages'])
        old_policy = torch.stack(buffer['policy'])
        # Note: buffer['value'] is only needed upstream to compute the
        # advantages; the unused `old_value` load was removed.

        for _ in range(self.K):
            policy, value = self.net(states)

            # Ratio r(theta) = pi_theta(a|s) / pi_old(a|s), via log space
            # for numerical stability.
            dist = torch.distributions.Categorical(policy)
            new_log_probs = dist.log_prob(actions)
            old_log_probs = torch.gather(old_policy, 1, actions.unsqueeze(1)).squeeze(1).log()
            ratio = (new_log_probs - old_log_probs).exp()

            # Clipped surrogate policy loss.
            clipped_ratio = torch.clamp(ratio, 1 - self.eps, 1 + self.eps)
            surrogate1 = ratio * advantages
            surrogate2 = clipped_ratio * advantages
            surrogate_loss = -torch.min(surrogate1, surrogate2).mean()

            # Value loss. BUG FIX: the critic output has shape (N, 1) while
            # `returns` has shape (N,); subtracting them directly broadcasts
            # to an (N, N) matrix and corrupts the loss. Squeeze first.
            value_loss = ((returns - value.squeeze(-1)) ** 2).mean()

            # Entropy bonus encourages exploration.
            entropy = dist.entropy().mean()
            loss = surrogate_loss + 0.5 * value_loss - 0.01 * entropy
            # Cap the trace length so the loss plot stays bounded.
            if len(record_loss) < 2000:
                record_loss.append(loss.item())
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

ppo = PPO(State_Space, hidden_size, Action_Space, lr, gamma, K, eps)

# Main loop: collect one full episode on-policy, then run a PPO update on it.
for i in range(TRAIN_CNT):
    state = env.reset()
    #flatten the 2-D observation into the 1-D vector the network expects
    state = [item for sublist in state for item in sublist]
    done = False
    score = 0
    buffer = {'states': [], 'actions': [], 'policy': [], 'value': [], 'rewards': []}
    ep_time = 0
    ep_energy = 0
    while not done:
        action = ppo.get_action(state)
        # NOTE(review): custom 6-tuple step signature, not the gym API —
        # presumably (next_state, reward, done, info, time, energy); verify
        # against env_masterworkser.LHPCSEnv.step.
        next_state, reward, done, _, time, energy = env.step(action)
        buffer['states'].append(state)
        buffer['actions'].append(action)
        # Store the full (detached) behavior-policy distribution so train()
        # can recover pi_old(a|s) for the PPO ratio.
        buffer['policy'].append(ppo.net.actor(torch.FloatTensor(state)).detach())
        buffer['value'].append(ppo.get_value(state))
        buffer['rewards'].append(reward)
        state = next_state
        #flatten
        state = [item for sublist in state for item in sublist]
        score += reward
        ep_time += time
        ep_energy += energy
    record_rewards.append(score)
    record_time.append(ep_time)
    record_energy.append(ep_energy)
    if i % 100 == 0:
        print('Episode: {}, Score: {}'.format(i, score), end=', ')
        print(env.actions)
    # Calculate advantages and returns
    returns = []
    advs = []
    discounted_reward = 0
    #get returns: discounted reward-to-go, accumulated backwards over the episode
    for r in buffer['rewards'][::-1]:
        # inserted in reverse order so returns[t] matches step t
        discounted_reward = r + ppo.gamma * discounted_reward
        returns.insert(0, discounted_reward)
    returns = torch.FloatTensor(returns)
    # get advantages: A_t = G_t - V(s_t) (no GAE)
    # NOTE(review): this inner `i` shadows the episode index above; harmless
    # because the outer `for i in range(...)` reassigns it each episode.
    for i in range(len(buffer['value'])):
        advantage = returns[i] - buffer['value'][i]
        advs.append(advantage)
    advs = torch.FloatTensor(advs)
    # Normalize advantages
    advs = (advs - advs.mean()) / (advs.std() + 1e-8)
    # Add advantages and returns to buffer
    buffer['advantages'] = advs.detach().numpy()
    buffer['returns'] = returns.detach().numpy()
    # print(returns)
    # print(advantage)
    # exit()
    # Train the network
    ppo.train(buffer)

#=========== draw =================
# Save the four training curves as PNGs, one figure each.
curves = [
    (record_rewards, 'reward', './figure/reward.png'),
    (record_time, 'time (Engine clock)', './figure/time.png'),
    (record_energy, 'energy (J)', './figure/energy.png'),
    (record_loss, 'loss', './figure/loss.png'),
]
for idx, (series, ylabel, path) in enumerate(curves):
    plt.plot(series)
    plt.xlabel('counter')
    plt.ylabel(ylabel)
    plt.savefig(path)
    # Clear between figures; the final figure is left as-is,
    # matching the original script's behavior.
    if idx < len(curves) - 1:
        plt.clf()

def dump_data(save_path, data):
    """Write repr(data) followed by a newline to *save_path*, overwriting it.

    Uses a context manager so the file handle is closed even if writing
    raises (the original open/close pair leaked the handle on error).
    """
    with open(save_path, 'w') as fp:
        print(data, file=fp)

# Optional data dumps, currently disabled; re-enable to persist the
# recorded curves alongside the figures.
# dump_data('./data/reward', record_rewards)
# dump_data('./data/energy', record_energy)
# dump_data('./data/time', record_time)
# dump_data('./data/loss', record_loss)
# dump_data('./data/tasks', env.tasks)
print("============= end ================")
# NOTE(review): dead code below — the best_* variables are not defined
# anywhere in this file, so this logging must stay disabled as-is.
# fp = open('./Agent_log.txt', 'w')
# print(best_ep_r, file=fp)
# print(best_actions, file=fp)
# print(best_tasks, file=fp)
# fp.close()