import gym
import torch
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from env import Env


class PolicyNet_node(torch.nn.Module):
    """Two-layer MLP policy head producing a probability distribution over nodes.

    Input:  a batch of flat state vectors, shape (batch, state_dim).
    Output: softmax probabilities over ``node_num`` nodes, shape (batch, node_num).
    """

    def __init__(self, state_dim, hidden_dim, node_num):
        super(PolicyNet_node, self).__init__()
        # Keep the layer names fc1/fc2 so state_dict keys stay compatible.
        self.fc1 = torch.nn.Linear(state_dim, hidden_dim)
        self.fc2 = torch.nn.Linear(hidden_dim, node_num)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        logits = self.fc2(hidden)
        # Normalize per sample (dim=1) into a categorical distribution.
        return F.softmax(logits, dim=1)


class PolicyNet_vehicle(torch.nn.Module):
    """Two-layer MLP policy head producing a probability distribution over vehicles.

    Input:  a batch of flat state vectors, shape (batch, state_dim).
    Output: softmax probabilities over ``vehicle_num`` vehicles,
            shape (batch, vehicle_num).
    """

    def __init__(self, state_dim, hidden_dim, vehicle_num):
        super(PolicyNet_vehicle, self).__init__()
        # Keep the layer names fc1/fc2 so state_dict keys stay compatible.
        self.fc1 = torch.nn.Linear(state_dim, hidden_dim)
        self.fc2 = torch.nn.Linear(hidden_dim, vehicle_num)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        logits = self.fc2(hidden)
        # Normalize per sample (dim=1) into a categorical distribution.
        return F.softmax(logits, dim=1)


class REINFORCE:
    def __init__(self, state_dim, hidden_dim, node_num, vehicle_num, learning_rate, gamma,
                 device):
        self.policy_net_node = PolicyNet_node(state_dim, hidden_dim,
                                              node_num).to(device)
        self.policy_net_vehicle = PolicyNet_vehicle(state_dim, hidden_dim,
                                                    vehicle_num).to(device)
        self.optimizer1 = torch.optim.Adam(self.policy_net_node.parameters(),
                                           lr=learning_rate)  # 使用Adam优化器
        self.optimizer2 = torch.optim.Adam(self.policy_net_vehicle.parameters(),
                                           lr=learning_rate)
        self.gamma = gamma  # 折扣因子
        self.device = device

    def get_node(self, state):  # 根据动作概率分布随机采样
        state = state.to(self.device)
        probs = self.policy_net_node(state)
        probs = torch.mul(probs, torch.logical_not(state[:, 0:21]))
        node_dist = torch.distributions.Categorical(probs)
        node = node_dist.sample()
        node = node.unsqueeze(1)
        return node

    def get_vehicle(self, state):
        state = state.to(self.device)
        probs = self.policy_net_vehicle(state)
        vehicle_dist = torch.distributions.Categorical(probs)
        vehicle = vehicle_dist.sample()
        vehicle = vehicle.unsqueeze(1)
        return vehicle

    def update(self, transition_dict):
        reward_list = transition_dict['rewards']
        state_list = transition_dict['states']
        node_list = transition_dict['node']
        vehicle_list = transition_dict['vehicle']
        G = 0
        self.optimizer1.zero_grad()
        self.optimizer2.zero_grad()
        for i in reversed(range(len(reward_list))):  # 从最后一步算起
            reward = reward_list[i]
            state = state_list[i].to(self.device)
            node = node_list[i].view(-1, 1).to(self.device)
            print(state.shape)
            print(node.shape)
            log_prob = torch.log(self.policy_net_node
                                 (state).gather(1, node))
            G = self.gamma * G + reward
            loss = log_prob * G  # 每一步的损失函数
            loss.backward()  # 反向传播计算梯度
        self.optimizer1.step()

        for i in reversed(range(len(reward_list))):  # 从最后一步算起
            reward = reward_list[i]
            state = state_list[i].to(self.device)
            vehicle = vehicle_list[i].view(-1, 1).to(self.device)
            log_prob = torch.log(self.policy_net_vehicle
                                 (state).gather(1, vehicle))
            G = self.gamma * G + reward
            loss = log_prob * G  # 每一步的损失函数
            loss.backward()  # 反向传播计算梯度
        self.optimizer2.step()


# ---------------- Hyper-parameters ----------------
learning_rate = 5e-5
num_episodes = 1000
hidden_dim = 128
gamma = 0.98  # discount factor
device = torch.device("cuda") if torch.cuda.is_available() else torch.device(
    "cpu")


# ---------------- Environment and agent ----------------
env = Env()
# env.seed(0)
# torch.manual_seed(0)
state_dim = 3 * 21   # flattened 3x21 feature matrix — assumed from Env; TODO confirm
node_num = 21        # number of selectable nodes (matches the 21-wide mask in get_node)
vehicle_num = 3
agent = REINFORCE(state_dim, hidden_dim, node_num, vehicle_num, learning_rate, gamma,
                  device)

# ---------------- Training loop ----------------
return_list = []  # cumulative reward of each episode
for i_episode in range(int(num_episodes)):
    episode_return = 0
    # Episode buffer; consumed by agent.update() once the episode ends.
    transition_dict = {
        'states': [],
        'node': [],
        'next_states': [],
        'rewards': [],
        'vehicle': []
    }

    _, state = env.reset()
    state = state.view(1, -1)  # flatten to a single-row batch
    done = False
    while not done:
        vehicle = agent.get_vehicle(state)
        node = agent.get_node(state)
        _, next_state, reward, done = env.step(vehicle, node)
        reward = reward.item()
        next_state = next_state.view(1, -1)
        transition_dict['states'].append(state)
        transition_dict['node'].append(node)
        transition_dict['next_states'].append(next_state)
        transition_dict['rewards'].append(reward)
        transition_dict['vehicle'].append(vehicle)
        state = next_state
        episode_return += reward
    return_list.append(episode_return)

    agent.update(transition_dict)

    # Report the running mean of the last 10 episode returns.
    if (i_episode + 1) % 10 == 0:
        print("episodes: %d, reward: %f" % (i_episode + 1, np.mean(return_list[-10:])))