import numpy as np
import torch
import gym
from gym import spaces
import argparse
import os

import utils
import TD3
import OurDDPG
import DDPG
import numpy as np

import gym
import numpy as np
from collections import deque

class LoadBalancingEnv(gym.Env):
    """Gym environment for task load balancing over a small network.

    Observation (flattened to a 1-D numpy vector): per-link forwarding-queue
    lengths, the computation-queue length, per-link traffic, the cumulative
    load-balancing differential J, and the attributes of the newly arrived
    task.

    Action: Box in [-1, 1]^2 — dimension 0 selects the target node,
    dimension 1 selects the forwarding path.
    """

    def __init__(self):
        super(LoadBalancingEnv, self).__init__()
        # Scenario constants.
        num_links = 5
        max_queue_size = 10
        task_arrival_rate = 0.5
        max_tasks_per_arrival = 1
        self.node_num = 20  # number of compute nodes

        # Environment parameters.
        self.num_links = num_links  # number of network links
        self.max_queue_size = max_queue_size  # maximum queue length
        self.task_arrival_rate = task_arrival_rate  # task arrival rate
        self.max_tasks_per_arrival = max_tasks_per_arrival  # max tasks per arrival

        # Episode length cap.  The training loop reads this attribute for its
        # done_bool computation, and step() uses it so episodes actually
        # terminate (previously done was always False, which made the
        # evaluation loop run forever).
        self._max_episode_steps = 200
        self._step_count = 0

        # State components.
        self.forwarding_queues = [deque(maxlen=max_queue_size) for _ in range(num_links)]  # bounded per-link forwarding queues
        self.computation_queue = deque(maxlen=max_queue_size)  # simplified single global computation queue
        self.link_traffic = np.zeros(num_links, dtype=np.float32)  # traffic on each link

        # Continuous action in [-1, 1]: dim 0 -> node, dim 1 -> path.
        self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(2,), dtype=np.float32)

        # Observation space as a structured dict (flattened by get_state_flatten).
        self.observation_space = gym.spaces.Dict({
            'forwarding_queues': gym.spaces.Box(low=0, high=max_queue_size, shape=(num_links,), dtype=np.int32),  # queue lengths
            'computation_queue_length': gym.spaces.Box(low=0, high=max_queue_size, shape=(), dtype=np.int32),
            'link_traffic': gym.spaces.Box(low=0.0, high=float('inf'), shape=(num_links,), dtype=np.float32),
            'load_balancing_diff_avg': gym.spaces.Box(low=-float('inf'), high=float('inf'), shape=(), dtype=np.float32),
            'new_task': gym.spaces.Dict({  # attributes of the single newly arrived task
                'is_lhdr': gym.spaces.Discrete(2),  # 0 or 1
                'max_tolerable_delay': gym.spaces.Box(low=0.0, high=float('inf'), shape=(), dtype=np.float32),
                'computation_load': gym.spaces.Box(low=0.0, high=float('inf'), shape=(), dtype=np.float32),
                'data_volume': gym.spaces.Box(low=0.0, high=float('inf'), shape=(), dtype=np.float32),
            })
        })

    def seed(self, seed=None):
        """Seed the numpy RNG this environment draws from.

        Defined explicitly because the env uses the global np.random, and
        newer gym versions no longer provide a default Env.seed — without
        this, eval_policy's seed() call could fail.
        """
        np.random.seed(seed)
        return [seed]

    def get_state_size(self):
        """Return the length of the flattened observation vector."""
        # num_links queue lengths + num_links traffic values
        # + computation_queue_length + load_balancing_diff_avg + 4 task fields.
        self.observation_length = self.num_links * 2 + 2 + 4
        return self.observation_length

    def new_task(self):
        """Sample the attributes of one newly arrived task.

        One task per step is generated as an example; this could be extended
        to all tasks arriving in slot t.
        """
        # ~30% of tasks are flagged is_lhdr (presumably latency/priority
        # sensitive — TODO confirm the intended meaning of 'lhdr').
        is_lhdr = 1 if np.random.rand() < 0.3 else 0
        return {
            'is_lhdr': is_lhdr,
            'max_tolerable_delay': np.random.uniform(0.1, 10.0),
            'computation_load': np.random.uniform(2.0, 6.0),
            'data_volume': np.random.uniform(5.0, 10.0),
        }

    def get_state_flatten(self, state):
        """Flatten the dict observation into a 1-D numpy array.

        Relies on dict insertion order; nested dicts (the task attributes)
        contribute their values in order.
        """
        flattened = []
        for value in state.values():
            if isinstance(value, np.ndarray):
                flattened.extend(value.flatten())  # numpy arrays are flattened
            elif isinstance(value, dict):
                flattened.extend(value.values())  # nested dicts contribute their values
            else:
                flattened.append(value)  # scalars are appended directly
        return np.array(flattened)

    def reset(self):
        """Reset queues, traffic and load-balancing accumulators; return the initial flattened state."""
        self.forwarding_queues = [deque(maxlen=self.max_queue_size) for _ in range(self.num_links)]
        self.computation_queue = deque(maxlen=self.max_queue_size)
        self.link_traffic = np.zeros(self.num_links, dtype=np.float32)
        self.L_t = 0     # current load-balancing value
        self.J_t = 0     # cumulative load-balancing differential (updated in _get_next_state)
        self.J_last = 0  # previous J, fed back into the observation
        self._step_count = 0

        state = {
            'forwarding_queues': np.array([len(q) for q in self.forwarding_queues], dtype=np.int32),
            'computation_queue_length': len(self.computation_queue),
            'link_traffic': self.link_traffic,
            'load_balancing_diff_avg': self.J_last,
            'new_task': self.new_task(),
        }
        return self.get_state_flatten(state)

    def step(self, action):
        """Apply *action*; return (state, reward, done, info)."""
        state = self._get_next_state(action)
        reward = self._compute_reward(action, state)
        # Terminate on the episode step cap so training/evaluation episodes
        # end (the original always returned done=False).
        self._step_count += 1
        done = self._step_count >= self._max_episode_steps
        info = {}
        return state, reward, done, info

    def _render(self, mode='human'):
        """Rendering is not implemented for this environment."""
        pass

    def _get_next_state(self, action):
        """Apply *action* and build the next (flattened) observation.

        One task is decided per call; the two action components in [-1, 1]
        are mapped linearly onto node index range [1, node_num] and path
        index range [1, num_links].
        """
        # NOTE(review): action_node / action_path are computed but not yet
        # consumed by the placeholder transition below.
        action_node = ((self.node_num - 1) * action[0] + self.node_num + 1) / 2
        action_path = ((self.num_links - 1) * action[1] + self.num_links + 1) / 2

        # Placeholder transition: queues and traffic are rebuilt empty.
        self.forwarding_queues = [deque(maxlen=self.max_queue_size) for _ in range(self.num_links)]
        self.computation_queue = deque(maxlen=self.max_queue_size)
        self.link_traffic = np.zeros(self.num_links, dtype=np.float32)

        # Quantities needed for the load-balancing metric (placeholder values).
        self.in_state = {
            'data': [100 for _ in range(self.node_num)],       # summed task-queue data per node
            'computing': [100 for _ in range(self.node_num)],  # summed computation-queue data per node
        }
        self.fn = [np.random.uniform(50, 100) for _ in range(self.node_num)]   # per-node buffer size
        self.bn = [np.random.uniform(300, 500) for _ in range(self.node_num)]  # per-node computing capacity
        self.ul = [1 for _ in range(self.num_links)]   # current link usage
        self.wl = [10 for _ in range(self.num_links)]  # link capacity

        # Load-balancing value L_t = beta1 * std(node utilisation) + beta2 * std(link utilisation).
        mu1 = 0.5  # weight of queued data in node utilisation (not formally defined)
        mu2 = 0.5  # weight of queued computation in node utilisation
        beta1 = 0.5
        beta2 = 0.5
        Un = []
        Ul = []
        for i in range(self.node_num):
            Un.append(mu1 * self.in_state['data'][i] / self.bn[i] + mu2 * self.in_state['computing'][i] / self.fn[i])
        Ln = np.std(Un, ddof=1)  # sample std-dev across nodes
        for i in range(self.num_links):
            Ul.append(self.ul[i] / self.wl[i])
        Ll = np.std(Ul, ddof=1)  # sample std-dev across links
        self.L_t = Ln * beta1 + Ll * beta2

        # Cumulative load-balancing differential; L_th is the threshold from
        # the parameter table.
        L_th = 0.2
        self.J_t = self.L_t - L_th + self.J_last

        state = {
            'forwarding_queues': np.array([len(q) for q in self.forwarding_queues], dtype=np.int32),
            'computation_queue_length': len(self.computation_queue),
            'link_traffic': self.link_traffic,
            'load_balancing_diff_avg': self.J_last,
            'new_task': self.new_task(),
        }
        return self.get_state_flatten(state)

    def _compute_reward(self, action, state):
        """Reward = -(mean delay) - (dropped + overtime tasks) - load-balancing drift."""
        # Lyapunov-style drift of the cumulative load-balancing differential.
        z_t = (self.J_t ** 2) / 2 - (self.J_last ** 2) / 2
        self.J_last = self.J_t

        # Placeholder post-action statistics:
        # mean delay of all tasks completed in the slot (access + transmission
        # + queueing + computation delay; downlink delay ignored).
        all_mean_delay = 1
        O_dis = 1  # tasks dropped (overloaded link, full computation queue, preemption)
        O_ove = 1  # tasks that exceeded their deadline

        # Weighted combination of the three penalty terms.
        gamma1 = 0.5
        gamma2 = 0.5
        gamma3 = 0.5
        # Debug print removed: it ran once per step (millions of times over a run).
        return -gamma1 * all_mean_delay - gamma2 * (O_dis + O_ove) - gamma3 * z_t


    

def eval_policy(policy, env_name, seed, eval_episodes=10):
    """Run *policy* greedily for eval_episodes episodes; return the average reward.

    A fixed seed offset keeps the evaluation environment deterministic and
    distinct from the training stream.  env_name is currently unused because
    the custom LoadBalancingEnv is evaluated directly.
    """
    eval_env = LoadBalancingEnv()
    eval_env.seed(seed + 100)

    # Guard: LoadBalancingEnv.step may never signal done, so cap each episode
    # to avoid an infinite evaluation loop.
    max_steps = getattr(eval_env, "_max_episode_steps", 1000)

    avg_reward = 0.
    for _ in range(eval_episodes):
        state, done = eval_env.reset(), False
        steps = 0
        while not done and steps < max_steps:
            action = policy.select_action(np.array(state))
            state, reward, done, _ = eval_env.step(action)
            avg_reward += reward
            steps += 1

    avg_reward /= eval_episodes

    print("---------------------------------------")
    print(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
    print("---------------------------------------")
    return avg_reward


if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument("--policy", default="TD3")                   # Policy name (TD3, DDPG or OurDDPG)
    parser.add_argument("--env", default="HalfCheetah-v2")           # Environment name (used only for file naming here)
    parser.add_argument("--seed", default=0, type=int)               # Sets Gym, PyTorch and Numpy seeds
    parser.add_argument("--start_timesteps", default=25e3, type=int) # Time steps initial random policy is used
    parser.add_argument("--eval_freq", default=5e3, type=int)        # How often (time steps) we evaluate
    parser.add_argument("--max_timesteps", default=1e6, type=int)    # Max time steps to run environment
    parser.add_argument("--expl_noise", default=0.1, type=float)     # Std of Gaussian exploration noise
    parser.add_argument("--batch_size", default=256, type=int)       # Batch size for both actor and critic
    parser.add_argument("--discount", default=0.99, type=float)      # Discount factor
    parser.add_argument("--tau", default=0.005, type=float)          # Target network update rate
    # type=float so command-line overrides parse as numbers (a string value
    # previously crashed at `args.policy_noise * max_action`).
    parser.add_argument("--policy_noise", default=0.2, type=float)   # Noise added to target policy during critic update
    parser.add_argument("--noise_clip", default=0.5, type=float)     # Range to clip target policy noise
    parser.add_argument("--policy_freq", default=2, type=int)        # Frequency of delayed policy updates
    parser.add_argument("--save_model", action="store_true")         # Save model and optimizer parameters
    parser.add_argument("--load_model", default="")                  # Model load file name; "" doesn't load, "default" uses file_name
    args = parser.parse_args()

    file_name = f"{args.policy}_{args.env}_{args.seed}"
    print("---------------------------------------")
    print(f"Policy: {args.policy}, Env: {args.env}, Seed: {args.seed}")
    print("---------------------------------------")

    if not os.path.exists("./results"):
        os.makedirs("./results")

    if args.save_model and not os.path.exists("./models"):
        os.makedirs("./models")

    # Set seeds for reproducibility (the custom env draws from np.random).
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    env = LoadBalancingEnv()

    # Dict observation spaces have no .shape; the env reports the flattened length.
    state_dim = env.get_state_size()
    action_dim = env.action_space.shape[0]
    max_action = float(env.action_space.high[0])

    kwargs = {
        "state_dim": state_dim,
        "action_dim": action_dim,
        "max_action": max_action,
        "discount": args.discount,
        "tau": args.tau,
    }

    # Initialize policy
    if args.policy == "TD3":
        # Target policy smoothing is scaled wrt the action scale
        kwargs["policy_noise"] = args.policy_noise * max_action
        kwargs["noise_clip"] = args.noise_clip * max_action
        kwargs["policy_freq"] = args.policy_freq
        policy = TD3.TD3(**kwargs)
    elif args.policy == "OurDDPG":
        policy = OurDDPG.DDPG(**kwargs)
    elif args.policy == "DDPG":
        policy = DDPG.DDPG(**kwargs)
    else:
        # Fail fast instead of hitting a NameError on `policy` later.
        raise ValueError(f"Unknown policy: {args.policy}")

    if args.load_model != "":
        policy_file = file_name if args.load_model == "default" else args.load_model
        policy.load(f"./models/{policy_file}")

    replay_buffer = utils.ReplayBuffer(state_dim, action_dim)

    # LoadBalancingEnv may not define _max_episode_steps; fall back to a sane
    # cap instead of raising AttributeError on the first environment step.
    max_ep_steps = getattr(env, "_max_episode_steps", 1000)

    # Evaluate untrained policy
    evaluations = [eval_policy(policy, args.env, args.seed)]

    state, done = env.reset(), False
    episode_reward = 0
    episode_timesteps = 0
    episode_num = 0

    for t in range(int(args.max_timesteps)):

        episode_timesteps += 1

        # Select action randomly or according to policy
        if t < args.start_timesteps:
            action = env.action_space.sample()
        else:
            action = (
                policy.select_action(np.array(state))
                + np.random.normal(0, max_action * args.expl_noise, size=action_dim)
            ).clip(-max_action, max_action)

        # Perform action
        next_state, reward, done, _ = env.step(action)
        # Don't treat time-limit terminations as true terminal states when
        # bootstrapping.
        done_bool = float(done) if episode_timesteps < max_ep_steps else 0

        # Store data in replay buffer
        replay_buffer.add(state, action, next_state, reward, done_bool)

        state = next_state
        episode_reward += reward

        # Train agent after collecting sufficient data
        if t >= args.start_timesteps:
            policy.train(replay_buffer, args.batch_size)

        if done:
            # +1 to account for 0 indexing. +0 on ep_timesteps since it will increment +1 even if done=True
            print(f"Total T: {t+1} Episode Num: {episode_num+1} Episode T: {episode_timesteps} Reward: {episode_reward:.3f}")
            # Reset environment
            state, done = env.reset(), False
            episode_reward = 0
            episode_timesteps = 0
            episode_num += 1

        # Evaluate episode
        if (t + 1) % args.eval_freq == 0:
            evaluations.append(eval_policy(policy, args.env, args.seed))
            np.save(f"./results/{file_name}", evaluations)
            if args.save_model: policy.save(f"./models/{file_name}")