# -*- coding: UTF-8 -*-

# from numpy.lib.function_base import copy
import env5 as environment
import config
import copy
import sys,argparse,random
from env5 import trajectory
from ppo_shuyuan_dcn_pretrain import PPO_discrete

import torch
import numpy as np
import sys
import argparse
import pandas as pd
import time
import csv
import os
import pickle
from torch.utils.tensorboard import SummaryWriter
from object import delay
from object.user import generate_total_trace

# from sko.GA import GA
# from sko.DE import DE
# from sko.PSO import PSO
# from sko.SA import SA
# # from sko.ACA import ACA
# from sko.AFSA import AFSA


REJ_CONT_LIMIT = -2
REJ_STORE_LIMIT = -3

MAX_SUM_SIZE = 1424
MIN_SUM_SIZE = 10


# seed = args.seed
seed_pool = list(range(10, 1000))

## GLOBAL VARIABLES ##
global_requested_image_id = 0
global_task_cpu = 0
data = {}

gpus = [0]
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

class Seeder():
    """Cycles through the module-level ``seed_pool`` to reseed numpy's global
    RNG, wrapping around to the start of the pool when it is exhausted."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Restart iteration from the head of the seed pool.
        self.current = iter(seed_pool)

    def set_seed(self):
        """Seed numpy's global RNG with the next pooled seed (returns None)."""
        try:
            return np.random.seed(next(self.current))
        except StopIteration:
            # Pool exhausted: wrap around and take the first seed again.
            # (The old bare `except:` also swallowed unrelated errors.)
            self.reset()
            return np.random.seed(next(self.current))

class Baseline():
    def __init__(self, env, args, args1=None, dep_th=0.2):
        """Baseline scheduler wrapper around the simulation environment.

        Args:
            env: env5 environment providing tasks, nodes, images and layers.
            args: parsed CLI namespace (this class reads ``task``,
                ``episodes``, ``seed`` and ``store``).
            args1: PPO hyper-parameter namespace; only used when
                ``args.store`` is set (pre-training data collection).
            dep_th: acceptance-threshold ratio used by ``dep_soft_agent``.
        """
        # np.random.seed(10)

        self.dep_th = dep_th
        self.env = env
        self.args = args
        # self.obs = self.env.reset()
        self.task_list = self.env.allexist_task_list
        # self.first_task = self.task_list[0]
        # Random node visiting order; re-drawn on every agent call anyway.
        self.visit_sequence = np.random.permutation(config.EDGE_NODE_NUM)
        self.seeder = Seeder()
        # (s, a, logprob, r, s', dw, done) tuples collected when args.store is set.
        self.pre_training_list = []

        if self.args.store:
            # PPO agent used only to record action log-probs for pre-training.
            self.agent = PPO_discrete(args1)
        # self.visit_sequence = list(range(config.EDGE_NODE_NUM))
    # Pick the node sharing the most image layers with the task.
    def dep_agent(self, task, node_list, lb_ratio=None):
        """Dependency-aware placement: choose the feasible candidate with the
        highest ``dep_score`` for the task's image.

        Args:
            task: task object exposing ``requested_image_id`` / ``task_cpu`` /
                ``task_mem``.
            node_list: candidate edge-node objects.
            lb_ratio: optional weight blending a load-balancing term with the
                locality score (both scaled to roughly [0, 10]).

        Returns:
            Selected node id, or ``config.EDGE_NODE_NUM`` (cloud) when no edge
            node can host the task.
        """
        max_score, selected = -1, -1
        requested_image_id = task.requested_image_id
        # self.seeder.set_seed()
        # Random visit order so ties are broken uniformly across nodes.
        self.visit_sequence = np.random.permutation(len(node_list))
        task_cpu = task.task_cpu
        task_mem = task.task_mem

        # The image's total layer size does not depend on the node — hoisted
        # out of the loop instead of being recomputed per candidate.
        total_size = sum(int(self.env.Layer[self.env.layer_dict[x][-2]].size)
                         for x in self.env.Image[requested_image_id].layer_list)

        for i in self.visit_sequence:
            node = node_list[i]

            # Node lacks container slots / CPU / memory for this task.
            if not self.get_node_conta_free(node, task_cpu, task_mem):
                if selected < 0:
                    selected = REJ_CONT_LIMIT
                continue

            # Node lacks disk space for the full image.
            if total_size > self.get_node_free_disk(node):
                if selected < 0:
                    selected = REJ_STORE_LIMIT
                continue

            # Locality score between the task's image and this node (see dep_score).
            score = self.dep_score(requested_image_id, node.id)

            if lb_ratio is not None:
                # Blend locality with load balancing (fewer running containers
                # -> higher score).
                score_locality = self.scaled_score_locality(score)
                score_lb = (config.node_max_container_number - node.container_number) / config.node_max_container_number * 10
                score = lb_ratio * score_lb + (1 - lb_ratio) * score_locality

            if score > max_score:
                max_score = score
                selected = node.id

        if selected < 0:
            selected = config.EDGE_NODE_NUM  # no feasible edge node: cloud

        return selected

    # Like dep_agent, but accepts the first node whose score clears a threshold.
    def dep_soft_agent(self, task, node_list, lb_ratio=None):
        """Soft dependency-aware placement: same scoring as ``dep_agent`` but
        stops early at the first node whose score reaches
        ``dep_th * total_size``; otherwise the best scorer wins.
        ``lb_ratio`` is accepted for signature parity but unused here.
        """
        max_score, selected = -1, -1
        requested_image_id = task.requested_image_id
        self.visit_sequence = np.random.permutation(len(node_list))
        task_cpu = task.task_cpu
        task_mem = task.task_mem

        # Loop-invariant: the image's total layer size is node-independent.
        total_size = sum(int(self.env.Layer[self.env.layer_dict[x][-2]].size)
                         for x in self.env.Image[requested_image_id].layer_list)

        for i in self.visit_sequence:
            node = node_list[i]

            if not self.get_node_conta_free(node, task_cpu, task_mem):
                if selected < 0:
                    selected = REJ_CONT_LIMIT
                continue

            if total_size > self.get_node_free_disk(node):
                if selected < 0:
                    selected = REJ_STORE_LIMIT
                continue

            score = self.dep_score(requested_image_id, node.id)

            # Early accept once the score clears the soft threshold.
            if score >= self.dep_th * total_size:
                selected = node.id
                break

            if score > max_score:
                max_score = score
                selected = node.id

        if selected < 0:
            selected = config.EDGE_NODE_NUM  # cloud fallback

        return selected

    # Pick the node by the number/size of images it already holds (kube-like).
    def kube_agent(self, task, node_list, lb_ratio=None):
        """Image-locality placement: score each feasible node by
        ``sum(status * image.size)`` over ``node.image_01_list`` (presumably a
        0/1 presence mask over the image catalogue — confirm in env5) and pick
        the highest scorer; cloud as fallback.
        """
        max_score, selected = -1, -1
        requested_image_id = task.requested_image_id
        self.visit_sequence = np.random.permutation(len(node_list))
        task_cpu = task.task_cpu
        task_mem = task.task_mem

        # Loop-invariant image size, hoisted out of the candidate loop.
        total_size = sum(int(self.env.Layer[self.env.layer_dict[x][-2]].size)
                         for x in self.env.Image[requested_image_id].layer_list)

        for i in self.visit_sequence:
            node = node_list[i]
            if not self.get_node_conta_free(node, task_cpu, task_mem):
                if selected < 0:
                    selected = REJ_CONT_LIMIT
                continue

            if total_size > self.get_node_free_disk(node):
                if selected < 0:
                    selected = REJ_STORE_LIMIT
                continue

            # Total size of images present on this node.
            score = sum(status * int(self.env.Image[index].size)
                        for index, status in enumerate(node.image_01_list))

            if lb_ratio is not None:
                score_locality = self.scaled_score_locality(score)
                score_lb = (config.node_max_container_number - node.container_number) / config.node_max_container_number * 10
                score = lb_ratio * score_lb + (1 - lb_ratio) * score_locality

            if score > max_score:
                max_score = score
                selected = node.id

        if selected < 0:
            selected = config.EDGE_NODE_NUM  # cloud fallback

        return selected

    # First-fit: take the first feasible node in a random visit order.
    def monkey_agent(self, task, node_list, lb_ratio=None):
        """Random first-fit placement: visit candidates in random order and
        take the first one with enough container resources and disk space;
        fall back to the cloud when none fits. ``lb_ratio`` is accepted for
        signature parity with the other agents but unused.
        """
        selected = -1
        requested_image_id = task.requested_image_id
        self.visit_sequence = np.random.permutation(len(node_list))
        task_cpu = task.task_cpu
        task_mem = task.task_mem

        # Loop-invariant image size, hoisted out of the candidate loop.
        total_size = sum(int(self.env.Layer[self.env.layer_dict[x][-2]].size)
                         for x in self.env.Image[requested_image_id].layer_list)

        for i in self.visit_sequence:
            node = node_list[i]

            if not self.get_node_conta_free(node, task_cpu, task_mem):
                if selected < 0:
                    selected = REJ_CONT_LIMIT
                continue

            if total_size > self.get_node_free_disk(node):
                if selected < 0:
                    selected = REJ_STORE_LIMIT
                continue

            # First feasible node wins (node ids are assumed non-negative).
            selected = node.id
            if selected >= 0:
                break

        if selected < 0:
            selected = config.EDGE_NODE_NUM  # cloud fallback
        return selected

    # Pick the node with the shortest estimated image download time.
    def down_agent(self, task, node_list, lb_ratio=None):
        """Download-time-aware placement: score = -(estimated download time)
        per feasible node; the best (least negative) scorer wins.

        Bug fix: the download time was previously computed with the loop
        index ``i`` instead of ``node.id``. ``node_list`` is a filtered
        candidate list, so positions and node ids differ and the wrong node's
        layer cache was consulted — the sibling wait/comp agents already use
        ``node.id``.
        """
        # max_score starts at 1 (> any negative score) so the first feasible
        # node always claims the selection via the `max_score > 0` branch.
        max_score, selected = 1, -1
        requested_image_id = task.requested_image_id
        self.visit_sequence = np.random.permutation(len(node_list))

        task_cpu = task.task_cpu
        task_mem = task.task_mem

        # Loop-invariant image size, hoisted out of the candidate loop.
        total_size = sum(int(self.env.Layer[self.env.layer_dict[x][-2]].size)
                         for x in self.env.Image[requested_image_id].layer_list)

        for i in self.visit_sequence:
            node = node_list[i]

            if not self.get_node_conta_free(node, task_cpu, task_mem):
                if selected < 0:
                    selected = REJ_CONT_LIMIT
                continue

            if total_size > self.get_node_free_disk(node):
                if selected < 0:
                    selected = REJ_STORE_LIMIT
                continue

            # Use the node's real id (bug fix — was the loop index `i`).
            score = - self.get_download_time(requested_image_id, node.id)

            if lb_ratio is not None:
                score_locality = self.scaled_score_locality(score)
                score_lb = (config.node_max_container_number - node.container_number) / config.node_max_container_number * 10
                score = lb_ratio * score_lb + (1 - lb_ratio) * score_locality

            if max_score > 0:
                # First feasible node: take it unconditionally.
                max_score = score
                selected = node.id

            if score > max_score:
                max_score = score
                selected = node.id

        if selected < 0:
            selected = config.EDGE_NODE_NUM  # cloud fallback
        return selected
    
    def wait_agent(self, task, node_list, lb_ratio=None):
        """Placement by estimated download time plus queueing (waiting) time.

        score = -(download_time + waiting_time) per feasible node; the best
        (least negative) scorer wins, with the cloud as fallback. The unused
        debug ``score_list`` accumulator from the original was removed.
        """
        # max_score starts at 1 (> any negative score) so the first feasible
        # node always claims the selection via the `max_score > 0` branch.
        max_score, selected = 1, -1
        requested_image_id = task.requested_image_id
        task_cpu = task.task_cpu
        task_mem = task.task_mem
        self.visit_sequence = np.random.permutation(len(node_list))

        # Loop-invariant image size, hoisted out of the candidate loop.
        total_size = sum(int(self.env.Layer[self.env.layer_dict[x][-2]].size)
                         for x in self.env.Image[requested_image_id].layer_list)

        for i in self.visit_sequence:
            node = node_list[i]

            if not self.get_node_conta_free(node, task_cpu, task_mem):
                if selected < 0:
                    selected = REJ_CONT_LIMIT
                continue

            if total_size > self.get_node_free_disk(node):
                if selected < 0:
                    selected = REJ_STORE_LIMIT
                continue

            download_time = self.get_download_time(requested_image_id, node.id)
            score = - (download_time + self.get_waiting_time(requested_image_id, node.id, download_time))

            if lb_ratio is not None:
                score_locality = self.scaled_score_locality(score)
                score_lb = (config.node_max_container_number - node.container_number) / config.node_max_container_number * 10
                score = lb_ratio * score_lb + (1 - lb_ratio) * score_locality

            if max_score > 0:
                # First feasible node: take it unconditionally.
                max_score = score
                selected = node.id

            if score > max_score:
                max_score = score
                selected = node.id

        if selected < 0:
            selected = config.EDGE_NODE_NUM  # cloud fallback
        return selected

    def comp_agent(self, task, node_list, lb_ratio=None):
        """Placement by total delay: download + waiting + computation time.

        score = -(download + waiting + computation) per feasible node; the
        best scorer wins, cloud as fallback. The unused debug ``score_list``
        accumulator from the original was removed.
        """
        # max_score starts at 1 (> any negative score) so the first feasible
        # node always claims the selection via the `max_score > 0` branch.
        max_score, selected = 1, -1
        requested_image_id = task.requested_image_id
        task_cpu = task.task_cpu
        task_mem = task.task_mem
        # Size of the task's image, fed into the computation-time model.
        curnt_task_siz = [i.size for i in self.env.Image if i.name == self.env.Task[task.task_id].task_image_name[0]][0]
        self.visit_sequence = np.random.permutation(len(node_list))

        # Loop-invariant image size, hoisted out of the candidate loop.
        total_size = sum(int(self.env.Layer[self.env.layer_dict[x][-2]].size)
                         for x in self.env.Image[requested_image_id].layer_list)

        for i in self.visit_sequence:
            node = node_list[i]

            if not self.get_node_conta_free(node, task_cpu, task_mem):
                if selected < 0:
                    selected = REJ_CONT_LIMIT
                continue

            if total_size > self.get_node_free_disk(node):
                if selected < 0:
                    selected = REJ_STORE_LIMIT
                continue

            download_time = self.get_download_time(requested_image_id, node.id)
            score = - (download_time + self.get_waiting_time(requested_image_id, node.id, download_time) + self.get_computation_time(task_cpu, curnt_task_siz, node.id))

            if lb_ratio is not None:
                score_locality = self.scaled_score_locality(score)
                score_lb = (config.node_max_container_number - node.container_number) / config.node_max_container_number * 10
                score = lb_ratio * score_lb + (1 - lb_ratio) * score_locality

            if max_score > 0:
                # First feasible node: take it unconditionally.
                max_score = score
                selected = node.id

            if score > max_score:
                max_score = score
                selected = node.id

        if selected < 0:
            selected = config.EDGE_NODE_NUM  # cloud fallback
        return selected

    def rr_agent(self, task, node_list, lb_ratio=None):
        """Round-robin placement — intentionally unimplemented stub."""
        # TODO: implement round-robin selection over node_list.
        pass

    def lb_agent(self, task, node_list, lb_ratio=None):
        """Load-balancing placement — intentionally unimplemented stub."""
        # TODO: implement least-loaded node selection.
        pass

    # def ga_agent(self, task, node_list):
    #     # 遗传算法
    #     global global_requested_image_id
    #     global global_task_cpu

    #     global_requested_image_id = task.requested_image_id
    #     global_task_cpu = task.task_cpu

    #     ub = [len(node_list)]
    #     # ga = GA(func=self.get_single_reward, n_dim=1, size_pop=10, max_iter=200, prob_mut=0.001, lb=[0], ub=[config.EDGE_NODE_NUM], precision=1e-7)
    #     ga = GA(func=self.get_single_reward, n_dim=1, size_pop=10, max_iter=200, prob_mut=0.001, lb=[0],
    #             ub=ub, precision=1e-7)
    #     best_x, best_y = ga.run()
    #     node_id = node_list[int(best_x)-1].id

    #     return node_id

    # def de_agent(self, task, node_list):
    #     # 差分进化算法
    #     global global_requested_image_id
    #     global global_task_cpu
    #     global_requested_image_id = task.requested_image_id
    #     global_task_cpu = task.task_cpu
    #     ub = [len(node_list)]
    #     de = DE(func=self.get_single_reward, n_dim=1, size_pop=10, max_iter=200, lb=[0], ub=ub)
    #     best_x, best_y = de.run()
    #     node_id = node_list[int(best_x)-1].id
    #     return node_id

    # def pso_agent(self, task, node_list):
    #     # 粒子群算法
    #     global global_requested_image_id
    #     global global_task_cpu
    #     global_requested_image_id = task.requested_image_id
    #     global_task_cpu = task.task_cpu
    #     ub = [len(node_list)]
    #     pso = PSO(func=self.get_single_reward, dim=1, pop=10, max_iter=200, lb=[0], ub=ub, w=0.6, c1=0.5, c2=0.5)
    #     pso.run()
    #     node_id = node_list[int(pso.gbest_x)-1].id
    #     return node_id

    # def sa_agent(self, task, node_list):
    #     # 模拟退火算法
    #     global global_requested_image_id
    #     global global_task_cpu

    #     global_requested_image_id = task.requested_image_id
    #     global_task_cpu = task.task_cpu
    #     ub = [len(node_list)]
    #     print("ub is: ",ub)
    #     sa = SA(func=self.get_single_reward, x0=[1], T_max=1, T_min=1e-9, L=300, max_stay_counter=150, lb=[0], ub=ub)
    #     best_x, best_y = sa.run()
    #     node_id = node_list[int(best_x)-1].id
    #     return node_id

    # def aca_agent(self, task, node_list):
    #     # 蚁群算法
    #     global global_requested_image_id
    #     global global_task_cpu
    #     global_requested_image_id = task.requested_image_id
    #     global_task_cpu = task.task_cpu

    # def afsa_agent(self, task, node_list):
    #     # 人工鱼群算法
    #     global global_requested_image_id
    #     global global_task_cpu
    #     global_requested_image_id = task.requested_image_id
    #     global_task_cpu = task[1]
    #     afsa = AFSA(func=self.get_single_reward, n_dim=1, size_pop=50, max_iter=300, max_try_num=100, step=0.5, visual=0.3, q=0.98, delta=0.5, )

    def get_single_reward(self, p):
        """Objective for the (commented-out) metaheuristic agents.

        Args:
            p: candidate node index from the optimizer (float; truncated
                to int).

        Returns:
            Total size of the layers the node would still need for the image
            in ``global_requested_image_id`` (lower is better); for the cloud
            (index >= ``config.EDGE_NODE_NUM``) every layer counts.
        """
        global global_requested_image_id
        global global_task_cpu

        node_id = int(p)

        # Resolve the requested image's layer ids once (the original
        # recomputed these lists a second time further down).
        requested_layer_names = self.env.Image[global_requested_image_id].layer_list
        requested_layer_ids = [self.env.layer_dict[x][-2] for x in requested_layer_names]

        if node_id >= config.EDGE_NODE_NUM:
            # Cloud: every layer must be fetched.
            return sum(int(self.env.Layer[layer].size) for layer in requested_layer_ids)

        node_layers = self.env.Edge[node_id].layer_01_list
        score = 0
        for layer in requested_layer_ids:
            # Status -1: presumably the layer is absent on the node — confirm
            # against env5's layer_01_list semantics.
            if node_layers[layer] == -1:
                # NOTE(review): original open question — should computation
                # time also be part of this objective?
                score += int(self.env.Layer[layer].size)
        return score

    def pre_check(self, task, node_list, node_id):
        """Check whether ``node_id`` can host ``task`` (container resources
        and disk space). The cloud (``config.EDGE_NODE_NUM``) always passes.

        Bug fix: ``get_node_conta_free`` requires the task's cpu/mem demand —
        the old call passed no arguments (TypeError) and then compared the
        boolean result arithmetically (``result - 1 <= 0``), which would have
        rejected every edge node even if it had run.
        """
        if node_id == config.EDGE_NODE_NUM:
            # cloud — no capacity limits modeled here
            return True

        requested_image_id = task.requested_image_id
        node = node_list[node_id]

        if not self.get_node_conta_free(node, task.task_cpu, task.task_mem):
            return False

        total_size = sum(int(self.env.Layer[self.env.layer_dict[x][-2]].size)
                         for x in self.env.Image[requested_image_id].layer_list)
        return total_size <= self.get_node_free_disk(node)

    def dep_score(self, requested_image_id, node_id):
        """Sum of sizes of the requested image's layers whose status in the
        node's ``layer_01_list`` is -1 (presumably layers the node lacks —
        confirm against env5)."""
        edge_layers = self.env.Edge[node_id].layer_01_list
        layer_names = self.env.Image[requested_image_id].layer_list
        layer_ids = (self.env.layer_dict[name][-2] for name in layer_names)
        return sum(int(self.env.Layer[lid].size)
                   for lid in layer_ids if edge_layers[lid] == -1)
    
    def get_download_time(self, requested_image_id, node_id):
        """Estimated time to download the image's missing layers to the node.

        For the cloud (``node_id == config.EDGE_NODE_NUM``) every layer is
        counted against the cloud bandwidth; for an edge node only layers
        whose ``layer_01_list`` status is 0 are counted against the node's
        bandwidth. Bandwidths are divided by 8 — presumably bits to bytes;
        confirm the units in config/env5.
        """
        names = self.env.Image[requested_image_id].layer_list
        layer_ids = [self.env.layer_dict[name][-2] for name in names]

        if node_id == config.EDGE_NODE_NUM:
            cloud_size = sum(int(self.env.Layer[lid].size) for lid in layer_ids)
            return cloud_size / (config.cloud_bandwidth / 8)

        node = self.env.Edge[node_id]
        status = node.layer_01_list
        missing_size = sum(int(self.env.Layer[lid].size)
                           for lid in layer_ids if status[lid] == 0)
        return missing_size / (node.node_bw2 / 8)

    def get_waiting_time(self, requested_image_id, node_id, download_time):
        """Estimated wait until the image's in-flight layer downloads finish.

        ``layer_01_list`` entries are read here as per-layer download-finish
        times; the wait is the latest finish time minus the current env time,
        clamped at zero. Returns 0 for the cloud, or when ``download_time``
        is 0 (nothing left to download).
        """
        if node_id == config.EDGE_NODE_NUM:
            return 0
        if download_time == 0:
            return 0

        finish_times = self.env.Edge[node_id].layer_01_list
        names = self.env.Image[requested_image_id].layer_list
        # Latest finish time across requested layers, floored at 0 so that
        # sentinel values below zero cannot dominate (matches the original
        # accumulator that started at 0).
        latest = max(0, *[finish_times[self.env.layer_dict[name][-2]] for name in names])
        return max(0, latest - self.env.time)

    def get_computation_time(self, task_cpu, curnt_task_siz, node_id):
        """Estimated computation delay of the task on the given node.

        Cloud: ``task_cpu / cloud_cpu_frequency``. Edge: (already-queued
        workload + this task's work) / node CPU capacity, where this task's
        work is ``size * 1024 * 1024 * task_process_density`` — presumably
        ``curnt_task_siz`` is in MB; confirm units against env5.
        """
        if node_id == config.EDGE_NODE_NUM:
            return task_cpu / config.cloud_cpu_frequency

        queued_work = self.workload(node_id)
        task_work = curnt_task_siz * 1024 * 1024 * config.task_process_density
        return (queued_work + task_work) / self.env.Edge[node_id].cpu_capacity

    def workload(self, usr_belong_edgeid):
        """Total outstanding work queued for one edge node.

        Drains ``env.task_pending`` to inspect every entry, then pushes all
        entries back so the queue content is preserved (entry order may
        change, as in the original implementation).
        """
        drained = []
        pending_work = 0

        while not self.env.task_pending.empty():
            entry = self.env.task_pending.get()
            drained.append(entry)
            # entry[3] is the task id; count only tasks assigned to this node.
            if usr_belong_edgeid == self.env.Task[entry[3]].assigned_node:
                image_name = self.env.Task[entry[3]].task_image_name[0]
                size = [img.size for img in self.env.Image if img.name == image_name][0]
                pending_work += size * 1024 * 1024 * config.task_process_density

        for entry in drained:
            self.env.task_pending.put(entry)
        return pending_work

    def get_node_conta_free(self, node, task_cpu, task_mem):
        """Whether the node can take one more container for this task.

        NOTE(review): the container check only rejects when the node already
        exceeds ``node_max_container_number`` (strict ``<``); a node exactly
        at the limit still passes — confirm whether that is intended.
        """
        if config.node_max_container_number < node.container_number:
            return False
        if task_cpu > node.available_cpu:
            return False
        if task_mem > node.available_mem:
            return False
        return True

    def get_node_free_disk(self, node):
        """Free disk space on the node.

        The old implementation also summed the sizes of layers present in
        ``node.layer_01_list`` into ``layer_size``, but the result was never
        used (its return, ``node.disk - layer_size``, was commented out).
        That dead loop is removed; the node's own ``available_disk``
        bookkeeping is returned as before.
        """
        return node.available_disk

    def scaled_score_locality(self, score):
        """Map a raw locality score onto a bounded scale.

        NOTE(review): mid-range values are divided by
        ``MAX_SUM_SIZE - MIN_SUM_SIZE``, yielding roughly [0, 1] rather than
        [0, 10], so the scale jumps discontinuously at ``MAX_SUM_SIZE``.
        Kept as-is to preserve behavior — confirm the intended scaling.
        """
        if score > MAX_SUM_SIZE:
            return 10
        if score < MIN_SUM_SIZE:
            return 0
        return score / (MAX_SUM_SIZE - MIN_SUM_SIZE)

    def get_agent(self, policy='ga'):
        """Resolve a scheduling-policy name to its agent method.

        Unknown names print an error and exit. Names of the commented-out
        metaheuristic agents ('ga', 'de', 'pso', 'sa') still resolve by
        attribute lookup and therefore raise AttributeError, exactly as the
        original if/elif chain did.
        """
        method_names = {
            'dep': 'dep_agent',
            'dep_soft': 'dep_soft_agent',
            'kube': 'kube_agent',
            'monkey': 'monkey_agent',
            'down': 'down_agent',
            'wait': 'wait_agent',
            'comp': 'comp_agent',
            'ga': 'ga_agent',
            'de': 'de_agent',
            'pso': 'pso_agent',
            'sa': 'sa_agent',
        }
        if policy in method_names:
            return getattr(self, method_names[policy])
        print("--> error: unknown scheduling policy specified")
        sys.exit(1)
        
    def save_pickle(self, reward, policy):
        """Pickle the reward record to a timestamped file in the working dir."""
        stamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
        out_name = 'result_cdf_' + policy + '_' + stamp + '.txt'
        with open(out_name, 'wb') as out:
            pickle.dump(reward, out)

    def save_data(self, task_number):
        """Dump the collected per-policy results to one CSV per task count.

        Bug fix: the original wrote the header through ``writer`` and
        referenced ``policy`` before either name was defined, so the first
        call raised NameError. The statements are reordered (policy list and
        csv writer first), and the output directory is created if missing.
        """
        policy = ['ga', 'de', 'pso', 'dep', 'dep_soft', 'kube', 'monkey', 'down', 'wait', 'comp']
        os.makedirs('./result', exist_ok=True)  # avoid failing when ./result is absent
        for i in task_number:
            filename = './result/result_' + str(i) + time.strftime("%Y%m%d%H%M%S", time.localtime()) + '.csv'
            with open(filename, "w+") as f:
                writer = csv.writer(f)
                writer.writerow([policy, ['computation_delay', 'communication_delay', 'scaling_delay', 'reward']])
                for p in policy:
                    # data is keyed by (policy, task_count, node_count); see run_baseline.
                    value = data.get((p, i, config.EDGE_NODE_NUM))
                    writer.writerow([p, value])

    def run_baseline(self, policy='dep'):
        """Run ``args.episodes`` episodes of one baseline policy on the env.

        Each episode re-seeds env/numpy/torch with ``args.seed``, dispatches
        every queued task through the chosen agent, and accumulates reward and
        delay details (each scaled by 1000, except the download size). When
        ``args.store`` is False, per-task averages are logged to TensorBoard
        and recorded in the module-level ``data`` dict; when it is True,
        (s, a, a_logprob, r, s', dw, done) transitions are appended to
        ``self.pre_training_list`` for PPO pre-training and logging is skipped.
        """
        task_num = self.args.task
        total_steps = 0
        total_epochs = 0

        agent = self.get_agent(policy)

        rewards_list = []

        migratio_list = []
        total_download_time_list = []
        trans_list = []
        waiting_time_list = []
        total_download_size_list = []
        computation_delay_list = []
        arrange_delay_list = []
        scaling_delay_list = []
        backhaul_delay_list = []


        # Build a tensorboard writer (evaluation mode only; in store mode the
        # logging below is skipped via `continue`, so no writer is needed).
        if not self.args.store:
            path = './runs/out1000/env_{}_number_{}_ver_{}'.format('see', '1', policy)
            writer = SummaryWriter(log_dir=path)

        while total_epochs < self.args.episodes:
            print("total_epochs:")
            print(total_epochs)
            print("self.args.episodes:")
            print(self.args.episodes)

            # Fixed seeds per episode for reproducible comparisons.
            self.env.seed(self.args.seed)
            np.random.seed(self.args.seed)
            torch.manual_seed(self.args.seed)

            obs, _ = self.env.reset(self.args.task)

            self.task_list = self.env.task_queue
            node_list_id = obs['next_can']
            s = obs['alg_shu']

            init_uid = obs['next_uid']

            episode_steps = 0

            ep_reward = 0
            ep_migration = 0
            ep_total_download_time = 0
            ep_trans = 0
            ep_waiting_time = 0
            ep_total_download_size = 0
            ep_computation_delay = 0
            ep_arrange_delay = 0
            ep_scaling_delay = 0
            ep_backhaul_delay = 0

            done = False

            while not done:

                pre_trainning_parameters = []

                # Peek at the head task without consuming it (get + put back).
                tem_task = self.task_list.get()
                self.task_list.put(tem_task)
                task_id = tem_task[1]
                task = self.env.Task[task_id]


                # Expand the 0/1 candidate mask into concrete node objects.
                node_list = []
                node_id = []
                for index, i in enumerate(node_list_id):
                    if i == 1:
                        node_id.append(index)
                        node_list.append(self.env.Edge[index])
                # print("node_id is : ", node_id)

                if node_list == []:
                    action = config.EDGE_NODE_NUM
                else:
                    action = agent(task, node_list)

                if self.args.store:
                    # Ask the PPO agent for its log-prob of the baseline action
                    # so the transition is usable for pre-training.
                    a, a_logprob = self.agent.choose_action_2(s,init_uid,obs,action)

                obs_, reward, info, done, details = self.env.step(action)

                s_ = obs_['alg_shu']
                init_uid = obs_['next_uid']
                obs = obs_
                # Env returns a cost; negate so that larger is better.
                reward = -reward
                node_list_id = obs['next_can']


                if self.args.store:
                    # print("hahahahha")

                    if done:
                        dw = True
                    else:
                        dw = False

                    # Transition tuple: (s, a, a_logprob, r, s', dw, done).
                    pre_trainning_parameters.append(s)
                    pre_trainning_parameters.append(a)
                    pre_trainning_parameters.append(a_logprob)
                    pre_trainning_parameters.append(reward)
                    pre_trainning_parameters.append(s_)
                    pre_trainning_parameters.append(dw)
                    pre_trainning_parameters.append(done)

                    self.pre_training_list.append(pre_trainning_parameters)
                    print("pre trainingbbbbb ***************************")

                s = s_

                # details index meanings inferred from the variable names
                # below — confirm against env5.step().
                ep_reward += reward * 1000
                ep_migration += details[0] * 1000
                ep_total_download_time += details[1] * 1000
                ep_trans += details[2] * 1000
                ep_waiting_time += details[3] * 1000
                ep_total_download_size += details[4]
                ep_computation_delay += details[5] * 1000
                ep_arrange_delay += details[6] * 1000
                ep_scaling_delay += details[7] * 1000
                ep_backhaul_delay += details[8] * 1000

            total_epochs += 1

            if self.args.store:
                # Pre-training mode: only collect transitions, skip logging.
                continue

            # Per-task averages for this episode.
            tmp_r = copy.copy(ep_reward) / task_num
            tmp_migration = copy.copy(ep_migration) / task_num
            tmp_total_download_time = copy.copy(ep_total_download_time) / task_num
            tmp_trans = copy.copy(ep_trans) / task_num
            tmp_waiting_time = copy.copy(ep_waiting_time) / task_num
            tmp_total_download_size = copy.copy(ep_total_download_size) / task_num
            tmp_computation_delay = copy.copy(ep_computation_delay) / task_num
            tmp_arrange_delay = copy.copy(ep_arrange_delay) / task_num
            tmp_scaling_delay = copy.copy(ep_scaling_delay) / task_num
            tmp_backhaul_delay = copy.copy(ep_backhaul_delay) / task_num

            rewards_list.append(tmp_r)
            migratio_list.append(tmp_migration)
            total_download_time_list.append(tmp_total_download_time)
            trans_list.append(tmp_trans)
            waiting_time_list.append(tmp_waiting_time)
            total_download_size_list.append(tmp_total_download_size)
            computation_delay_list.append(tmp_computation_delay)
            arrange_delay_list.append(tmp_arrange_delay)
            scaling_delay_list.append(tmp_scaling_delay)
            backhaul_delay_list.append(tmp_backhaul_delay)

            writer.add_scalar('reward/'+str(self.args.task)+'_epoch_reward:', tmp_r, global_step=total_epochs)

            writer.add_scalar('detail/'+str(self.args.task)+'total_download_time:', tmp_total_download_time, global_step=total_epochs)
            writer.add_scalar('detail/'+str(self.args.task)+'trans:', tmp_trans, global_step=total_epochs)
            writer.add_scalar('detail/'+str(self.args.task)+'waiting_time:', tmp_waiting_time, global_step=total_epochs)
            writer.add_scalar('detail/'+str(self.args.task)+'total_download_size:', tmp_total_download_size, global_step=total_epochs)

            writer.add_scalar('delay/'+str(self.args.task)+'computation_delay:', tmp_computation_delay, global_step=total_epochs)
            writer.add_scalar('delay/'+str(self.args.task)+'arrange_delay:', tmp_arrange_delay, global_step=total_epochs)
            writer.add_scalar('delay/'+str(self.args.task)+'scaling_delay:', tmp_scaling_delay, global_step=total_epochs)
            writer.add_scalar('delay/'+str(self.args.task)+'backhaul_delay:', tmp_backhaul_delay, global_step=total_epochs)
            writer.add_scalar('delay/'+str(self.args.task )+'delay:',tmp_arrange_delay+tmp_scaling_delay,global_step=total_epochs)


            # Record the averaged metrics for save_data() / __main__ printing.
            data.update({(policy,self.args.task,config.EDGE_NODE_NUM):[tmp_r,tmp_computation_delay,tmp_arrange_delay,tmp_scaling_delay,tmp_total_download_size,tmp_backhaul_delay]})

if __name__ == '__main__':

    # Policies whose agents are implemented in Baseline. The metaheuristic
    # agents ('ga', 'de', 'pso', 'sa') are commented out above, so requesting
    # them would raise AttributeError inside get_agent(); they are excluded
    # from the default sweep until the agents are restored.
    policy = ['dep', 'dep_soft', 'monkey', 'down', 'wait', 'comp', 'kube']
    # task_number = [500,750,1000,1250,1500,2000]
    task_number = [config.TASK_NUM]
    node_number = [10]

    for i in task_number:
        parser = argparse.ArgumentParser(description='baseline')
        parser.add_argument('--user', type=int, default=config.USER_NUM, metavar='T', help='user number')
        parser.add_argument('--generator', type=str, metavar='G', default='zipf')
        parser.add_argument('--seed', type=int, metavar='S', default=10)
        parser.add_argument("--steps", type=int, default=int(1e6), help=" Maximum number of training steps")
        parser.add_argument("--episodes", type=int, default=1, help=" Maximum number of epochs")
        parser.add_argument('--evaltype', type=str, metavar='E', default='task_number')
        parser.add_argument('--classification', type=str, metavar='C', default='heterogeneous')
        parser.add_argument('--task', type=int, default=i, metavar='T', help='task number')
        # Bug fix: `type=bool` makes any non-empty string (including "False")
        # truthy; a store_true flag is the correct form for a boolean switch.
        parser.add_argument('--store', action='store_true', default=False, help='store the running results')
        args = parser.parse_args()
        # config.EDGE_NODE_NUM = i

        env = environment.Env()
        baseline = Baseline(env, args)
        # generate_total_trace()

        # Run every policy on the same environment instance and show the
        # accumulated (policy, task, node_count) -> metrics dict as we go.
        for po in policy:
            baseline.run_baseline(po)
            print(data)
