# -*- coding: UTF-8 -*-
import env_tina as environment
import config
import argparse
import torch
import numpy as np
import copy
import random
from object import delay
import os

# Global constants
REJ_CONT_LIMIT = -2  # rejection marker: container-count limit exceeded
REJ_STORE_LIMIT = -3  # rejection marker: storage-space limit exceeded
MAX_SUM_SIZE = 1424  # upper bound used by scaled_score_locality
MIN_SUM_SIZE = 10  # lower bound used by scaled_score_locality

# Module-level globals
global_requested_image_id = 0  # NOTE(review): appears unused in this file — confirm before removing
global_task_cpu = 0  # NOTE(review): appears unused in this file — confirm before removing
data = {}  # dictionary holding experiment data

# Device selection: first CUDA GPU when available, otherwise CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


class Greedy:
    def __init__(self, env, args):

        self.env = env
        self.args = args
        self.task_list = self.env.allexist_task_list

    def greedy_total(self, task, node_list, lb_ratio=None):
        """Greedy node selection minimising download + waiting + computation
        + migration + transmission time.

        Parameters:
        - task: the task to place (project Task object).
        - node_list: candidate edge nodes.
        - lb_ratio: optional float in [0, 1]; when given, blends a
          load-balancing score with the locality score.

        Returns:
        - the selected node id, or ``config.EDGE_NODE_NUM`` (the cloud)
          when no edge node is feasible.
        """
        # max_score starts positive so the first scored node is always
        # accepted; selected < 0 encodes "nothing chosen yet" / a rejection
        # reason (REJ_CONT_LIMIT / REJ_STORE_LIMIT).
        max_score, selected = 1, -1

        requested_image_id = task.requested_image_id
        task_cpu = task.task_cpu
        task_mem = task.task_mem

        # Size of the image backing this task (first matching image).
        curnt_task_siz = [
            i.size
            for i in self.env.Image
            if i.name == self.env.Task[task.task_id].task_image_name[0]
        ][0]

        # Total size of all layers the requested image needs.  This is
        # independent of the candidate node, so compute it once here; the
        # original recomputed it on every loop iteration.
        total_size = sum(
            int(self.env.Layer[self.env.layer_dict[x][-2]].size)
            for x in self.env.Image[requested_image_id].layer_list
        )

        # Node the task was previously assigned to (drives migration cost).
        prev_node_id = task.assigned_node

        # Randomise the visiting order to avoid a fixed-order bias.
        self.visit_sequence = np.random.permutation(len(node_list))

        for i in self.visit_sequence:
            node = node_list[i]

            # Skip nodes without enough container/CPU/memory capacity.
            if not self.get_node_conta_free(node, task_cpu, task_mem):
                if selected < 0:
                    selected = REJ_CONT_LIMIT
                continue

            # Skip nodes without enough free disk for the image layers.
            if total_size > self.get_node_free_disk(node):
                if selected < 0:
                    selected = REJ_STORE_LIMIT
                continue

            download_time = self.get_download_time(requested_image_id, node.id)

            # Migration delay applies only when the task was already placed.
            migration_delay = 0
            if prev_node_id is not None and prev_node_id != -1:
                source_node = self.env.Edge[prev_node_id] if prev_node_id != config.EDGE_NODE_NUM else None
                migration_delay = self.get_migration_delay(task, source_node, node)

            # Transmission delay applies only to a first placement.
            transmission_delay = 0
            if prev_node_id is None:
                transmission_delay = self.get_transmission_delay(task, node)

            # Lower total time is better, so negate it for a max-score search.
            score = -(
                download_time
                + self.get_waiting_time(requested_image_id, node.id, download_time)
                + self.get_computation_time(task_cpu, curnt_task_siz, node.id)
                + migration_delay
                + transmission_delay
            )

            # Optional load-balancing blend.
            # NOTE(review): with lb_ratio set the blended score is
            # non-negative, so the `max_score > 0` branch below keeps
            # overwriting the choice on every iteration — confirm intended.
            if lb_ratio is not None:
                score_locality = self.scaled_score_locality(score)
                score_lb = (
                    (config.node_max_container_number - node.container_number)
                    / config.node_max_container_number
                ) * 10
                score = lb_ratio * score_lb + (1 - lb_ratio) * score_locality

            # Accept the first scored node, then keep the best score seen.
            if max_score > 0 or score > max_score:
                max_score = score
                selected = node.id

        # Fall back to the cloud node when no edge node was feasible.
        if selected < 0:
            selected = config.EDGE_NODE_NUM

        return selected

    def get_node_conta_free(self, node, task_cpu, task_mem):
        """Return True when *node* can host one more container for a task
        needing *task_cpu* CPU and *task_mem* memory.

        Fix: a node whose container count has already reached
        ``config.node_max_container_number`` is now rejected; the original
        used a strict ``<`` comparison and accepted a node exactly at the
        limit, disagreeing with the feasibility check in
        ``select_available_nodes_ids``.
        """
        if node.container_number >= config.node_max_container_number:
            return False
        if task_cpu > node.available_cpu or task_mem > node.available_mem:
            return False
        return True

    def get_node_free_disk(self, node):
        """
        获取节点可用磁盘空间
        """
        return node.available_disk

    def get_download_time(self, requested_image_id, node_id):
        """Time to download the image layers missing on *node_id*.

        For the cloud node (``config.EDGE_NODE_NUM``) every layer is fetched
        over the cloud link; for an edge node only layers it does not hold
        (``layer_01_list[layer] == 0``) contribute.

        Parameters:
        - requested_image_id: id of the requested image
        - node_id: target node id

        Returns:
        - download time (size over bandwidth/8)
        """
        layer_names = self.env.Image[requested_image_id].layer_list
        layer_ids = [self.env.layer_dict[name][-2] for name in layer_names]

        if node_id == config.EDGE_NODE_NUM:
            # Cloud placement: download everything over the cloud bandwidth.
            total = sum(int(self.env.Layer[lid].size) for lid in layer_ids)
            return total / (config.cloud_bandwidth / 8)

        edge = self.env.Edge[node_id]
        cached = edge.layer_01_list
        # Only the layers this node does not already hold must be fetched.
        missing = sum(
            int(self.env.Layer[lid].size) for lid in layer_ids if cached[lid] == 0
        )
        return missing / (edge.node_bw2 / 8)

    def get_waiting_time(self, requested_image_id, node_id, download_time):
        """Waiting time until every requested layer finishes downloading.

        Cloud placements (``node_id == config.EDGE_NODE_NUM``) never wait,
        and a task with zero download time waits for nothing either.

        Parameters:
        - requested_image_id: id of the requested image
        - node_id: target node id
        - download_time: download time already computed for this placement

        Returns:
        - waiting time (>= 0)
        """
        if node_id == config.EDGE_NODE_NUM:
            return 0

        cached = self.env.Edge[node_id].layer_01_list
        layer_ids = [
            self.env.layer_dict[name][-2]
            for name in self.env.Image[requested_image_id].layer_list
        ]
        # Latest download-finish time among the requested layers; the extra
        # 0 reproduces the original's zero-initialised running maximum.
        latest_finish = max([0] + [cached[lid] for lid in layer_ids])
        waiting_time = max(0, latest_finish - self.env.time)

        # Nothing to download means nothing to wait for.
        return 0 if download_time == 0 else waiting_time

    def get_computation_time(self, task_cpu, curnt_task_siz, node_id):
        """Estimated computation time of the task on *node_id*.

        Cloud: CPU demand over the cloud CPU frequency.
        Edge: (queued workload + this task's cycles) over the node's CPU
        capacity.

        Parameters:
        - task_cpu: task CPU demand
        - curnt_task_siz: task image size (presumably MB — the *1024*1024
          factor suggests an MB-to-bytes conversion; confirm units)
        - node_id: target node id

        Returns:
        - computation delay
        """
        if node_id == config.EDGE_NODE_NUM:
            return task_cpu / config.cloud_cpu_frequency

        # Cycles this task will need on an edge node.
        task_cycles = curnt_task_siz * 1024 * 1024 * config.task_process_density
        # Work already queued on this node.
        backlog = self.workload(node_id)
        return (backlog + task_cycles) / self.env.Edge[node_id].cpu_capacity
    
    def get_migration_delay(self, task, source_node, destination_node):
        """Migration delay for moving *task* from *source_node* to
        *destination_node*.

        Fix: the caller (``greedy_total``) passes ``source_node=None`` when
        the previous placement was the cloud, and the original dereferenced
        ``source_node.id`` unconditionally, crashing on None.  A None source
        now returns 0, matching the "no migration cost from the cloud" rule.

        NOTE(review): ``task_size``/``mig_size`` are computed but the delay
        formula uses ``random.uniform(0.5, 100)`` as the transferred size —
        confirm whether the real image size should be used instead.

        Parameters:
        - task: the task being migrated
        - source_node: node currently hosting the task (None == cloud)
        - destination_node: candidate target node

        Returns:
        - migration delay (0 for cloud / unknown source)
        """
        # No migration cost from the cloud, or when no source is known.
        if source_node is None or source_node.id == config.EDGE_NODE_NUM:
            return 0

        task_size = [
            i.size
            for i in self.env.Image
            if i.name == task.task_image_name[0]
        ][0]
        mig_size = task_size  # nominal migration payload size (see NOTE above)

        # Transfer term (randomised payload over the bottleneck bandwidth)
        # plus a randomised per-hop propagation term.
        task_mig_delay = (
            random.uniform(0.5, 100) * 8 /
            min(
                source_node.node_bw,  # source uplink bandwidth
                destination_node.node_bw2  # destination downlink bandwidth
            ) +
            random.uniform(1, 3) * delay.hop_distance(
                source_node.location_x, source_node.location_y,
                destination_node.location_x, destination_node.location_y
            )
        )

        return task_mig_delay

    def get_transmission_delay(self, task, node):  
        """  
        计算传输延迟  
        
        参数:  
        - task: 待部署任务  
        - node: 目标节点  
        
        返回:  
        - 传输延迟  
        """  
        task_user_map = self.env.get_task_user_mapping()  
        task_user = task_user_map.get(task.task_id)  
        
        if not task_user:  
            return 0  

        # 找到用户和目标节点的位置  
        uid_current_x = [  
            itm.user_locationx  
            for ind, itm in enumerate(self.env.User)  
            if itm.uid == task_user  
        ][0]  
        uid_current_y = [  
            itm.user_locationy  
            for ind, itm in enumerate(self.env.User)  
            if itm.uid == task_user  
        ][0]  

        task_size = [  
            i.size   
            for i in self.env.Image   
            if i.name == task.task_image_name[0]  
        ][0]  

        # 云节点传输延迟  
        # if node.id == config.EDGE_NODE_NUM:  
        #     usr_up_trans_delay = (  
        #         random.uniform(0.5, 100) * 8 /  
        #         min(config.cloud_bandwidth, task_user.user_bw)  
        #     )  
        #     return usr_up_trans_delay  

        # 边缘节点传输延迟  
        tmp_usr_up_trans_dlytime1, distance, trate = delay.trans_dly(  
            task_size,  
            uid_current_x,  
            uid_current_y,  
            node.location_x,  
            node.location_y,  
            node.node_bw2,  
        )  

        # backhaul_delay = (  
        #     random.uniform(0.05, 5) * 8 / node.node_bw2 +  
        #     0.02 * self.hop_distance(  
        #         task_user.user_locationx, task_user.user_locationy,  
        #         node.location_x, node.location_y  
        #     )  
        # )  

        return tmp_usr_up_trans_dlytime1


    def workload(self, usr_belong_edgeid):
        """
        计算节点当前工作负载

        参数:
        - usr_belong_edgeid: 用户所属边缘节点ID

        返回:
        - 当前工作负载
        """
        tmp_pendtsk_list = []
        current_workload = 0

        while not self.env.task_pending.empty():
            task_pending_tmp = self.env.task_pending.get()
            tmp_pendtsk_list.append(task_pending_tmp)
            node = self.env.Task[task_pending_tmp[3]].assigned_node
            if usr_belong_edgeid == node:
                task_siz = [
                    i.size
                    for i in self.env.Image
                    if i.name == self.env.Task[task_pending_tmp[3]].task_image_name[0]
                ][0]
                current_workload += task_siz * 1024 * 1024 * config.task_process_density

        if tmp_pendtsk_list:
            for item in tmp_pendtsk_list:
                self.env.task_pending.put(item)
        return current_workload

    def scaled_score_locality(self, score):
        """Clamp/scale a locality score.

        Scores above MAX_SUM_SIZE map to 10 and scores below MIN_SUM_SIZE to
        0; values in between are divided by (MAX_SUM_SIZE - MIN_SUM_SIZE).

        NOTE(review): the middle branch yields values in roughly (0, 1],
        not the 0-10 range the surrounding code suggests — confirm whether
        a ``* 10`` factor is missing.
        """
        if score > MAX_SUM_SIZE:
            return 10
        if score < MIN_SUM_SIZE:
            return 0
        return score / (MAX_SUM_SIZE - MIN_SUM_SIZE)


    def run_baseline(self, policy="greedy_total"):
        """Run the greedy baseline policy for ``args.episodes`` episodes.

        Parameters:
        - policy: label for bookkeeping only (the scheduling policy itself
          is ``greedy_total``).

        Returns:
        - dict mapping metric names to per-episode lists of per-task
          averages, plus raw cloud-fallback counts.
        """
        task_num = self.args.task
        total_epochs = 0

        # Per-episode result series.
        rewards_list = []
        migration_list = []
        total_download_time_list = []
        trans_list = []
        waiting_time_list = []
        total_download_size_list = []
        computation_delay_list = []
        arrange_delay_list = []
        scaling_delay_list = []
        backhaul_delay_list = []
        cloud_counts_list = []

        while total_epochs < self.args.episodes:
            print(f"第 {total_epochs+1} 轮实验")

            # Re-seed for reproducibility.
            # NOTE(review): re-using the same seed each episode makes every
            # episode statistically identical — confirm intended.
            self.env.seed(self.args.seed)
            np.random.seed(self.args.seed)
            torch.manual_seed(self.args.seed)

            obs, _ = self.env.reset(self.args.task)

            self.task_list = self.env.task_queue
            print(obs["users"])
            print(obs["tasks"])

            node_list_id = self.select_available_nodes_ids(self.env, obs)

            # Per-episode accumulators.
            ep_reward = 0
            ep_migration = 0
            ep_total_download_time = 0
            ep_trans = 0
            ep_waiting_time = 0
            ep_total_download_size = 0
            ep_computation_delay = 0
            ep_arrange_delay = 0
            ep_scaling_delay = 0
            ep_backhaul_delay = 0

            done = False
            cloud_count = 0
            while not done:
                # Rotate the task queue: take the next task and re-queue it.
                tem_task = self.task_list.get()
                self.task_list.put(tem_task)
                task_id = tem_task[1]
                task = self.env.Task[task_id]

                # Collect the currently-available edge nodes (the cloud slot
                # is excluded here; it is the explicit fallback below).
                node_list = []
                node_id = []
                for index, flag in enumerate(node_list_id):
                    if flag == 1 and index != config.EDGE_NODE_NUM:
                        node_id.append(index)
                        node_list.append(self.env.Edge[index])

                if not node_list:
                    # No feasible edge node: fall back to the cloud.
                    action = config.EDGE_NODE_NUM
                    cloud_count += 1
                else:
                    action = self.greedy_total(task, node_list)

                obs_, reward, info, done, details = self.env.step(action)

                obs = obs_
                # The env returns a cost; negate so larger is better.
                reward = -reward

                # Refresh candidate nodes for the next decision.
                node_list_id = self.select_available_nodes_ids(self.env, obs)

                # Accumulate metrics (* 1000 presumably converts s -> ms).
                ep_reward += reward * 1000
                ep_migration += details[0] * 1000
                ep_total_download_time += details[1] * 1000
                ep_trans += details[2] * 1000
                ep_waiting_time += details[3] * 1000
                ep_total_download_size += details[4] * 1000
                ep_computation_delay += details[5] * 1000
                ep_arrange_delay += details[6] * 1000
                ep_scaling_delay += details[7] * 1000
                ep_backhaul_delay += details[8] * 1000

            total_epochs += 1

            # Per-task averages (the original wrapped these in copy.copy,
            # a no-op on numbers; dropped).
            tmp_r = ep_reward / task_num
            tmp_migration = ep_migration / task_num
            tmp_total_download_time = ep_total_download_time / task_num
            tmp_trans = ep_trans / task_num
            tmp_waiting_time = ep_waiting_time / task_num
            tmp_total_download_size = ep_total_download_size / task_num
            tmp_computation_delay = ep_computation_delay / task_num
            tmp_arrange_delay = ep_arrange_delay / task_num
            tmp_scaling_delay = ep_scaling_delay / task_num
            tmp_backhaul_delay = ep_backhaul_delay / task_num

            rewards_list.append(tmp_r)
            migration_list.append(tmp_migration)
            total_download_time_list.append(tmp_total_download_time)
            trans_list.append(tmp_trans)
            waiting_time_list.append(tmp_waiting_time)
            total_download_size_list.append(tmp_total_download_size)
            computation_delay_list.append(tmp_computation_delay)
            arrange_delay_list.append(tmp_arrange_delay)
            scaling_delay_list.append(tmp_scaling_delay)
            backhaul_delay_list.append(tmp_backhaul_delay)
            cloud_counts_list.append(cloud_count)

            # Episode summary.
            print("\n实验结果:")
            print(f"平均奖励: {tmp_r}")
            print(f"平均迁移延迟: {tmp_migration} ms")
            print(f"平均下载时间: {tmp_total_download_time} ms")
            print(f"平均传输延迟: {tmp_trans} ms")
            print(f"平均等待时间: {tmp_waiting_time} ms")
            print(f"平均下载大小: {tmp_total_download_size} MB")
            print(f"平均计算延迟: {tmp_computation_delay} ms")
            print(f"平均调度延迟: {tmp_arrange_delay} ms")
            print(f"平均扩展延迟: {tmp_scaling_delay} ms")
            print(f"平均回程延迟: {tmp_backhaul_delay} ms")
            print(f"云次数：{cloud_count}")

        # Checkpoint saving was already disabled in the original (dead
        # checkpoint_path assignments and a commented-out save call were
        # removed); call save_baseline_results() explicitly when needed.

        return {
            "rewards": rewards_list,
            "migration_delays": migration_list,
            "download_times": total_download_time_list,
            "trans_delays": trans_list,
            "waiting_times": waiting_time_list,
            "total_download_sizes": total_download_size_list,
            "computation_delays": computation_delay_list,
            "arrange_delays": arrange_delay_list,
            "scaling_delays": scaling_delay_list,
            "backhaul_delays": backhaul_delay_list,
            "cloud_counts": cloud_counts_list
        }


    def select_available_nodes_ids(self, env, obs):
        """Return a 0/1 availability vector over all nodes.

        Index ``config.EDGE_NODE_NUM`` is the cloud; 1 marks a node the
        current task may be scheduled on.

        Fix: the reschedule-limit branch replaced the numpy array with a
        plain Python list, making the return type inconsistent across
        branches; it now stays a numpy int array everywhere.

        Parameters:
        - env: environment instance
        - obs: current observation

        Returns:
        - numpy int array of length ``config.EDGE_NODE_NUM + 1``
        """
        # User positions as [uid, x, y].
        each_use_loc = [
            [user[0], user[1], user[2]] for user in obs["users"]
        ]

        # Edge node positions as [node_id, x, y] (obs columns 7/8 hold x/y).
        each_edge_loc = [
            [i, node[7], node[8]]
            for i, node in enumerate(obs["nodes"])
        ]

        # Start with every node unavailable.
        node_list_id = np.zeros(config.EDGE_NODE_NUM + 1, dtype=int)

        # Nothing to schedule: report no availability.
        if not env.allexist_task_list:
            return node_list_id

        # Current task of the next user to act.
        current_task_idx = [
            item.usr_has_tsk for item in env.User if item.uid == env.next_uid_idx
        ][0][0]
        current_task = env.Task[current_task_idx]

        task_user_map = env.get_task_user_mapping()
        # NOTE(review): the default -1 is truthy, so a missing mapping is
        # NOT caught by the falsy check below — confirm intended semantics.
        matching_users = task_user_map.get(current_task.task_id, -1)

        if not matching_users:
            print(
                "No user found for task_id:",
                current_task.task_id,
                current_task.assigned_node
            )
            return node_list_id

        uid = matching_users

        # Candidate nodes near this user, per the environment's pooling rule.
        tem_next_can = env.pool(uid, each_use_loc, each_edge_loc)

        if current_task.reschedule_count <= 3:
            # Mark candidates that still have container/memory/CPU headroom.
            for node_index in tem_next_can:
                node = env.Edge[node_index]

                is_resource_sufficient = (
                    node.container_number < config.node_max_container_number
                    and node.available_mem >= current_task.task_mem
                    and node.available_cpu >= current_task.task_cpu
                )

                if is_resource_sufficient:
                    node_list_id[node_index] = 1
        else:
            # Rescheduled more than 3 times: force the task to the cloud.
            node_list_id = np.zeros(config.EDGE_NODE_NUM + 1, dtype=int)
            node_list_id[config.EDGE_NODE_NUM] = 1

        # Safety net: if nothing is available, the cloud always is.
        if np.sum(node_list_id) == 0:
            node_list_id[config.EDGE_NODE_NUM] = 1

        return node_list_id
    
    def save_baseline_results(path, baseline_info):
        """
        保存基线算法单次运行的结果

        :param path: 保存路径
        :param baseline_info: 包含基线运行信息的字典
        """
        checkpoint = {
            "rewards": baseline_info.get("rewards", 0),
            "migration_delays": baseline_info.get("migration_delays", 0),
            "download_times": baseline_info.get("download_times", 0),
            "trans_delays": baseline_info.get("trans_delays", 0),
            "waiting_times": baseline_info.get("waiting_times", 0),
            "total_download_sizes": baseline_info.get("total_download_sizes", 0),
            "computation_delays": baseline_info.get("computation_delays", 0),
            "arrange_delays": baseline_info.get("arrange_delays", 0),
            "scaling_delays": baseline_info.get("scaling_delays", 0),
            "backhaul_delays": baseline_info.get("backhaul_delays", 0),
            "cloud_counts": baseline_info.get("cloud_counts", 0),
            "policy": baseline_info.get("policy", "greedy_total"),
            "task_num": baseline_info.get("task_num", 0)
        }

        # 直接保存检查点
        torch.save(checkpoint, path)



# Script entry point
if __name__ == "__main__":
    arg_parser = argparse.ArgumentParser(description="baseline")
    arg_parser.add_argument(
        "--task", type=int, default=config.TASK_NUM, metavar="T", help="task number"
    )
    arg_parser.add_argument("--seed", type=int, metavar="S", default=10)
    arg_parser.add_argument("--episodes", type=int, default=1, help="最大训练轮数")
    cli_args = arg_parser.parse_args()

    # Build the environment and run the greedy baseline benchmark.
    sim_env = environment.Env()
    baseline = Greedy(sim_env, cli_args)
    baseline.run_baseline()
