#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File     : GreedyScheduler.py
@Project  : dev
@Date     : 2021/12/7
@Author   : Zhang Jinyang
@Contact  : zhang-jy@sjtu.edu.cn
'''

import os

import numpy as np
from matplotlib import pyplot as plt

from obj.Job import Job


class GreedyScheduler(object):
    """
    Greedy scheduler usable for both the single-host and the multi-host case.
    """
    def __init__(self):
        # Counter used to number the figures written by visualize().
        self.pic_num = 0

    def load_jobs(self, file_path):
        """
        Load a test case from a file.

        :param file_path: path of the case file
        :return:
        """
        with open(file_path, 'r') as f:
            lines = f.readlines()

        # First line: job count, host count, alpha and (optional) transfer speed.
        header = lines[0].strip().split()
        self.n = int(header[0])
        self.host_num = int(header[1])
        self.alpha = float(header[2])
        self.tspeed = float(header[3]) if len(header) > 3 else 0.0

        # Second line: cores per host; build the core -> host mapping.
        cores_per_host = [int(c) for c in lines[1].strip().split()]
        self.m = sum(cores_per_host)
        self.core2host = {}
        core_id = 0
        for host_id, num_cores in enumerate(cores_per_host):
            for _ in range(num_cores):
                self.core2host[core_id] = host_id
                core_id += 1

        # Per-job info: block counts, speeds, then block sizes and
        # (when there is more than one host) block locations.
        block_per_job = [int(b) for b in lines[2].strip().split()]
        speed_per_job = [int(s) for s in lines[3].strip().split()]
        self.job_list = []
        for j in range(self.n):
            sizes = [int(s) for s in lines[4 + j].strip().split()]
            if self.host_num == 1:
                # Single-host case: every block lives on host 0.
                locs = [0] * len(sizes)
            else:
                locs = [int(x) for x in lines[4 + j + self.n].strip().split()]
            self.job_list.append(Job(j, speed_per_job[j], sizes, locs))

    def schedule_jobs(self, visual=False):
        """
        Schedule all loaded jobs (works for single- and multi-host cases).

        Phase 1 places every job serially on one core using a greedy
        earliest-finish rule; Phase 2 repeatedly takes the latest-finishing
        job, switches it to parallel execution and searches for the best
        core set.

        :param visual: if True, save a Gantt chart at each Phase-2 step
        :return:
        """

        # Execute jobs in decreasing order of (total size / speed).
        self.job_order = [i[0] for i in sorted(enumerate(self.job_list), key=lambda x: x[1].total_size / x[1].speed)][
                         ::-1]
        # Latest finish time and list of finished job ids for every core.
        self.timelines = np.zeros(self.m)
        self.job_per_core = [[] for _ in range(len(self.core2host))]


        """
        Phase 1: run each job serially as a whole, transferring blocks from
                 other hosts onto the chosen host; greedily pick the core
                 giving the earliest finish time for each job.
        """
        for j in self.job_order:

            job = self.job_list[j]
            target_core = -1
            min_F = float("inf")
            # Greedy: pick the core on which this job finishes earliest.
            for i, h in self.core2host.items():
                job.execute([i], self.alpha, self.core2host, self.tspeed)
                # BUGFIX: use the scalar np.max(job.duration) consistently;
                # the original stored the raw duration array into min_F while
                # comparing against the scalar maximum.
                finish = self.timelines[i] + np.max(job.duration)
                if min_F > finish:
                    min_F = finish
                    target_core = i

            # Commit the best core and update the job's execution info.
            job.start_time = self.timelines[target_core]
            self.timelines[target_core] = min_F
            job.execute([target_core], self.alpha, self.core2host, self.tspeed)
            self.job_per_core[target_core].append(job.id)


        """
        Phase 2: greedily pick the job that finishes last, switch it to
                 parallel execution and search for the best core set.
                 The adjusted job is re-ordered after all already-optimized
                 jobs but before the still-serial ones.
        """
        # Jobs not yet optimized; each job is pulled from the tail only once.
        not_visited = list(range(len(self.job_list)))
        # Position in job_order where the next parallelized job is inserted.
        pos = 0
        while not_visited:
            if visual:
                self.visualize()

            # Start from the core that finishes last.
            core_sort_by_finish_time = np.argsort(self.timelines)
            max_F = self.timelines[core_sort_by_finish_time[-1]]

            # Stop when the last core is empty or its last job was already
            # optimized: no further improvement is possible.
            if self.job_per_core[core_sort_by_finish_time[-1]]:
                job_id = self.job_per_core[core_sort_by_finish_time[-1]][-1]
            else:
                break
            if job_id not in not_visited:
                break

            not_visited.remove(job_id)
            # The job to parallelize; remove it from its core's timeline.
            job_to_adjust = self.job_list[job_id]
            self.timelines[core_sort_by_finish_time[-1]] -= max(job_to_adjust.duration)


            # Move the job to its new position in the execution order.
            self.job_per_core[core_sort_by_finish_time[-1]].pop(-1)
            self.job_order.remove(job_id)
            self.job_order.insert(pos, job_id)
            pos += 1

            # Grow the candidate core set from the earliest-finishing core.
            core_sort_by_finish_time = np.argsort(self.timelines)
            core_set = [core_sort_by_finish_time[0]]
            cores_after_adjust = [core_sort_by_finish_time[0]]

            for i in range(1, min(self.m, len(job_to_adjust.blocks_size))):
                # Try each prefix of the cores ordered by earliest finish time.
                core_set.append(core_sort_by_finish_time[i])
                job_to_adjust.execute(core_set, self.alpha, self.core2host, self.tspeed)
                self.reorder_jobs()
                if np.max(self.timelines) <= max_F:
                    max_F = np.max(self.timelines)
                    cores_after_adjust = list(core_set)

            # Commit the best core set found and finalize the schedule.
            job_to_adjust.execute(cores_after_adjust, self.alpha, self.core2host, self.tspeed)
            self.reorder_jobs(True)

            # BUGFIX: this call was unconditional (leftover debug code);
            # respect the visual flag as done at the top of the loop.
            if visual:
                self.visualize()

            """
            Mark every job that already finishes before the adjusted job:
            those can never become the latest-finishing job again.
            """
            tmp_order = list(self.job_order)
            for i in tmp_order:
                job = self.job_list[i]
                if i in not_visited and job.start_time + np.max(job.duration) < job_to_adjust.start_time + np.max(
                        job_to_adjust.duration):
                    self.job_order.remove(i)
                    self.job_order.insert(pos, i)
                    pos += 1
                    not_visited.remove(i)

    def reorder_jobs(self, determined=False):
        """
        Replay all jobs, in self.job_order order, on their already-chosen cores.

        Rebuilds self.timelines (per-core finish times) and self.joblines
        (id of the last job placed on each core, -1 if none).  While
        replaying, tries to close idle gaps by expanding the core set of the
        previous job on the critical core (see expand_cores).

        :param determined: if True, every job keeps its current execution
                           plan, so any tentative core expansion is undone
        :return:
        """
        self.joblines = np.ones(self.m, dtype=int) * -1
        self.timelines = np.zeros(self.m)
        for i in self.job_order:
            # Replay each job with its already-decided core assignment.
            job = self.job_list[i]
            cores = job.cores
            duration = max(job.duration)
            latest_core = cores[np.argmax(self.timelines[cores])]
            if self.joblines[latest_core] != -1:
                # Snapshot the previous job's cores so the expansion can be undone.
                tmp = [i for i in self.job_list[self.joblines[latest_core]].cores]
                if self.expand_cores(self.job_list[self.joblines[latest_core]], job) and not determined:
                    # A gap may still be exploitable; while the plan is not
                    # final, restore the previous job's original core set.
                    self.job_list[self.joblines[latest_core]].execute(tmp, self.alpha, self.core2host,
                                                                                 self.tspeed)
            # Recompute the critical core: expand_cores may have changed timelines.
            latest_core = cores[np.argmax(self.timelines[cores])]
            job.start_time = self.timelines[latest_core]

            self.timelines[cores] = job.start_time + duration
            self.joblines[cores] = i

    def expand_cores(self, job1, job2):
        """
        Try to close a scheduling gap by widening job1's core set.

        Example: job2 runs after job1 and uses cores job1 does not; any of
        those cores that is already idle when job1 starts can be added to
        job1's set, increasing its parallelism and shrinking the gap.

        :param job1: the earlier job whose parallelism may be expanded
        :param job2: the later job providing candidate cores
        :return: False when no candidate core could be added; True when job1
                 was re-executed on a wider set (kept only if not slower)
        """
        expanded = set(job1.cores)
        # Candidate cores: used by job2 but not job1, and idle before job1 starts.
        for core in set(job2.cores).difference(expanded):
            if self.timelines[core] <= job1.start_time:
                expanded.add(core)

        if expanded == set(job1.cores):
            # The core set did not change: keep the current plan.
            return False

        old_duration = np.max(job1.duration)
        old_cores = set(job1.cores)
        job1.execute(list(expanded), self.alpha, self.core2host, self.tspeed)

        if np.max(job1.duration) <= old_duration:
            # The wider set is at least as fast: commit the new finish times.
            self.timelines[list(expanded)] = job1.start_time + np.max(job1.duration)
        else:
            # Slower when widened: roll job1 back to its original core set.
            job1.execute(list(old_cores), self.alpha, self.core2host, self.tspeed)
        return True


    """
    outputSolutionFromBlock: report the execution details of every Job
    outputSolutionFromCore: report the execution details of every Core
    """

    def outputSolutionFromBlock(self):
        """
        Build the per-job solution report.

        Also fills self.block_per_core (one block list per core) as a side
        effect, which outputSolutionFromCore relies on.

        :return: one report line per job, joined with newlines, ordered by job id
        """
        self.block_per_core = [[] for _ in range(self.m)]
        lines = [""] * self.n
        for job_id in self.job_order:
            job = self.job_list[job_id]
            lines[job_id] = job.outputSolutionFromBlock(self.core2host, self.block_per_core)
        return '\n'.join(lines)

    def outputSolutionFromCore(self):
        """
        Build the per-core solution report, grouped by host.

        Requires outputSolutionFromBlock() to have been called first so that
        self.block_per_core is populated.

        BUGFIX: the original returned inside the per-host loop and therefore
        only ever reported host 0; the report now covers every host.  It also
        crashed with an IndexError on cores with no scheduled blocks, which
        are now reported as idle.

        :return: report string covering all hosts
        """
        for job in self.job_list:
            job.update_block_info()
        self.host2core = [[] for i in range(self.host_num)]
        for c, h in self.core2host.items():
            self.host2core[h].append(c)

        report = []
        for i, cores in enumerate(self.host2core):

            max_t = 0
            msg = ""
            for core in cores:
                blocks = self.block_per_core[core]
                if not blocks:
                    # Idle core: nothing was scheduled on it.
                    msg += f"\tCore{core} has 0 tasks\n\n"
                    continue
                core_finish_time = blocks[-1].job_start_time + blocks[-1].finish_time
                msg += f"\tCore{core} has {len(blocks)} tasks and finishes at time {str(round(core_finish_time, 1)).zfill(5)}\n"
                if max_t < core_finish_time:
                    max_t = core_finish_time
                for binfo in blocks:
                    msg += str(binfo)
                msg += "\n"
            report.append(f"Host{i} finishes at time {str(round(max_t, 1)).zfill(5)}\n" + msg)
        return "".join(report)


    def visualize(self, figsize=(24, 6), savefig=True):
        """
        Draw a Gantt chart of the current schedule (one bar row per core).

        :param figsize: figure size passed to plt.subplots (tuple, not a
                        mutable default list)
        :param savefig: if True, save to ./output/vis<N>.png; otherwise show
        :return:
        """
        fig, ax = plt.subplots(figsize=figsize)
        y = ["core " + str(i) for i in range(self.m)]
        for job in self.job_list:
            duration = np.zeros(self.m)
            duration[job.cores] = job.duration
            start_time = job.start_time
            # Label each job's bars so that ax.legend() below has entries;
            # the original legend call had no labeled artists and drew nothing.
            ax.barh(y, duration, 0.6, left=start_time, label=f"job {job.id}")
            for i in range(self.m):
                if duration[i] == 0.:
                    continue
                ax.text(start_time + duration[i], y[i],
                        str(round(start_time + duration[i], 3)),
                        fontsize=10, fontweight='bold',
                        color='grey')
                blocks = job.block_per_core[job.cores.index(i)]
                ax.text(start_time + 0.2, i - 0.2,
                        f"{job.id} b {' '.join([str(i) for i in blocks])}",
                        fontsize=10, fontweight='bold',
                        color='white')
        # Remove x, y Ticks
        ax.xaxis.set_ticks_position('none')
        ax.yaxis.set_ticks_position('none')

        # Remove axes splines
        for s in ['top', 'right']:
            ax.spines[s].set_visible(False)

        # Add padding between axes and labels
        ax.xaxis.set_tick_params(pad=5)
        ax.yaxis.set_tick_params(pad=10)

        # Add x gridlines.  BUGFIX: pass `visible` positionally -- the keyword
        # was renamed from `b` to `visible` in matplotlib 3.5 and `b` was
        # removed in 3.6, so `b=True` raises TypeError on modern matplotlib.
        ax.grid(True, color='grey', axis='x',
                linestyle='-.', linewidth=1,
                alpha=0.8)

        ax.legend(loc="best")
        plt.xlabel("Timeline", fontdict=dict(fontsize=16))

        if savefig:
            # Make sure the output directory exists before writing.
            os.makedirs("./output", exist_ok=True)
            plt.savefig(f"./output/vis{self.pic_num}.png")
        else:
            plt.show()
        # Close the figure: this method is called repeatedly while scheduling,
        # and open figures accumulate memory (matplotlib warns after 20).
        plt.close(fig)
        self.pic_num += 1