# Copyright (c) 2025, HUAWEI CORPORATION. All rights reserved.
import json
import math
import functools
import operator
import atexit
from abc import ABC, abstractmethod
from dataclasses import dataclass
from pathlib import Path

import pandas as pd
import numpy as np

from mindspeed_rl.utils.loggers import Loggers
from mindspeed_rl.trainer.auto_parallel.system_config import SystemConfig
from mindspeed_rl.trainer.auto_parallel.launch import TaskType
from mindspeed_rl.trainer.auto_parallel.search_space import ParallelConfig


logger = Loggers('cost_model')


@dataclass
class PipelineParallelParas:
    """Parameters describing one pipeline-parallel schedule.

    Note: the duration fields were previously annotated ``float`` but every
    caller passes one duration per model chunk as a list (see
    ``pipeline_costmodel``), so the annotations are corrected here.
    """
    num_stages: int        # number of pipeline stages (pp)
    vpp: int               # virtual pipeline size (chunks per stage)
    fwd_durations: list    # forward time per model chunk
    bwd_durations: list    # backward time per model chunk (empty when forward-only)
    num_microbatches: int  # micro batches per iteration
    comm_matrix: list      # stage-to-stage P2P communication time matrix

class Sheet:
    """Accumulates (config, memory, E2Etime) rows and persists them to CSV at exit."""

    def __init__(self, config, worker):
        self.sheet = pd.DataFrame(columns=['config', 'memory', 'E2Etime'])
        # NOTE(review): "modle" looks like a typo for "model"; kept unchanged in
        # case downstream tooling already consumes this exact file name.
        self.path = "{}/{}_modle_results.csv".format(SystemConfig.rl_config.auto_parallel.work_dir, worker)
        self.config = config
        self.worker = worker
        # Persist whatever was collected when the process exits; the lambda
        # reads self.path lazily, so a later set_path() still takes effect.
        atexit.register(lambda: Sheet.save(self.sheet, self.path))

    def update(self, parallel_config, memory, e2etime):
        """Insert a row for `parallel_config`, or overwrite it if already present."""
        # Compute the boolean mask once instead of three identical comparisons.
        mask = self.sheet['config'] == str(parallel_config)
        if not mask.any():
            self.insert(parallel_config, memory, e2etime)
            return
        self.sheet.loc[mask, 'memory'] = memory
        self.sheet.loc[mask, 'E2Etime'] = e2etime

    def insert(self, parallel_config, memory, e2etime):
        """Append a new (config, memory, E2Etime) row."""
        self.sheet.loc[len(self.sheet)] = [str(parallel_config), memory, e2etime]

    def get_minimum_e2etime_row(self):
        """Return the row with the smallest end-to-end time."""
        return self.sheet.loc[self.sheet['E2Etime'].idxmin()]

    def set_path(self, path):
        """Override the CSV output path used by the atexit hook."""
        self.path = path

    @staticmethod
    def save(data_frame, path):
        """Write the results table to `path` as UTF-8 CSV (no index column)."""
        data_frame.to_csv(path, index=False, encoding='utf-8')
    

class PipelineCostModel:
    """Analytic model of one pipeline-parallel iteration's end-to-end time.

    Per-chunk forward/backward durations are read from a profiling JSON file,
    a 1F1B (or forward-only) schedule is generated, and the schedule is
    replayed against a simple timing model that includes P2P communication.
    """

    def __init__(self, config, parallel_config: ParallelConfig, task_type: TaskType):
        self.config = config
        self.parallel_config = parallel_config
        self.task_type = task_type
        # Subclasses set this to True for inference-only workers (no backward pass).
        self.forward_only = False

    def _get_module_info(self, file_path, key, sub_key=None):
        """Read ``content[key]`` (or ``content[key][sub_key]``) from a profiling JSON file.

        Returns float('inf') when the file or key is missing so the
        configuration is naturally treated as infeasible downstream.
        """
        try:
            with open(file_path, 'r') as file:
                content = json.loads(file.read())
                return content[key] if sub_key is None else content[key][sub_key]
        except (FileNotFoundError, KeyError):
            return float('inf')

    def _get_module_time(self, profile_file, module_name):
        """Return (forward_time, backward_time) for one profiled module.

        The backward time is not profiled per module; it is extrapolated from
        the whole-step backward/forward ratio.
        """
        module_info = self._get_module_info(profile_file, module_name)
        # Bug fix: _get_module_info returns float('inf') on missing file/key,
        # and the original unconditionally called .get() on it (AttributeError).
        if not isinstance(module_info, dict):
            return float('inf'), float('inf')
        fwd_time = module_info.get('time', float('inf'))
        if self.forward_only:
            return fwd_time, float('inf')

        # assumes the profile contains finite step times whenever the module
        # entry exists -- otherwise the ratio degenerates; TODO confirm
        forward_step_time = self._get_module_info(profile_file, 'forward_step_time')
        backward_step_time = self._get_module_info(profile_file, 'backward_step_time')
        return fwd_time, fwd_time / forward_step_time * backward_step_time

    def _get_chunks_time(self, profile_file):
        """Accumulate per-chunk forward/backward times from module profiles.

        The first chunk additionally pays for the embedding; the last chunk
        pays for the final layernorm and the output layer.
        """
        pp = self.parallel_config.pipeline_model_parallel_size
        vpp = self.parallel_config.virtual_pipeline_model_parallel_size
        num_layers_per_vpp = SystemConfig.actor_config.num_layers // pp // vpp
        num_chunks = pp * vpp

        forward_time_each_chunk = []
        backward_time_each_chunk = []

        if num_chunks == 1:
            # Single chunk owns every module.
            fwd_time, bwd_time = 0, 0
            embedding = self._get_module_time(profile_file, 'embedding')
            fwd_time += embedding[0]
            bwd_time += embedding[1]
            transformer = self._get_module_time(profile_file, '0')
            fwd_time += transformer[0] * num_layers_per_vpp
            bwd_time += transformer[1] * num_layers_per_vpp
            final_norm = self._get_module_time(profile_file, 'final_layernorm')
            fwd_time += final_norm[0]
            bwd_time += final_norm[1]
            output_layer = self._get_module_time(profile_file, 'output_layer')
            fwd_time += output_layer[0]
            bwd_time += output_layer[1]

            forward_time_each_chunk.append(fwd_time)
            backward_time_each_chunk.append(bwd_time)
        else:
            for chunk_id in range(num_chunks):
                fwd_time, bwd_time = 0, 0
                if chunk_id == 0:
                    embedding = self._get_module_time(profile_file, 'embedding')
                    fwd_time += embedding[0]
                    bwd_time += embedding[1]
                    transformer = self._get_module_time(profile_file, '0')
                    fwd_time += transformer[0] * num_layers_per_vpp
                    bwd_time += transformer[1] * num_layers_per_vpp
                elif chunk_id == num_chunks - 1:
                    transformer = self._get_module_time(profile_file, '0')
                    fwd_time += transformer[0] * num_layers_per_vpp
                    bwd_time += transformer[1] * num_layers_per_vpp
                    final_norm = self._get_module_time(profile_file, 'final_layernorm')
                    fwd_time += final_norm[0]
                    bwd_time += final_norm[1]
                    output_layer = self._get_module_time(profile_file, 'output_layer')
                    fwd_time += output_layer[0]
                    bwd_time += output_layer[1]
                else:
                    transformer = self._get_module_time(profile_file, '0')
                    fwd_time += transformer[0] * num_layers_per_vpp
                    bwd_time += transformer[1] * num_layers_per_vpp

                forward_time_each_chunk.append(fwd_time)
                backward_time_each_chunk.append(bwd_time)

        return forward_time_each_chunk, backward_time_each_chunk

    def _get_prev_task_time(self, pre_task_info):
        """Compute finish times of the two predecessors of a scheduled task.

        Returns a tuple of:
          - finish time of the previous task on the same stage,
          - finish time of the same micro batch's task on the previous chunk,
          - whether both predecessors already have a start time (> 0).
        """
        (task_start_time, task_list, pp_stage_id, task_idx, chunk_stage_map,
         comp_order, model_chunk_times, comm_time_matrix) = pre_task_info
        current_task = task_list[pp_stage_id][task_idx]
        previous_task = task_list[pp_stage_id][task_idx - 1]
        previous_task_name, _ = previous_task.split('-')
        stage_id_previous_task = chunk_stage_map[previous_task_name]
        chunk_position = comp_order.index(previous_task_name)
        # Communication time after the previous task finishes computing.
        if chunk_position < len(comp_order) - 1:
            stage_id_next = chunk_stage_map[comp_order[chunk_position + 1]]
            comm_time = comm_time_matrix[stage_id_previous_task][stage_id_next]
        else:
            comm_time = 0.01
        # Finish time of the previous task on the same stage.
        end_time_previous_task = (task_start_time[previous_task]
                                  + model_chunk_times[previous_task]
                                  + comm_time)

        # Finish time of the dependency task: same micro batch id, previous model chunk.
        chunk_name, cur_mb_index = current_task.split('-')
        chunk_position = comp_order.index(chunk_name)
        if chunk_position > 0:
            previous_chunk = comp_order[chunk_position - 1]
            dependent_task = previous_chunk + '-' + cur_mb_index
            comm_time = comm_time_matrix[chunk_stage_map[previous_chunk]][chunk_stage_map[chunk_name]]
            end_time_dependent_task = (task_start_time[dependent_task]
                                       + model_chunk_times[dependent_task]
                                       + comm_time)
            completed_flag = task_start_time[previous_task] > 0 and task_start_time[dependent_task] > 0
        else:
            # First chunk has no upstream dependency; small epsilon keeps max() well-defined.
            end_time_dependent_task = 0.1
            completed_flag = task_start_time[previous_task] > 0
        return end_time_previous_task, end_time_dependent_task, completed_flag

    def get_iteration_time(self, profile_file):
        """Estimate one iteration's end-to-end time from a profiling file."""
        forward_time_chunks, backward_time_chunks = self._get_chunks_time(profile_file)
        if self.forward_only:
            iteration_time, _ = self.pipeline_costmodel(forward_time_chunks, [])
        else:
            iteration_time, _ = self.pipeline_costmodel(forward_time_chunks, backward_time_chunks)
        return iteration_time

    def pipeline_costmodel(self, fwd_time_chunks, bwd_time_chunks):
        """Build schedule parameters and return (e2e_time, per-stage start times)."""
        def get_send_recv_time(shape: list):
            # 2 bytes per element (presumably bf16/fp16 activations -- TODO confirm),
            # bandwidth in GB/s, result in milliseconds.
            data_size = functools.reduce(operator.mul, shape) * 2 / SystemConfig.unit_gb
            return (data_size / SystemConfig.p2p_band_width) * 1e3

        seq_length = int(SystemConfig.actor_config.seq_length) + int(SystemConfig.generate_config.sampling_config.max_tokens)
        batch_size = self.parallel_config.micro_batch_size
        hidden_size = SystemConfig.actor_config.hidden_size

        send_recv_time = get_send_recv_time([seq_length, batch_size, hidden_size])
        # Uniform P2P cost between distinct stages, zero on the diagonal.
        comm_matrix = [
            [send_recv_time] * self.parallel_config.pipeline_model_parallel_size
            for _ in range(self.parallel_config.pipeline_model_parallel_size)
        ]
        for i in range(self.parallel_config.pipeline_model_parallel_size):
            comm_matrix[i][i] = 0.

        global_batch_size = SystemConfig.actor_config.global_batch_size * SystemConfig.rl_config.n_samples_per_prompt
        num_microbatch = global_batch_size // self.parallel_config.data_parallel_size // batch_size
        paras = PipelineParallelParas(
            num_stages=self.parallel_config.pipeline_model_parallel_size,
            vpp=self.parallel_config.virtual_pipeline_model_parallel_size,
            fwd_durations=fwd_time_chunks,
            bwd_durations=bwd_time_chunks,
            num_microbatches=num_microbatch,
            comm_matrix=comm_matrix
        )

        if self.forward_only:
            scheduler_1f1b = self.get_schedule_fwd_only(paras)
            e2e_time_1f1b, stage_start_time = self.time_model_fwd_only(paras, scheduler_1f1b)
        else:
            scheduler_1f1b = self.get_schedule_1f1b(paras)
            e2e_time_1f1b, stage_start_time = self.time_model_nfmb(paras, scheduler_1f1b)
        return e2e_time_1f1b, stage_start_time

    def time_model_nfmb(self, paras, stage_list):
        """Replay a fwd+bwd schedule and compute the end-to-end time."""
        num_pp_stages = paras.num_stages
        num_mb = paras.num_microbatches
        comm_matrix = paras.comm_matrix
        vpp = paras.vpp
        # Placement of the vpp chunks over stages: forward order, then reversed for backward.
        chunk_placement = list(range(num_pp_stages)) * vpp + list(range(num_pp_stages - 1, -1, -1)) * vpp
        # Execution order of forward and backward chunks.
        fwd_bwd_comp_order = ([f'F_{i}' for i in range(num_pp_stages * vpp)] +
                              [f'B_{i}' for i in range(num_pp_stages * vpp - 1, -1, -1)])
        chunk_stage_map = dict(zip(fwd_bwd_comp_order, chunk_placement))

        # Initialize every task's start time to 0 (== not scheduled yet).
        fwd_bwd_list = ([f"F_{j}-{i}" for i in range(num_mb) for j in range(num_pp_stages * vpp)]
                        + [f"B_{j}-{i}" for i in range(num_mb) for j in range(num_pp_stages * vpp)])
        values = [0 for _ in range(num_pp_stages * vpp * num_mb * 2)]
        start_time = dict(zip(fwd_bwd_list, values))
        fwd_bwd_durations = dict()
        for j in range(num_pp_stages * vpp):
            for i in range(num_mb):
                fwd_bwd_durations[f"F_{j}-{i}"] = paras.fwd_durations[j]
                fwd_bwd_durations[f"B_{j}-{i}"] = paras.bwd_durations[j]

        # Seed the warm-up: the first micro batch ripples through the stages.
        start_time[f"F_{0}-{0}"] = 0.1
        for s in range(num_pp_stages - 1):
            start_time[f"F_{s + 1}-{0}"] = start_time[f"F_{s}-{0}"] + paras.fwd_durations[s] + comm_matrix[s][s + 1]

        # Iteratively resolve start times until every task has been scheduled.
        begin_up = [1] * num_pp_stages
        remaining = [num_mb * vpp * 2 - begin_up[p] for p in range(num_pp_stages)]
        remaining_flag = True
        count = 0
        while remaining_flag:
            ids_old = []
            ids_new = []
            for s in range(num_pp_stages):
                ids_old.append(remaining[s])
                if remaining[s]:
                    microbatch_idx = len(stage_list[0]) - remaining[s]
                    (end_time_prev_task_same_stage,
                     end_time_dependent_task_same_microbatch,
                     job_flag) = self._get_prev_task_time((start_time, stage_list, s, microbatch_idx, chunk_stage_map,
                                                          fwd_bwd_comp_order, fwd_bwd_durations, comm_matrix))

                    if job_flag:
                        start_time[stage_list[s][microbatch_idx]] = max(end_time_prev_task_same_stage,
                                                                        end_time_dependent_task_same_microbatch)
                        remaining[s] = remaining[s] - 1

                ids_new.append(remaining[s])

                if all(item == 0 for item in remaining):
                    remaining_flag = False

            if ids_old == ids_new:
                count += 1
                if count == 3:
                    # No progress for 3 full sweeps: the schedule is deadlocked;
                    # penalize it with a huge finish time instead of spinning forever.
                    logger.info("stage list is locked")
                    start_time[f'B_0-{num_mb - 1}'] = 1e7
                    break

        # NOTE(review): the finishing task is B_0 (chunk 0), so bwd_durations[0]
        # arguably matches it better than bwd_durations[-1]; kept as-is to
        # preserve the model's existing output -- confirm with the owners.
        e2e_time = start_time[f'B_0-{num_mb - 1}'] + paras.bwd_durations[-1]
        stage_start_time = [[start_time[job_name] for job_name in stage_list[s]] for s in range(num_pp_stages)]

        return e2e_time, stage_start_time

    def get_schedule_1f1b(self, paras):
        """Generate the per-stage 1F1B schedule (interleaved when vpp > 1)."""
        pp_stages = paras.num_stages
        vpp = paras.vpp
        num_microbatches = paras.num_microbatches
        computation_placement = list(range(pp_stages * vpp)) + list(range(pp_stages * vpp - 1, -1, -1))

        # Execution order of forward and backward chunks.
        fwd_bwd_order = ([f'F_{i}' for i in range(pp_stages * vpp)] +
                         [f'B_{i}' for i in range(pp_stages * vpp - 1, -1, -1)])

        # Interleave one stage's fwd/bwd task sequences according to the 1F1B policy.
        def get_stage_list(fwd_seq, bwd_seq, num_advanced):
            stage_order = []
            n = len(fwd_seq)
            # The warm-up depth cannot exceed the number of micro batches.
            num_advanced = min(n, num_advanced)
            for idx in range(n):
                if idx < num_advanced:
                    stage_order.append(fwd_seq[idx])
                else:
                    stage_order.append(fwd_seq[idx])
                    stage_order.append(bwd_seq[idx - num_advanced])
                if idx == n - 1:
                    # Cool-down: drain the remaining backward tasks.
                    for i in range(num_advanced):
                        stage_order.append(bwd_seq[i - num_advanced])

            return stage_order

        def get_stage_schedule(all_jobs_array, comp_placement, num_stages, vpp):
            stage_job_list = []
            for s in range(num_stages):
                stage_chunk_id = [index for index, element in enumerate(comp_placement) if (element % num_stages) == s]

                # Number of warm-up micro batches for this stage.
                if vpp > 1:
                    warmup = num_stages * (vpp + 1) - 2 * (s + 1)
                else:
                    warmup = num_stages - s - 1

                fwds = all_jobs_array[stage_chunk_id[0:vpp]]
                fwd_list = np.concatenate([fwds[:, index:index + num_stages].flatten()
                                           for index in range(0, np.size(all_jobs_array, 1), num_stages)])
                bwds = all_jobs_array[stage_chunk_id[vpp:]]
                bwd_list = np.concatenate([bwds[:, index:index + num_stages].flatten()
                                           for index in range(0, np.size(all_jobs_array, 1), num_stages)])
                stage_s_list = get_stage_list(fwd_list, bwd_list, warmup)
                stage_job_list.append(stage_s_list)
            return stage_job_list

        all_jobs = np.array([[s + f'-{i}' for i in range(num_microbatches)] for s in fwd_bwd_order])
        stage_list = get_stage_schedule(all_jobs, computation_placement, pp_stages, vpp)
        return stage_list

    def time_model_fwd_only(self, paras, stage_list):
        """Replay a forward-only schedule and compute the end-to-end time."""
        num_pp_stages = paras.num_stages
        num_mb = paras.num_microbatches
        comm_matrix = paras.comm_matrix
        vpp = paras.vpp
        # Placement of the vpp chunks over stages.
        chunk_placement = list(range(num_pp_stages)) * vpp
        # Forward execution order of the chunks.
        fwd_comp_order = [f'F_{i}' for i in range(num_pp_stages * vpp)]
        chunk_stage_map = dict(zip(fwd_comp_order, chunk_placement))

        # Initialize every task's start time to 0 (== not scheduled yet).
        fwd_lists = [
            f"F_{j}-{i}"
            for i in range(num_mb)
            for j in range(num_pp_stages * vpp)
        ]
        values = [0 for _ in range(num_pp_stages * vpp * num_mb)]
        start_time = dict(zip(fwd_lists, values))
        fwd_durations = dict()
        for j in range(num_pp_stages * vpp):
            for i in range(num_mb):
                fwd_durations[f"F_{j}-{i}"] = paras.fwd_durations[j]

        # Seed the warm-up: the first micro batch ripples through the stages.
        start_time[f"F_{0}-{0}"] = 0.1
        for s in range(num_pp_stages - 1):
            start_time[f"F_{s + 1}-{0}"] = start_time[f"F_{s}-{0}"] + paras.fwd_durations[s] + comm_matrix[s][s + 1]

        # Iteratively resolve start times until every task has been scheduled.
        begin_up = [1] * num_pp_stages
        remaining = [num_mb * vpp - begin_up[p] for p in range(num_pp_stages)]
        remaining_flag = True
        count = 0
        while remaining_flag:
            ids_old = []
            ids_new = []
            for s in range(num_pp_stages):
                ids_old.append(remaining[s])
                if remaining[s]:
                    microbatch_idx = len(stage_list[0]) - remaining[s]
                    (end_time_prev_task_same_stage,
                     end_time_dependent_task_same_microbatch,
                     job_flag) = self._get_prev_task_time((start_time, stage_list, s, microbatch_idx,
                                                          chunk_stage_map, fwd_comp_order, fwd_durations, comm_matrix))

                    if job_flag:
                        start_time[stage_list[s][microbatch_idx]] = max(end_time_prev_task_same_stage,
                                                                        end_time_dependent_task_same_microbatch)
                        remaining[s] = remaining[s] - 1

                ids_new.append(remaining[s])

                if all(item == 0 for item in remaining):
                    remaining_flag = False

            if ids_old == ids_new:
                count += 1
                if count == 3:
                    # No progress for 3 full sweeps: the schedule is deadlocked.
                    # Bug fix: penalize the key e2e_time below actually reads
                    # (the last forward task); the original wrote an unused
                    # 'B_0-...' key, so forward-only deadlocks went unpenalized.
                    logger.info("stage list is locked")
                    start_time[fwd_lists[-1]] = 1e7
                    break

        e2e_time = start_time[fwd_lists[-1]] + paras.fwd_durations[-1]
        stage_start_time = [[start_time[job_name] for job_name in stage_list[s]] for s in range(num_pp_stages)]

        return e2e_time, stage_start_time

    def get_schedule_fwd_only(self, paras):
        """Generate the per-stage schedule for forward-only execution.

        Each stage simply runs its forward chunks in micro-batch order; there
        is no backward interleaving. (A dead, never-called copy of the 1F1B
        interleaver was removed from this method.)
        """
        pp_stages = paras.num_stages
        vpp = paras.vpp
        num_microbatches = paras.num_microbatches
        computation_placement = list(range(pp_stages * vpp))

        # Forward execution order of the chunks.
        fwd_order = [f'F_{i}' for i in range(pp_stages * vpp)]

        def get_stage_schedule(all_jobs_array, comp_placement, num_stages, vpp):
            # For each stage, gather its chunks' jobs in groups of num_stages micro batches.
            stage_job_list = []
            for s in range(num_stages):
                stage_chunk_id = [index for index, element in enumerate(comp_placement) if (element % num_stages) == s]
                fwds = all_jobs_array[stage_chunk_id[0:vpp]]
                fwd_list = np.concatenate(
                    [
                        fwds[:, index: index + num_stages].flatten()
                        for index in range(0, np.size(all_jobs_array, 1), num_stages)
                    ]
                )
                stage_job_list.append(fwd_list.tolist())
            return stage_job_list

        all_jobs = np.array([[s + f'-{i}' for i in range(num_microbatches)] for s in fwd_order])
        stage_list = get_stage_schedule(all_jobs, computation_placement, pp_stages, vpp)
        return stage_list


class CostModel(ABC):
    """Base class for per-worker cost models: static memory and elapsed-time estimation."""

    def __init__(self, config, parallel_config, task_type, profile_path=None):
        self.config = config
        self.parallel_config = parallel_config
        self.task_type = task_type
        self.profile_path = profile_path
        self.unit_gb = 1024 ** 3  # bytes per GiB

    def get_memory(self):
        """Return the estimated peak memory in bytes; implemented by subclasses."""
        raise RuntimeError("Should be implemented by subclass")

    def get_time(self, prof_files: list):
        """Return the estimated elapsed time; implemented by subclasses."""
        raise RuntimeError("Should be implemented by subclass")

    def compute_static_memory(self, rank=0):
        """Return (params, gradient, optimizer) static memory in bytes for one rank.

        bf16 training: weights cost 2N bytes and gradients 4N bytes; optimizer
        state costs 10N (with fp32-param reuse) or 12N bytes, sharded across DP
        when the distributed optimizer is enabled.
        """
        # Bug fix: forward `rank` to compute_params (it was silently ignored);
        # behavior is unchanged for the default rank=0.
        N = self.compute_params(rank)
        DP = self.parallel_config.data_parallel_size

        actor_config = SystemConfig.actor_config
        if not actor_config.bf16:
            raise AssertionError('DataType not support')

        # params/gradient are identical in every branch; only optimizer varies.
        params = 2 * N
        gradient = 4 * N
        optimizer = 10 * N if actor_config.reuse_fp32_param else 12 * N
        if actor_config.use_distributed_optimizer:
            optimizer = optimizer / DP

        return params, gradient, optimizer

    def compute_params(self, rank=0):
        """Estimate the parameter count owned by `rank`'s pipeline stage.

        Module parameter counts in the profile were measured under `tp_old`
        tensor parallelism and are rescaled to the current tp size.
        Returns float('inf') when the profile cannot be read or is incomplete.
        """
        pp = self.parallel_config.pipeline_model_parallel_size
        tp = self.parallel_config.tensor_model_parallel_size
        num_layers = SystemConfig.actor_config.num_layers

        try:
            with open(self.profile_path, 'r') as file:
                content = json.loads(file.read())
                tp_old = content['parallel_config']['tensor_model_parallel_size']
                module_params = content['module_params']
        # Narrowed from BaseException: don't swallow KeyboardInterrupt/SystemExit.
        except Exception as e:
            logger.error(e)
            return float('inf')

        embedding_params = module_params.get('embedding', float('inf')) * tp_old / tp
        transformer_params = module_params.get('self_attention', float('inf')) * tp_old / tp + \
                             module_params.get('mlp', float('inf')) * tp_old / tp
        output_layer_params = module_params.get('output_layer', float('inf')) * tp_old / tp

        if pp == 1:
            total_params = embedding_params + transformer_params * num_layers + output_layer_params
        else:
            if rank == 0:
                total_params = embedding_params + transformer_params * (num_layers // pp)
            elif rank == pp - 1:
                total_params = transformer_params * (num_layers // pp) + output_layer_params
            else:
                total_params = transformer_params * (num_layers // pp)

        # Bug fix: int(float('inf')) raises OverflowError when module entries
        # are missing; propagate inf as the "infeasible" marker instead.
        if math.isinf(total_params):
            return float('inf')
        return int(total_params)

    @staticmethod
    def read_profile(file_path, key):
        """Return `data[key]` from a JSON profile, or None if the file/key is missing."""
        try:
            with open(file_path, 'r') as file:
                data = json.load(file)
                value = data.get(key, None)
        except FileNotFoundError:
            logger.error(f"not find {file_path}", flush=True)
            value = None
        # dict.get never raises KeyError (the original handler was dead code);
        # a malformed file, however, can raise JSONDecodeError.
        except json.JSONDecodeError:
            value = None
        return value


class GenerateCostModel(CostModel):
    """Cost model for the rollout/generation worker."""

    def get_memory(self):
        """Static memory is the model weights only (no gradients/optimizer for generation)."""
        params, _, _ = self.compute_static_memory()
        return params

    def get_time(self, prof_files: list):
        """Read the rollout elapsed time from the most recent profiling file.

        Returns float('inf') when the file is missing, malformed, or lacks
        'rollout_time', so infeasible configurations sort last.
        """
        prof_path = prof_files[-1]
        try:
            with open(prof_path, 'r', encoding='utf-8') as file:
                data = json.load(file)
                elapsed_time = data['rollout_time']
        except FileNotFoundError:
            logger.error(f"not find {prof_path}", flush=True)
            elapsed_time = float('inf')
        # Bug fix: a profile without 'rollout_time' or with broken JSON used to
        # crash the search; treat it as infeasible like the other readers do.
        except (KeyError, json.JSONDecodeError):
            elapsed_time = float('inf')
        return elapsed_time


class ActorCostModel(CostModel, PipelineCostModel):
    """Cost model for the actor (training) worker: pipeline timing plus peak memory."""

    def __init__(self, config, parallel_config, profile_path):
        CostModel.__init__(self, config, parallel_config, TaskType.UPDATE, profile_path)
        PipelineCostModel.__init__(self, config, parallel_config, TaskType.UPDATE)
        # Training runs both forward and backward passes.
        self.forward_only = False

    def get_memory(self):
        """Estimate peak training memory: weights + optimizer + gradients + activations."""
        params, gradient, optimizer = self.compute_static_memory()

        sliced_config = self.parallel_config.slice_dims(SystemConfig.rl_config.actor_resource.num_npus)
        coeff = 1
        if not Path(SystemConfig.actor_profile_path_fmt.format(sliced_config)).exists():
            # No profile for this exact config: fall back to the reduced-mbs
            # profile and scale the activation memory linearly.
            coeff = sliced_config.micro_batch_size
            sliced_config = sliced_config.slice_mbs()

        # Activation memory produced by a single transformer layer.
        transformer = self.read_profile(SystemConfig.actor_profile_path_fmt.format(sliced_config), '0')
        if not transformer:
            return float('inf')

        active_mem = coeff * transformer.get('memory', float('inf')) * self.unit_gb

        pp = self.parallel_config.pipeline_model_parallel_size
        vpp = self.parallel_config.virtual_pipeline_model_parallel_size
        num_layers = SystemConfig.actor_config.num_layers
        layers_per_stage = num_layers // pp

        if vpp == 1:
            total_active_mem = active_mem * layers_per_stage * pp
        else:
            per_chunk_mem = active_mem * (layers_per_stage // vpp)
            total_active_mem = per_chunk_mem * (pp * vpp + (pp - 1))

        peak_memory = params + optimizer + total_active_mem + gradient
        logger.info(f"config: {self.parallel_config} \n"
                    f"params: {params / self.unit_gb} \n"
                    f"optimizer: {optimizer / self.unit_gb} \n"
                    f"gradient: {gradient / self.unit_gb} \n"
                    f"active_mem: {active_mem / self.unit_gb} \n"
                    f"peak_memory: {peak_memory / self.unit_gb}")
        return peak_memory

    def get_time(self, prof_files: list):
        """End-to-end training iteration time from the first profiling file."""
        return self.get_iteration_time(prof_files[0])


class ReferenceCostModel(CostModel, PipelineCostModel):
    """Cost model for the reference (inference-only) worker."""

    def __init__(self, config, parallel_config, profile_path):
        CostModel.__init__(self, config, parallel_config, TaskType.REFERENCE, profile_path)
        PipelineCostModel.__init__(self, config, parallel_config, TaskType.REFERENCE)
        # The reference model only runs forward passes.
        self.forward_only = True

    def get_memory(self):
        """Estimate peak memory in bytes: static weights plus forward activations."""
        params, _, _ = self.compute_static_memory()  # bytes
        if math.isinf(params):
            return float('inf')

        sliced_config = self.parallel_config.slice_dims(SystemConfig.rl_config.actor_resource.num_npus)
        coeff = 1
        if not Path(SystemConfig.ref_profile_path_fmt.format(sliced_config)).exists():
            # No profile for this exact config: fall back to the reduced config
            # and scale the activation memory by the micro batch size.
            coeff = self.parallel_config.micro_batch_size
            sliced_config = self.parallel_config.slice_dims(
                SystemConfig.rl_config.actor_resource.num_npus,
                True
            )

        forward_step_mem = self.read_profile(SystemConfig.ref_profile_path_fmt.format(sliced_config), 'forward_step_mem')
        # Bug fix: read_profile returns None when the file/key is missing and the
        # original multiplied it directly (TypeError); treat as infeasible instead.
        if forward_step_mem is None:
            return float('inf')
        active_mem = coeff * forward_step_mem * self.unit_gb
        return params + active_mem

    def get_time(self, prof_files: list):
        """End-to-end forward-only iteration time from the first profiling file."""
        return self.get_iteration_time(prof_files[0])