# Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.

from ...utils.decorators.time_decorator import timer
from ...utils.prof.profiler import span_start, span_end, span_req
import mindspore as ms
import time
from mindformers.tools.debug_info import DetailedLatency


class Plugin:
    def __init__(self, generator_backend, cache_manager, input_manager, **kwargs):
        """Wire up the generation backend and managers used by the token loop.

        Args:
            generator_backend: backend exposing ``model_wrapper``, ``sampler``,
                ``forward`` and ``sample``.
            cache_manager: KV-cache manager, stored for use by sibling methods.
            input_manager: input manager, stored for use by sibling methods.
            **kwargs: accepted for forward compatibility; currently unused.
        """
        backend = generator_backend
        self.generator_backend = backend
        # Convenience aliases pulled off the backend object.
        self.model_wrapper = backend.model_wrapper
        self.sampler = backend.sampler
        self.cache_manager = cache_manager
        self.input_manager = input_manager
        # Decode-step counter and average prompt length; both feed the
        # per-batch latency report printed in generate_token.
        self.count = 0
        self.input_len = 0
        self.detailed_latency = DetailedLatency()

    @timer.track_time_async('generate_token')
    def generate_token(self, input_metadata):
        """Run one generation step: preprocess -> forward -> sample -> postprocess.

        Args:
            input_metadata: batch metadata; must expose ``is_prefill``,
                ``total_seq_num``, ``batch_size`` and ``batch_max_output_lens``
                (assumed per-request max output lengths — TODO confirm schema).

        Returns:
            Tuple ``(next_tokens, eos_np, stop_generate, truncation_indices,
            token_indices, request_ids)``.
        """
        self.count += 1
        if input_metadata.is_prefill:
            # Average prompt length per request; only used for the latency
            # report printed at the end of a decode batch.
            self.input_len = input_metadata.total_seq_num / input_metadata.batch_size

        prof = span_start("preprocess")
        self.detailed_latency.start_preprocess_timer()
        model_inputs, cached_idx, sampling_param, sampling_data, request_ids = self.preprocess(input_metadata)
        span_req(prof, request_ids)
        span_end(prof)

        prof = span_start("forward")
        self.detailed_latency.start_predict_timer(prefill=input_metadata.is_prefill)
        logits = self.generator_backend.forward(model_inputs)
        span_end(prof)

        prof = span_start("sample")
        self.detailed_latency.start_postprocess_timer(prefill=input_metadata.is_prefill)
        next_tokens, _ = self.generator_backend.sample(logits, sampling_data, sampling_param)
        span_end(prof)

        prof = span_start("postprocess")
        # Idiomatic boolean test (was `True if ... else False`).
        sampling = sampling_param is not None
        eos_np, stop_generate, truncation_indices, token_indices, next_tokens = (
            self.postprocess(input_metadata, next_tokens, cached_idx, sampling))
        span_end(prof)
        self.detailed_latency.end_postprocess_timer(prefill=input_metadata.is_prefill)

        res_and_stop = (next_tokens, eos_np, stop_generate, truncation_indices, token_indices, request_ids)
        # Once a decode batch has emitted its maximum number of output tokens,
        # dump the accumulated latency breakdown and reset the counters.
        # NOTE(review): indexing batch_max_output_lens[0] assumes a uniform max
        # output length across the batch — confirm against the caller.
        if not input_metadata.is_prefill and self.count >= input_metadata.batch_max_output_lens[0]:
            print(f"batch: {input_metadata.batch_size} x [{self.input_len}, {input_metadata.batch_max_output_lens[0]}]", flush=True)
            self.detailed_latency.print_info()
            self.count = 0
            self.detailed_latency.clear()
        return res_and_stop