import sglang as sgl
import torch
import os 
import sys
from typing import Optional
sys.path.append("..")	# Add the parent directory to the system path so that we can import structs.py
from structs import *
import random
from .abstract_sut import SystemUnderTest, get_input_ids

class sglangSUT(SystemUnderTest):
    """System-under-test wrapper around an sglang ``Engine``.

    The engine's total-token budget is sized up front from the most
    demanding workload in ``input_params``; ``inference`` then runs one
    streaming batch and reports texts plus prefill/decode timings.
    """

    def __init__(self,
                 worker_param:WorkerParam,
                 input_params:list[InputParam]
                 ):
        """Build the sglang engine sized for the given workloads.

        ``max_total_tokens`` is set to the largest
        ``batch_size * (input_len + output_len + 1)`` over all planned
        runs (the +1 leaves headroom of one extra token per sequence).
        """
        # Token footprint of each planned run; the leading 0 mirrors the
        # original running-max seed so an empty list yields 0.
        per_run_tokens = [
            p.batch_size * (p.input_len + p.output_len + 1)
            for p in input_params
        ]
        max_num_tokens = max([0, *per_run_tokens])
        self.worker_param = worker_param
        self.engine = sgl.Engine(
            model_path=worker_param.model_dir,
            tp_size=worker_param.tp_world_size,
            max_total_tokens=max_num_tokens,
            pp_size=1,
            max_micro_batch_size=worker_param.max_req_num,
            disable_cuda_graph=True,
        )

    def inference(
        self,
        input_param: InputParam
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], list[str], float, list[float]]:
        """Run one streaming batch and collect outputs and timings.

        Returns ``(input_ids, predict_ids, predict_texts, prefill_time,
        decode_time)``.  NOTE(review): ``predict_ids`` is filled with
        ``random.randint`` values (a list of int lists, not the tensor the
        annotation suggests) — presumably a placeholder for benchmarking
        where only token *counts* matter; confirm with callers.
        """
        batch = input_param.batch_size
        # Draw one flat prompt tensor, then split it into per-request
        # token-id lists for the engine.
        input_ids = get_input_ids(self.worker_param.model_dir,
                                  input_param.input_len * batch)
        prompt_token_ids = input_ids.view(batch, input_param.input_len).tolist()
        sampling_params = {
            "temperature": 0.8,
            "top_p": 0.95,
            "ignore_eos": True,
            "max_new_tokens": input_param.output_len,
        }
        # timing=True makes generate() also hand back timing accessors
        # alongside the streaming generator (project-specific sglang API).
        stream_gen, get_prefill_time, get_decode_time = self.engine.generate(
            input_ids=prompt_token_ids,
            sampling_params=sampling_params,
            stream=True,
            timing=True,
        )

        # Drain the stream; each chunk is a per-request output dict.
        request_outputs = list(stream_gen)

        prefill_time = get_prefill_time()
        decode_time = get_decode_time()
        # Only the last chunk per request carries the final text and the
        # completion-token count — assumes one final chunk per request,
        # in request order (TODO confirm against engine semantics).
        final_chunks = request_outputs[-batch:]
        predict_ids = [
            [random.randint(0, 9999)
             for _ in range(chunk['meta_info']['completion_tokens'])]
            for chunk in final_chunks
        ]
        predict_texts = [chunk['text'] for chunk in final_chunks]
        return input_ids, predict_ids, predict_texts, prefill_time, decode_time
    