import copy, time
import torch
import ray
from transformers import AutoTokenizer
from typing import Optional
import asyncio
import sys
import numpy as np

from mixserve.config import (
    ModelConfig,
    DisaggParallelConfig,
    ParallelConfig,
    CacheConfig,
    ContextStageSchedConfig,
    DecodingStageSchedConfig
)
from mixserve.models import get_model_op
from mixserve.utils import get_gpu_memory, set_random_seed, GB, MB
from mixserve.downloader import download_and_convert_weights
from mixserve import OfflineDriverLLM,SamplingParams

sys.path.append("..")	# Add the parent directory to the system path so that we can import structs.py
from structs import *
from .abstract_sut import SystemUnderTest, get_input_ids

BLOCK_SIZE = 16

class mixserveSUT(SystemUnderTest):
    """System-under-test wrapper around mixserve's ``OfflineDriverLLM``.

    Builds a disaggregated (context/decoding) engine from the given
    ``WorkerParam`` — splitting the tensor-parallel world evenly between the
    two stages — and exposes synchronous and asynchronous inference entry
    points that return ``(input_ids, predict_ids, predict_texts,
    prefill_time, decode_time)``.
    """

    def __init__(self,
                 worker_param: WorkerParam,
                 input_params: list[InputParam]
                 ):
        self.worker_param = worker_param
        # Largest total token footprint (prompt + generated + 1) across all
        # planned runs. NOTE(review): this was computed but unused in the
        # original; kept as an attribute for capacity reasoning — the scheduler
        # configs below still use fixed limits.
        self.max_num_tokens = max(
            (p.batch_size * (p.input_len + p.output_len + 1)
             for p in input_params),
            default=0,
        )

        def _stage_parallel() -> ParallelConfig:
            # Each stage (context / decoding) gets half the TP world; no
            # pipeline parallelism. Fresh instance per stage so the engine
            # never shares (and possibly mutates) one config object.
            return ParallelConfig(
                tensor_parallel_size=worker_param.tp_world_size // 2,
                pipeline_parallel_size=1,
            )

        self.engine = OfflineDriverLLM(
            model_config=ModelConfig(
                model=worker_param.model_dir,
                tokenizer=None,
            ),
            disagg_parallel_config=DisaggParallelConfig(
                context=_stage_parallel(),
                decoding=_stage_parallel(),
            ),
            cache_config=CacheConfig(
                block_size=BLOCK_SIZE,  # keep in sync with the module constant
                max_num_blocks_per_req=128,
                gpu_memory_utilization=0.9,
                cpu_swap_space=1.0,
            ),
            context_sched_config=ContextStageSchedConfig(
                policy="fcfs",
                max_batch_size=160,
                # 160 requests x 128 blocks x 16 tokens/block
                max_tokens_per_batch=160 * 128 * 16,
            ),
            decoding_sched_config=DecodingStageSchedConfig(
                policy="fcfs",
                max_batch_size=160,
                max_tokens_per_batch=160 * 128 * 16,
            ),
            profile_first=False,
        )

    async def initialization(self):
        """Asynchronously bring up the underlying engine."""
        await self.engine.async_init()

    @staticmethod
    def _make_tpot_slo(batch_size: int) -> list[float]:
        """Per-request TPOT SLOs: the first half of the batch is
        latency-critical (40 ms/token), the second half relaxed (120 ms/token).
        """
        return [0.04 if i < batch_size / 2 else 0.12 for i in range(batch_size)]

    @staticmethod
    def _collect_outputs(outputs):
        """Flatten per-step engine outputs.

        Returns ``(predict_ids, predict_texts, prefill_time, decode_time)``.
        When ``outputs`` is ``None``/empty, returns ``([], [], 0, [])`` instead
        of raising ``NameError`` as the original code did (``predict_ids`` /
        ``predict_texts`` were only bound inside the guard).

        NOTE(review): ``prefill_time`` and ``decode_time`` are overwritten on
        every step, so only the final step's timings survive — preserved from
        the original; confirm this is the intended metric.
        """
        predict_ids: list[list[int]] = []
        predict_texts: list[list[str]] = []
        prefill_time = 0
        decode_time: list[float] = []
        if outputs is not None:
            for step_outputs in outputs:
                predict_ids.append(
                    [step_output.new_token_id for step_output in step_outputs])
                predict_texts.append(
                    [step_output.new_token for step_output in step_outputs])
                prefill_time = np.sum(step_outputs[0].prefill_time)
                decode_time = [step_output.decode_time
                               for step_output in step_outputs]
        return predict_ids, predict_texts, prefill_time, decode_time

    async def async_inference(
        self,
        input_param: InputParam
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], list[str], float, list[float]]:
        """Run one asynchronous generation for ``input_param``.

        Returns ``(input_ids, predict_ids, predict_texts, prefill_time,
        decode_time)``.
        """
        input_ids = get_input_ids(
            self.worker_param.model_dir,
            input_param.input_len * input_param.batch_size)
        # One row of prompt tokens per request in the batch.
        prompt_token_ids = input_ids.view(
            input_param.batch_size, input_param.input_len).tolist()
        sampling_params = SamplingParams(
            temperature=0.8, top_p=0.95,
            max_tokens=input_param.output_len, ignore_eos=True,
        )
        outputs = await self.engine.generate(
            prompt_token_ids=prompt_token_ids,
            sampling_params=sampling_params,
            TPOT_SLO=self._make_tpot_slo(input_param.batch_size),
        )
        predict_ids, predict_texts, prefill_time, decode_time = \
            self._collect_outputs(outputs)
        return input_ids, predict_ids, predict_texts, prefill_time, decode_time

    def inference(
        self,
        input_param: InputParam
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], list[str], float, list[float]]:
        """Synchronous counterpart of :meth:`async_inference`.

        Drives the async engine with ``asyncio.run`` (so it must not be called
        from within a running event loop).
        """
        input_ids = get_input_ids(
            self.worker_param.model_dir,
            input_param.input_len * input_param.batch_size)
        prompt_token_ids = input_ids.view(
            input_param.batch_size, input_param.input_len).tolist()
        sampling_params = SamplingParams(
            temperature=0.8, top_p=0.95,
            max_tokens=input_param.output_len, ignore_eos=True,
        )
        outputs = asyncio.run(self.engine.generate(
            prompt_token_ids=prompt_token_ids,
            sampling_params=sampling_params,
            TPOT_SLO=self._make_tpot_slo(input_param.batch_size),
        ))
        predict_ids, predict_texts, prefill_time, decode_time = \
            self._collect_outputs(outputs)
        return input_ids, predict_ids, predict_texts, prefill_time, decode_time
    