
from .vllm_hooker_base import vLLMHookerBase



class ModelRunnerExecuteHook(vLLMHookerBase):
    """Hook that logs a marker line before and after ``ModelRunner.execute_model``.

    Applies only to vllm 0.6.3 (see ``vllm_version`` range).
    """

    vllm_version = ("0.6.3", "0.6.3")

    def init(self):
        from vllm.worker.model_runner import ModelRunner

        def make_execute_model(original):
            # Wrapper keeps the exact call signature of ModelRunner.execute_model
            # and forwards everything unchanged; only adds the log lines.
            def execute_model(runner, model_input, kv_caches, *args, **kwargs):
                print("=== model execute start ===")
                result = original(runner, model_input, kv_caches, *args, **kwargs)
                print("=== model execute end ===")
                return result

            return execute_model

        self.do_hook([ModelRunner.execute_model], make_execute_model)
 

class EngineAddRequestHook(vLLMHookerBase):
    """Hook that logs around ``add_request`` on both the sync engine and the
    async request tracker.

    Applies only to vllm 0.6.3 (see ``vllm_version`` range).
    """

    vllm_version = ("0.6.3", "0.6.3")

    def init(self):
        from vllm.engine.llm_engine import LLMEngine
        from vllm.engine.async_llm_engine import RequestTracker

        def make_add_request(original):
            # Same signature as the wrapped add_request; purely additive logging.
            def add_request(engine, request_id, *args, **kwargs):
                print("=^_^= engine add request start =^_^=")
                result = original(engine, request_id, *args, **kwargs)
                print("=^_^= engine add request end =^_^=")
                return result

            return add_request

        # Both targets share the add_request(self, request_id, ...) shape,
        # so one maker covers them.
        self.do_hook([LLMEngine.add_request, RequestTracker.add_request], make_add_request)
         
      

class ServerGenerateHook(vLLMHookerBase):
    """Hook that logs the lifecycle of a server request: around
    ``AsyncLLMEngine.generate`` and around the iteration of its result stream
    (``iterate_with_cancellation``), correlating the two by request id.

    Applies only to vllm 0.6.3 (see ``vllm_version`` range).
    """

    vllm_version = ("0.6.3", "0.6.3")

    def init(self):
        from vllm.engine.async_llm_engine import AsyncLLMEngine
        from vllm.utils import iterate_with_cancellation

        # Maps id(async generator returned by generate) -> request_id so the
        # iteration hook can recover which request it is streaming.
        # NOTE: keyed on id(), which CPython may reuse once the generator is
        # garbage collected — entries must therefore always be removed when
        # iteration ends (see the finally block below).
        cache_gen_2_req_id = {}

        def engine_generate_maker(ori_func):
            def generate(this, prompt, sampling_params, request_id, *args, **kwargs):
                print("=^_^= engine generate start =^_^=", "req is", request_id)
                ret = ori_func(this, prompt, sampling_params, request_id, *args, **kwargs)
                cache_gen_2_req_id[id(ret)] = request_id
                print("=^_^= engine generate end =^_^=")
                return ret
            return generate

        self.do_hook([AsyncLLMEngine.generate], engine_generate_maker)

        def iterate_with_cancellation_maker(ori_func):
            async def iterate_with_cancellation(iterator, is_cancelled, *args, **kwargs):
                print("=^_^= iterate_with_cancellation start =^_^=")
                try:
                    async for out in ori_func(iterator, is_cancelled, *args, **kwargs):
                        yield out
                    # Normal completion: report which request just finished.
                    print("=^_^= iterate_with_cancellation end =^_^=", "req is", cache_gen_2_req_id.get(id(iterator)))
                finally:
                    # BUGFIX: the original popped the mapping only on normal
                    # completion, so cancelled/failed requests leaked entries
                    # (and a reused id() could later map to a stale request).
                    # Always drop the entry, whatever way iteration ends.
                    cache_gen_2_req_id.pop(id(iterator), None)

            return iterate_with_cancellation

        self.do_hook([iterate_with_cancellation], iterate_with_cancellation_maker, pname="generate")
        
        
    
# Registry of every hooker class this module exposes; consumers iterate this
# list to install all vLLM hooks.
all_hookers = [ ModelRunnerExecuteHook, EngineAddRequestHook, ServerGenerateHook ]
