import onnxruntime as ort
import asyncio
from contextlib import asynccontextmanager
from typing import List, Callable, Any


class ONNXSessionPool:
    """Fixed-size pool of ONNX Runtime sessions shared through an asyncio.Queue.

    Each concurrent task borrows a session via ``get_session()`` so no two
    tasks ever run on the same ``InferenceSession`` at once.
    """

    def __init__(self, model_path: str, pool_size: int = 2, providers: List[str] = None, provider_options: List[dict] = None):
        self.model_path = model_path
        self.pool_size = pool_size
        self.queue = asyncio.Queue(maxsize=pool_size)
        self.providers = providers or ["CPUExecutionProvider"]
        # BUG FIX: the original used `[{}] * len(self.providers)`, which puts
        # N references to the SAME dict in the list — mutating options for one
        # provider would silently change them for all. Build distinct dicts.
        self.provider_options = provider_options or [{} for _ in self.providers]

    async def init(self):
        """Create ``pool_size`` sessions and fill the queue.

        Must be awaited once before ``get_session()`` is used.
        """
        sess_options = ort.SessionOptions()
        # One intra-op thread per session: parallelism comes from the pool
        # itself, not from each session's internal thread pool.
        sess_options.intra_op_num_threads = 1
        sess_options.execution_mode = ort.ExecutionMode.ORT_PARALLEL

        for _ in range(self.pool_size):
            session = ort.InferenceSession(
                self.model_path,
                sess_options,
                providers=self.providers,
                provider_options=self.provider_options
            )
            await self.queue.put(session)

    @asynccontextmanager
    async def get_session(self):
        """Borrow a session; waits until one is free and always returns it.

        The ``finally`` guarantees the session goes back into the pool even
        if the caller's block raises.
        """
        session = await self.queue.get()
        try:
            yield session
        finally:
            await self.queue.put(session)


def compose_processors(processors: List[Callable[[Any], Any]]) -> Callable[[Any], Any]:
    """Compose *processors* left-to-right into a single callable.

    The returned function feeds its argument through each processor in list
    order; an empty list yields the identity function.
    """

    def composed(value: Any) -> Any:
        for step in processors:
            value = step(value)
        return value

    return composed


class ModelNode:
    """One pipeline stage: an ONNX model behind a session pool, with optional
    pre-/post-processing hooks applied around every inference call.
    """

    def __init__(self, model_path: str, pool_size: int = 2, preprocess: Callable = None, postprocess: Callable = None):
        self.pool = ONNXSessionPool(model_path, pool_size)
        # Identity defaults keep infer() free of None checks.
        self.preprocess = preprocess or (lambda x: x)
        self.postprocess = postprocess or (lambda x: x)

    async def init(self):
        """Create the pooled sessions; must be awaited before infer()."""
        await self.pool.init()

    async def infer(self, input_data):
        """Run one inference on a borrowed session in a worker thread.

        ``preprocess(input_data)`` must return a sequence of input tensors in
        the same order as the model's declared inputs (they are zipped by
        position). ``postprocess`` receives ``{"infer_result": outputs}``
        where ``outputs`` is the list returned by ``session.run``.

        The blocking ``session.run`` is dispatched to the default thread-pool
        executor so the event loop stays responsive.
        """
        async with self.pool.get_session() as session:
            # FIX: get_event_loop() inside a coroutine is deprecated since
            # Python 3.10; get_running_loop() is the correct call here.
            loop = asyncio.get_running_loop()

            def _run():
                inputs = self.preprocess(input_data)
                input_names = [spec.name for spec in session.get_inputs()]
                ort_inputs = dict(zip(input_names, inputs))
                outs = session.run(None, ort_inputs)
                return self.postprocess({"infer_result": outs})

            return await loop.run_in_executor(None, _run)


class InferencePipeline:
    def __init__(self, stages: list[ModelNode]):
        self.stages = stages

    async def init(self):
        for stage in self.stages:
            await stage.init()

    async def infer(self, input_data):
        data = input_data
        for stage in self.stages:
            data = await stage.infer(data)
        return data
