import asyncio
import argparse
import base64
import concurrent.futures
import contextlib
import functools
import fastapi
import fastapi.encoders
import fastapi.exceptions
import fastapi.responses
import uvicorn
import uuid
import pandas as pd
import pydantic
import io
import json
import os
import logging
import starlette.exceptions
from typing_extensions import TypeAlias
from typing import (Any, AsyncGenerator, Awaitable, Dict,
                    Generic, Literal, Optional, TypeVar)

from paddlets import TSDataset
from paddlets.models.model_loader import load
from paddlets.models.forecasting.dl.paddle_base import PaddleBaseModel

# Root logging configuration for the whole process.
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# Process-wide ModelManager singleton; set and cleared by `app_lifespan`.
model_manager: Optional["ModelManager"] = None
# Base file name of the serialized model inside each model directory
# (the metadata file is `<model_file_name>_model_meta` alongside it).
model_file_name = "checkpoints"


class BaseResponse(pydantic.BaseModel):
    """Common envelope for every API response; carries a per-request log id."""
    logId: str


class ErrorResponse(BaseResponse):
    """Envelope returned for failed requests (non-zero errorCode)."""
    errorCode: int
    errorMsg: str


class NoErrorResponse(BaseResponse):
    """Envelope for successful requests: errorCode/errorMsg are pinned by Literal."""
    errorCode: Literal[0]
    errorMsg: Literal["success"]


class CheckHealthResponse(NoErrorResponse):
    """Response body of the /health endpoint (no extra fields)."""
    pass

# Payload type carried inside a successful ResultResponse.
ResultT = TypeVar("ResultT", bound=pydantic.BaseModel)


class ResultResponse(NoErrorResponse, Generic[ResultT]):
    """Successful response that wraps a typed result payload."""
    result: ResultT


class InferRequest(pydantic.BaseModel):
    """Inference request: `csv` is a base64-encoded UTF-8 CSV file
    (decoded and parsed with pandas in the /time-series-forecasting handler)."""
    csv: str


class InferResult(pydantic.BaseModel):
    """Inference result: `csv` is the base64-encoded prediction CSV."""
    csv: str


"""
    PPTS 模型相关
"""

# A model is either waiting for work ("IDLE") or busy with one inference ("RUNNING").
ModelState: TypeAlias = Literal["IDLE", "RUNNING"]

class ModelWrapper(object):
    """Wraps one PaddleTS forecasting model together with a busy/idle flag.

    The wrapper owns the dataframe -> TSDataset pre-processing and the
    TSDataset -> CSV-string post-processing around ``self._model.predict``.
    """

    # Known covariate columns expected in the input dataframe (besides
    # 'date' and the target 'OT').
    _KNOWN_COV_COLS = ('weekday', 'is_workday', 'is_holiday', 'holidays')

    def __init__(self, model: PaddleBaseModel, name: Optional[str] = None, pred_len: int = 24) -> None:
        """
        Args:
            model: the loaded PaddleTS model used for prediction.
            name: human-readable identifier (used as the registry key).
            pred_len: forecast horizon; the final `pred_len` rows of the
                input frame are treated as the unknown future to predict.
        """
        super().__init__()
        self._model = model
        self._name = name
        self._state: ModelState = "IDLE"
        # Stored as a negative offset so `df.iloc[0:self._pred_len]` drops
        # the final `pred_len` rows (the horizon).
        self._pred_len = -pred_len

    @property
    def name(self) -> Optional[str]:
        """Model identifier; may be None if no name was supplied."""
        return self._name

    @property
    def state(self) -> ModelState:
        """Current scheduling state: "IDLE" or "RUNNING"."""
        return self._state

    @state.setter
    def state(self, state: ModelState) -> None:
        self._state = state

    def pre_process(self, **kwargs: Any) -> TSDataset:
        """Build the TSDataset fed to the model from ``kwargs["data"]``.

        Expects a dataframe with a 'date' column (hourly frequency), an 'OT'
        target column, and every column in ``_KNOWN_COV_COLS``.
        """
        df = kwargs["data"]
        # History only: everything except the final `pred_len` rows.
        target_df = df.iloc[0:self._pred_len]
        # Known covariates cover the full range, including the horizon.
        known_df = df

        target_dataset = TSDataset.load_from_dataframe(
            target_df,
            time_col='date',
            target_cols='OT',
            freq='1h'
        )
        target_dataset['OT'] = pd.to_numeric(target_dataset['OT'], downcast='float')

        known_cov_dataset = TSDataset.load_from_dataframe(
            known_df,
            time_col='date',
            known_cov_cols=list(self._KNOWN_COV_COLS),
            freq='1h'
        )
        # Coerce every covariate to float in one loop instead of four
        # copy-pasted statements.
        for col in self._KNOWN_COV_COLS:
            known_cov_dataset[col] = pd.to_numeric(known_cov_dataset[col], downcast='float')

        return TSDataset.concat([target_dataset, known_cov_dataset])

    def post_process(self, result_dataset: TSDataset) -> str:
        """Serialize the prediction TSDataset to a CSV string (index named 'date')."""
        result_df = result_dataset.to_dataframe()
        result_df.index.name = 'date'

        output = io.StringIO()
        result_df.to_csv(output)
        return output.getvalue()

    def infer(self, **kwargs: Any) -> Any:
        """Run pre-process -> predict -> post-process; returns the CSV text."""
        target_cov_dataset = self.pre_process(**kwargs)
        result_dataset = self._model.predict(target_cov_dataset)
        return self.post_process(result_dataset)


class ModelManager(object):
    """Owns a pool of ModelWrapper instances and schedules inference on them.

    Each inference runs on a thread-pool worker; `infer` polls until an
    idle model becomes available.
    """

    def __init__(self,
                 path_models: Dict[str, str],
                 *,
                 polling_interval: float = 0.1) -> None:
        """
        Args:
            path_models: mapping of model name -> model directory path.
            polling_interval: seconds to sleep between idle-model checks.
        """
        super().__init__()
        self._path_models = path_models
        self._polling_interval = polling_interval
        self._models: Dict[str, ModelWrapper] = {}
        # TODO: Let each model run in a separate process.
        # max(1, ...) keeps the executor constructible even when no model
        # directories were found; `infer` then fails with a clear
        # "No living models" error instead of ValueError raised here.
        self._executor = concurrent.futures.ThreadPoolExecutor(
            max_workers=max(1, len(self._path_models)))

    def new_models(self) -> None:
        """(Re)load every configured model, replacing any previous instances."""
        self._models.clear()
        for name, path in self._path_models.items():
            model = self._new_model(name, path)
            self._models[model.name] = model

    async def infer(self, **kwargs: Any) -> Any:
        """Run inference on the first idle model; polls until one frees up."""
        # TODO: Timeout
        while True:
            model = self._get_available_model()
            if not model:
                await asyncio.sleep(self._polling_interval)
                continue
            model.state = "RUNNING"
            # NOTE: We take an optimistic approach here, assuming that the model
            # is not broken even if inference fails.
            try:
                result = await self._new_inference_task(model, **kwargs)
            except Exception:
                # Narrowed from a bare `except:` so cancellation/shutdown
                # (BaseException) is not logged as a model failure; the
                # `finally` below still releases the model either way.
                logger.warning(
                    "The model %r encountered an error during inference.",
                    model.name)
                raise
            finally:
                model.state = "IDLE"
            return result

    def close(self) -> None:
        """Shut down the worker pool, waiting for in-flight inferences."""
        self._executor.shutdown(wait=True)

    def _new_model(self, name: str, path: str) -> ModelWrapper:
        """Load one model from `path` and wrap it with its forecast horizon."""
        logger.info("Creating a new model from %s ...", path)
        model_meta = self._get_model_meta(path)
        real_model = load(os.path.join(path, model_file_name))
        model = ModelWrapper(real_model, name, model_meta["size"]["out_chunk_len"])
        logger.info("New model: %s", model.name)
        return model

    def _get_model_meta(self, path: str) -> Dict:
        """Read the JSON metadata file stored next to the checkpoint."""
        meta_path = os.path.join(path, model_file_name + '_model_meta')
        with open(meta_path, 'r', encoding='utf-8') as file:
            return json.load(file)

    def _get_available_model(self) -> Optional[ModelWrapper]:
        """Return the first idle model, or None when all are busy.

        Raises:
            RuntimeError: if no models have been loaded at all.
        """
        if not self._models:
            raise RuntimeError("No living models")
        for model in self._models.values():
            if model.state == "IDLE":
                return model
        return None

    def _new_inference_task(self, model: ModelWrapper,
                            **kwargs: Any) -> Awaitable[Any]:
        """Schedule `model.infer(**kwargs)` on the thread pool; returns an awaitable."""
        return asyncio.get_running_loop().run_in_executor(
            self._executor, functools.partial(model.infer, **kwargs))


"""
    工具方法
"""
def generate_log_id() -> str:
    """Return a fresh correlation id for log/response pairing (random UUID4)."""
    return f"{uuid.uuid4()}"


def get_model_manager() -> ModelManager:
    """Return the process-wide ModelManager singleton.

    Raises:
        RuntimeError: if called before `app_lifespan` has initialized it.
    """
    # Explicit `is None` instead of truthiness: a ModelManager instance is
    # always truthy today, but the identity check states the intent and
    # survives a future __bool__/__len__ on the class.
    if model_manager is None:
        raise RuntimeError("The model manager is not initialized.")
    return model_manager


"""
    Web Server 生命周期函数（启动/停止事件）
"""
@contextlib.asynccontextmanager
async def app_lifespan(app: fastapi.FastAPI) -> AsyncGenerator[None, None]:
    """FastAPI lifespan: load all models on startup, tear them down on shutdown.

    Each sub-directory of ./models is treated as one model directory.
    """
    logger.info("Web 服务启动中...")
    # `with` ensures the scandir iterator (and its OS directory handle) is
    # closed even if iteration raises; the original leaked it.
    with os.scandir("models") as entries:
        path_models: Dict[str, str] = {
            entry.name: entry.path for entry in entries if entry.is_dir()
        }

    global model_manager
    model_manager = ModelManager(path_models)
    loop = asyncio.get_running_loop()
    # Model loading is blocking (disk + framework init); run it off the
    # event loop so startup stays responsive.
    await loop.run_in_executor(None, model_manager.new_models)
    yield
    await loop.run_in_executor(None, model_manager.close)
    model_manager = None
    logger.info("Web 服务已停止!")

# ASGI application object; `app_lifespan` handles model load/teardown.
app = fastapi.FastAPI(lifespan=app_lifespan)


"""
    注册Handler
"""
@app.exception_handler(fastapi.exceptions.RequestValidationError)
async def validation_exception_handler(
    request: fastapi.Request, exc: fastapi.exceptions.RequestValidationError
) -> fastapi.responses.JSONResponse:
    """Translate request-validation failures into the shared ErrorResponse
    envelope with HTTP status 422."""
    body = ErrorResponse(
        logId=generate_log_id(),
        errorCode=422,
        errorMsg=json.dumps(exc.errors()))
    payload = fastapi.encoders.jsonable_encoder(body)
    return fastapi.responses.JSONResponse(content=payload, status_code=422)


@app.exception_handler(starlette.exceptions.HTTPException)
async def http_exception_handler(
    request: fastapi.Request,
    exc: starlette.exceptions.HTTPException) -> fastapi.responses.JSONResponse:
    """Wrap any raised HTTPException in the shared ErrorResponse envelope,
    preserving its status code in both body and HTTP response."""
    status = exc.status_code
    body = ErrorResponse(
        logId=generate_log_id(),
        errorCode=status,
        errorMsg=exc.detail)
    payload = fastapi.encoders.jsonable_encoder(body)
    return fastapi.responses.JSONResponse(content=payload, status_code=status)


"""
    REST接口：健康检测
"""
@app.get("/health", operation_id="checkHealth", status_code=200)
async def check_health() -> CheckHealthResponse:
    return CheckHealthResponse(
        logId=generate_log_id(), errorCode=0, errorMsg="success")


"""
    REST接口：时序预测
"""
@app.post(
    "/time-series-forecasting",
    operation_id="infer",
    responses={422: {
        "model": ErrorResponse
    }})
async def infer(request: InferRequest) -> ResultResponse[InferResult]:
    """Forecast endpoint: decode the base64 CSV, run it through an available
    model, and return the prediction as base64 CSV.

    Any failure is logged and surfaced as a generic HTTP 500.
    """
    logger.info("PPTS时序预测")
    # Local name chosen so it does not shadow the module-level global.
    manager = get_model_manager()

    try:
        decoded_csv = base64.b64decode(request.csv).decode("utf-8")
        with io.StringIO(decoded_csv) as buffer:
            frame = pd.read_csv(buffer)

        prediction_csv = await manager.infer(data=frame)
        encoded = base64.b64encode(prediction_csv.encode("utf-8"))

        return ResultResponse(
            logId=generate_log_id(),
            errorCode=0,
            errorMsg="success",
            result=InferResult(csv=encoded.decode("ascii")))

    except Exception as e:
        logger.exception(e)
        raise fastapi.HTTPException(
            status_code=500, detail="Internal server error")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, default="0.0.0.0")
    parser.add_argument("--port", type=int, default=8098)
    args = parser.parse_args()

    uvicorn.run(app, host=args.host, port=args.port, log_level=logging.INFO)


