import argparse
import os
import sys
from contextlib import asynccontextmanager

import jieba
import uvicorn
from anyio import CapacityLimiter
from anyio.lowlevel import RunVar
from fastapi import FastAPI, Request
from loguru import logger
from openai import OpenAIError, RateLimitError, AuthenticationError
from starlette.responses import StreamingResponse
from volcenginesdkarkruntime._exceptions import ArkError

# Make the project importable when launched as a script: put the current
# working directory and its parent on sys.path so `ai_engine.*` resolves.
data = os.getcwd()
sys.path.append(data)
# os.path.dirname is separator-safe; the previous string slicing broke on
# Windows paths and appended "" when the path contained no '/'.
sys.path.append(os.path.dirname(data))

from ai_engine.car_wrap.service.chat.chat_service import ChatService
from ai_engine.car_wrap.service.extraction.extraction_service import ExtractionService
from ai_engine.common.ai_common import trace_context
from ai_engine.load_config import SysConfig
from ai_engine.core.model import result
from ai_engine.core.model.base import RequestHeader
from ai_engine.core.model.chat import CompletionRequest, ExtractionRequest
from ai_engine.core.model.result_code import ERROR_AI_DEFAULT_ERROR, ERROR_AI_RATE_LIMIT_ERROR, ERROR_DEFAULT_ERROR, \
    ERROR_AI_ACCOUNT_ERROR



@asynccontextmanager
async def lifespan(sys_app: FastAPI):
    """Application lifespan: set up logging, the sync-endpoint thread
    limit and the jieba user dictionary before serving requests."""
    fmt = ("<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> "
           "| <level>{level: <8}</level> "
           "| <green>{extra[trace_id]:}</green> "
           "| <cyan>{name}</cyan>:<cyan>{function}</cyan><cyan>[{line}]</cyan> : <level>{message}</level>")
    logger.remove()
    log_cfg = SysConfig.log
    # File sink (rotated at 200 MB) plus stderr, both tagged with the
    # request trace id via _log_filter.
    logger.add(sink=log_cfg["path"],
               level=log_cfg["level"],
               rotation="200 MB",
               format=fmt,
               filter=_log_filter)
    logger.add(sys.stderr, format=fmt, filter=_log_filter)
    # Cap FastAPI's default thread pool for sync endpoints (default is 40;
    # requests beyond the limit queue).
    RunVar("_default_thread_limiter").set(CapacityLimiter(200))
    # Load the custom jieba dictionary configured for this deployment.
    jieba.load_userdict(SysConfig.jieba["path"])
    logger.info("Oula Engine RESTful API server started")

    vector_cfg = SysConfig.vector_store
    logger.info("Oula Engine vector：" + vector_cfg["host"])
    yield


app = FastAPI(lifespan=lifespan)


def _log_filter(record):
    """Loguru filter that injects the current trace id into each record.

    Reads the request-scoped trace id from ``trace_context`` and stores it
    under ``record["extra"]["trace_id"]`` so the log format's
    ``{extra[trace_id]}`` placeholder always resolves. Always returns True
    (records are never dropped).
    """
    try:
        trace_id = trace_context.get()
    except Exception:
        # Never raise or log from inside the filter: logging here would
        # re-enter this very filter. Fall back to the placeholder instead.
        trace_id = None
    # Update the dict in place rather than replacing it, so values bound
    # via logger.bind(...) are preserved alongside the trace id.
    record["extra"]["trace_id"] = "-" if trace_id is None else trace_id
    return True


# 全局错误
@app.exception_handler(Exception)
def exception_handler(request: Request, exc: Exception):
    error_class = type(exc)
    error_class_name = error_class.__name__
    if (isinstance(exc, ArkError)
            or isinstance(exc, OpenAIError)):
        errorBase = ERROR_AI_DEFAULT_ERROR
        if error_class is AuthenticationError:
            errorBase = ERROR_AI_ACCOUNT_ERROR
        elif error_class is RateLimitError:
            errorBase = ERROR_AI_RATE_LIMIT_ERROR
        return result.error(errorBase, message_append=error_class_name + "," + str(exc))

    return result.error(ERROR_DEFAULT_ERROR, message_append=error_class_name + "," + str(exc))


# Global HTTP filter (middleware).
@app.middleware("http")
async def api_global_filter(request: Request, call_next):
    """Pass-through HTTP middleware hook.

    ``call_next`` is a coroutine function, so this handler must be async
    and await it explicitly — the previous sync version only worked
    because Starlette happened to await the coroutine it returned.
    """
    return await call_next(request)


@app.post('/v1/chat/completions', summary="基于知识库生成文本")
def chat_completions(request: CompletionRequest, http_request: Request):
    """Generate knowledge-base-grounded text; streams SSE when requested."""
    header = RequestHeader(http_request=http_request,
                           request_id=request.request_id,
                           model_name=request.model_name)
    chat_service = ChatService(header.get_model_kwargs())
    logger.info("基于知识库生成文本:" + request.json(ensure_ascii=False))
    completion = chat_service.completions(request)
    if not request.stream:
        return completion
    # Streaming mode: emit the generator as server-sent events.
    return StreamingResponse(completion, media_type="text/event-stream")


@app.post('/v1/extraction', summary="实体抽取")
def extraction(request: ExtractionRequest, http_request: Request):
    """Run entity extraction; streams SSE when requested."""
    header = RequestHeader(http_request=http_request,
                           request_id=request.request_id,
                           model_name=request.model_name)
    extraction_service = ExtractionService(header.get_model_kwargs())
    logger.info("实体抽取:" + request.json(ensure_ascii=False))
    extracted = extraction_service.extraction(request)
    if not request.stream:
        return extracted
    # Streaming mode: emit the generator as server-sent events.
    return StreamingResponse(extracted, media_type="text/event-stream")


if __name__ == '__main__':
    # CLI entry point: parse server options and launch uvicorn.
    arg_parser = argparse.ArgumentParser(
        description="CAR TINTING AI Engine RESTful API server"
    )
    arg_parser.add_argument("--host", type=str, default="0.0.0.0", help="host name")
    arg_parser.add_argument("--port", type=int, default=10128, help="port number")
    arg_parser.add_argument("--workers", type=int, default=1, help="workers")
    arg_parser.add_argument("--backlog", type=int, default=2048, help="backlog")

    # Local VPN proxy (uncomment when needed):
    # os.environ["http_proxy"] = "http://127.0.0.1:1080"
    # os.environ["https_proxy"] = "http://127.0.0.1:1080"

    opts = arg_parser.parse_args()

    uvicorn.run(
        app='api:app',
        host=opts.host,
        port=opts.port,
        log_level="info",
        workers=opts.workers,
        backlog=opts.backlog,
    )
