import argparse

import uvicorn
from fastapi import FastAPI, Request
from loguru import logger
from openai import AuthenticationError, RateLimitError
from pymilvus import Milvus
from starlette import status
from starlette.responses import StreamingResponse, JSONResponse

from llmchat.model import ApiResponse
from llmchat.model.ApiResponse import ErrorCode
from llmchat.model.chat import CompletionRequest
from llmchat.service.Factory import Factory

# FastAPI application instance; the route handlers and the exception
# handler below register themselves on it via decorators.
app = FastAPI()


# @app.middleware("http")
# def api_global_filter(request: Request, call_next):
#     """全局过滤器"""
#     headers = request.headers
#     api_type = headers.get("apiType")
#     api_key = headers.get("apiKey")
#     api_base = headers.get("apiBase")
#     if api_type not in ["openai", "azure", "qianfan"]:
#         raise ValueError(f"apiType error,Only supports openai or azure or qianfan.")
#
#     if api_key is None or len(api_key) < 1:
#         raise ValueError(f"apiKey error,Need to provide.")
#
#     if api_type in ["openai", "azure"]:
#         if api_base is None or len(api_base) < 1:
#             raise ValueError(f"apiBase error,Need to provide.")


@app.exception_handler(Exception)
def exception_handler(request: Request, ex: Exception):
    """Global API exception handler.

    Maps known OpenAI client errors to project error codes, logs the
    failure with its traceback, and returns a uniform JSON error body.

    :param request: the incoming request that triggered the exception
    :param ex: the unhandled exception raised by a route handler
    :return: JSONResponse with HTTP 500 and an ApiResponse error payload
    """
    error_enum = ErrorCode.AI_DEFAULT_EXCEPTION
    if isinstance(ex, AuthenticationError):
        error_enum = ErrorCode.AI_ACCOUNT_ERROR
    elif isinstance(ex, RateLimitError):
        error_enum = ErrorCode.AI_RATE_LIMIT_ERROR
    # Keep the ",message=" wire format unchanged for existing consumers.
    error_msg = error_enum.message + ",message=" + str(ex)
    # Attach the exception so loguru records the full traceback,
    # not just the message string (plain logger.error drops it).
    logger.opt(exception=ex).error(error_msg)

    return JSONResponse(
        status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
        content=ApiResponse.error(error_enum.code, error_msg).dict()
    )


@app.post('/chat/completions', summary="文本生成")
def chat_completion(request: Request, completion: CompletionRequest):
    """Text-generation endpoint.

    Builds a chat backend from the per-request API headers, runs the
    completion, and returns either an SSE stream or the full result.

    :param request: incoming request; apiType/apiKey/apiBase headers select the backend
    :param completion: completion parameters, including the ``stream`` flag
    """
    logger.info("文本生成:" + str(completion))
    headers = request.headers

    chat = Factory.create_chat(
        api_type=headers.get("apiType"),
        api_base=headers.get("apiBase"),
        api_key=headers.get("apiKey"),
    )
    generator = chat.completions(completion)

    # Non-streaming requests get the completed result directly.
    if not completion.stream:
        return generator
    # Streaming requests are pushed out as server-sent events.
    return StreamingResponse(generator, media_type="text/event-stream")


@app.get('/milvus/get_collection_stats/{collection_name}', summary="查询向量维度")
def get_collection_stats(collection_name: str):
    """Return the Milvus collection description.

    The description contains the collection schema; each vector field in
    the schema carries its dimension parameter.

    :param collection_name: name of the Milvus collection to describe
    :return: the raw describe_collection result from pymilvus
    """
    # SECURITY(review): host, user and password are hard-coded in source —
    # move them into configuration / environment variables.
    client = Milvus(host="43.139.210.97", port=19530, user="oula", password="oula2023!@#321$^")
    try:
        # stats = client.get_collection_stats(collection_name=collection_name)
        # dimension 维度
        # 输出集合中的向量数量
        return client.describe_collection(collection_name=collection_name)
    finally:
        # Release the connection even when describe_collection raises;
        # the original leaked one client per request.
        client.close()


if __name__ == '__main__':
    # Command-line options for the uvicorn server.
    parser = argparse.ArgumentParser(
        description="LLM RESTFUL API server."
    )
    for flag, value_type, default, helptext in (
        ("--host", str, "0.0.0.0", "host name"),
        ("--port", int, 9002, "port number"),
        ("--workers", int, 1, "workers"),
        ("--backlog", int, 2048, "backlog"),
    ):
        parser.add_argument(flag, type=value_type, default=default, help=helptext)

    args = parser.parse_args()

    # The app is passed as an import string ('main:app') so uvicorn can
    # spawn multiple workers.
    uvicorn.run(
        app='main:app',
        host=args.host,
        port=args.port,
        log_level="info",
        workers=args.workers,
        backlog=args.backlog,
    )
