# coding=utf-8
# Implements API for ChatGLM3-6B in OpenAI's format. (https://platform.openai.com/docs/api-reference/chat)
# Usage: python openai_api.py
# Visit http://localhost:8000/docs for documents.


import time
from contextlib import asynccontextmanager
from typing import List, Literal, Optional, Union

import torch
import uvicorn
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from loguru import logger
from pydantic import BaseModel, Field

from configuration import Config
from tool.tool_register import dispatch_tool, get_lm4_tools,nested_object_to_dict
import json
import hashlib
from zhipuai import ZhipuAI

@asynccontextmanager
async def lifespan(app: FastAPI):
    """App lifespan hook: on shutdown, release cached CUDA memory."""
    yield  # the application serves requests between startup and shutdown
    if not torch.cuda.is_available():
        return
    torch.cuda.empty_cache()
    torch.cuda.ipc_collect()


app = FastAPI(lifespan=lifespan)

# Allow cross-origin requests from any origin.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# rejected by browsers for credentialed requests — confirm whether credentials
# are actually needed here.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


class ModelCard(BaseModel):
    """Single model entry, mirroring OpenAI's /v1/models response schema."""
    id: str
    object: str = "model"
    created: int = Field(default_factory=lambda: int(time.time()))  # unix timestamp at creation
    owned_by: str = "owner"
    root: Optional[str] = None
    parent: Optional[str] = None
    permission: Optional[list] = None


class ModelList(BaseModel):
    """OpenAI-style list wrapper for the /v1/models endpoint."""
    object: str = "list"
    data: List[ModelCard] = []  # pydantic copies mutable defaults per instance


class FunctionCallResponse(BaseModel):
    """A function call emitted by the model: name plus JSON-encoded arguments."""
    name: Optional[str] = None
    arguments: Optional[str] = None


class ChatMessage(BaseModel):
    """One chat message in OpenAI format.

    `name` and `function_call` are only populated for function-related
    messages.
    """
    role: Literal["user", "assistant", "system", "function"]
    # Was `content: str = None`: the default contradicted the annotation
    # (pydantic v2 rejects that form). Declare the field Optional explicitly.
    content: Optional[str] = None
    name: Optional[str] = None
    function_call: Optional[FunctionCallResponse] = None


class DeltaMessage(BaseModel):
    """Incremental message fragment for streaming (chat.completion.chunk) responses."""
    role: Optional[Literal["user", "assistant", "system"]] = None
    content: Optional[str] = None
    function_call: Optional[FunctionCallResponse] = None


class ChatCompletionRequest(BaseModel):
    """Request body for POST /v1/chat/completions (OpenAI-compatible)."""
    model: str
    # Raw message dicts as sent by the client; not validated as ChatMessage.
    messages: List[dict]
    temperature: Optional[float] = 0.8
    top_p: Optional[float] = 0.8
    max_tokens: Optional[int] = None
    stream: Optional[bool] = False
    functions: Optional[Union[dict, List[dict]]] = None

    # Additional parameters (not part of the OpenAI schema)
    max_length: Optional[int] = None
    repetition_penalty: Optional[float] = 1.1


class ChatCompletionResponseChoice(BaseModel):
    """One non-streaming completion choice."""
    index: int
    message: Optional[Union[dict, ChatMessage]]
    finish_reason: Literal["stop", "length", "function_call"]


class ChatCompletionResponseStreamChoice(BaseModel):
    """One streaming completion choice (delta-style)."""
    index: int
    delta: DeltaMessage
    finish_reason: Optional[Literal["stop", "length", "function_call"]]


class UsageInfo(BaseModel):
    """Token accounting block; this server always reports zeros."""
    prompt_tokens: int = 0
    total_tokens: int = 0
    completion_tokens: Optional[int] = 0


class ChatCompletionResponse(BaseModel):
    """Top-level response for /v1/chat/completions (full or chunk)."""
    model: str
    object: Literal["chat.completion", "chat.completion.chunk"]
    choices: List[Union[ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice]]
    created: Optional[int] = Field(default_factory=lambda: int(time.time()))
    usage: Optional[UsageInfo] = None

# Chat history, keyed by the MD5 of each session's first message content.
# NOTE(review): unbounded in-memory store — entries are never evicted.
history={}

@app.get("/v1/models", response_model=ModelList)
async def list_models():
    """Advertise a single model id so standard OpenAI clients can connect."""
    return ModelList(data=[ModelCard(id="gpt-3.5-turbo")])

def message_handle(message,fuctionf=None,hid=None):
    """Dispatch the tool/function call(s) requested by a model message and
    append the tool results to the session history.

    Args:
        message: assistant message carrying the call(s) — a GLM-4 response
            object (``.tool_calls``) or a GLM-3 dict-like message
            (``"function_call"`` key).
        fuctionf: a single call to execute; when None, every call found on
            ``message`` is dispatched recursively.
        hid: session id (MD5 key into the module-level ``history``).

    Returns:
        '' normally, 'resetting_chat_record' when the reset tool was invoked,
        or the image payload when a tool returned an image result.
    """
    if fuctionf:
        # GLM-4 wraps the function spec inside a tool-call object.
        if Config().CHAT_ROBOT['apiType'] == 'glm4':
            func = fuctionf.function
        else:
            func = fuctionf
        function_args = json.loads(func.arguments)
        print(f"Tool Name {func.name} Arguments: {func.arguments}")
        addStr = ''
        try:
            if func.name == 'resetting_chat_record':
                # Signal the caller to truncate the session after replying.
                addStr = 'resetting_chat_record'
                observation = {"success": True, "res": "重置成功", "res_type": "text"}
            else:
                observation = dispatch_tool(func.name, function_args)
        except Exception as e:
            # Was `rsp = f'api调用错误: {e}'`, which left `observation`
            # undefined and crashed below; feed the error back to the model.
            observation = {"success": False, "res": f'api调用错误: {e}', "res_type": "text"}
        if isinstance(observation, dict):
            res_type = observation.get('res_type', 'text')
            # Was keyed on 'res_type' instead of 'res', which either raised
            # KeyError or stringified the whole dict when 'res' was absent.
            res = str(observation['res']) if 'res' in observation else str(observation)
            if res_type == 'image':
                # Image payloads are returned to the caller out-of-band.
                addStr = res
            tool_response = '[Image]' if Config().CHAT_ROBOT['apiType'] == 'glm3' and res_type == 'image' else res
        else:
            tool_response = observation if isinstance(observation, str) else str(observation)
        print(f"Tool Call Response: {tool_response}")
        if Config().CHAT_ROBOT['apiType'] == 'glm4':
            history[hid].append({
                "role": "tool",
                "tool_call_id": fuctionf.id,
                "content": tool_response,
            })
        else:
            history[hid].append({
                "role": "function",
                "name": func.name,
                "content": tool_response,
            })
        return addStr

    if Config().CHAT_ROBOT['apiType'] == 'glm4':
        if len(message.tool_calls) > 0:
            addStr = ''
            for call in message.tool_calls:
                addStr += message_handle(message, call, hid=hid)
            return addStr
    else:
        # GLM-3 messages are dict-like: fetch the call once and reuse it
        # (was `message.function_call`, which fails on a plain dict).
        call = message.get("function_call")
        if call:
            return message_handle(message, call, hid=hid)
    # Consistent string return even when there is nothing to dispatch.
    return ''

@app.post("/v1/chat/completions", response_model=ChatCompletionResponse)
async def create_chat_completion(request: ChatCompletionRequest):
    """OpenAI-compatible chat endpoint backed by the ZhipuAI GLM API.

    Sessions are keyed by the MD5 of the first message's content; the
    per-session `history` accumulates messages and tool results across calls.
    Tool calls requested by the model are dispatched until it produces a
    final answer, which is returned as a single non-streaming choice.
    """
    # Session id: MD5 hex digest of the first (system) message content.
    hid = hashlib.md5(request.messages[0]['content'].encode('utf-8')).hexdigest()
    if hid not in history:
        if len(request.messages) <= 2:
            # Copy so later appends don't mutate the request payload.
            history[hid] = list(request.messages)
        else:
            # Long client transcript: keep only the system prompt + latest turn.
            history[hid] = [request.messages[0], request.messages[-1]]
    else:
        # Was appended unconditionally, which duplicated the latest user
        # message on the very first request of a session.
        history[hid].append(request.messages[-1])

    response = chat_completion(hid)
    history[hid].append(nested_object_to_dict(response.choices[0].message))
    addStr = ''
    # Keep dispatching tools until the model stops asking for them.
    while response.choices[0].finish_reason == 'tool_calls' and len(response.choices[0].message.tool_calls) > 0:
        addStr = message_handle(response.choices[0].message, hid=hid)
        response = chat_completion(hid)
        history[hid].append(nested_object_to_dict(response.choices[0].message))

    message = ChatMessage(
        role="assistant",
        content=history[hid][-1]['content'],
        function_call=None,
    )
    if addStr == 'resetting_chat_record':
        # The reset tool ran: keep only the first two history entries.
        history[hid] = history[hid][:2]

    print(message)

    choice_data = ChatCompletionResponseChoice(
        index=0,
        message=message,
        finish_reason="stop",
    )

    return ChatCompletionResponse(model=request.model, choices=[choice_data], object="chat.completion", usage=UsageInfo())

def chat_completion(hid):
    """Run one GLM-4 chat completion over the stored history for `hid`.

    Returns the raw ZhipuAI response object.
    """
    client = ZhipuAI(api_key=Config().GLM4['key'])  # per-call client; key from config
    tools = get_lm4_tools()
    response = client.chat.completions.create(
        # Was `Config().GLM4['key']` — the API key was being sent as the
        # model name. NOTE(review): confirm which config field holds the
        # model id; falling back to "glm-4" when none is configured.
        model=Config().GLM4.get('model', 'glm-4'),
        messages=history[hid],
        tools=tools,
        # ZhipuAI documents "auto" for tool_choice; "true" is not a valid value.
        tool_choice="auto",
    )
    return response


if __name__ == "__main__":
    # Serve over HTTPS with a locally generated certificate.
    # NOTE(review): hard-coded Windows cert paths and port 443 — consider
    # moving these into Config alongside the other deployment settings.
    uvicorn.run(
               app,
               host="0.0.0.0",
               port=443,
               ssl_keyfile="C:/Users/Administrator/localhost+2-key.pem",
               ssl_certfile="C:/Users/Administrator/localhost+2.pem",
               workers=1,
               )
