matthoffner committed on
Commit 46ac909
1 Parent(s): 4f37acf

Update main.py

Files changed (1)
  1. main.py +3 -3
main.py CHANGED
@@ -44,14 +44,14 @@ async def index():
     """
     return HTMLResponse(content=html_content, status_code=200)
 
-class ChatCompletionRequest(BaseModel):
+class ChatCompletionRequestV0(BaseModel):
     prompt: str
 
 class Message(BaseModel):
     role: str
     content: str
 
-class ChatCompletionRequestV2(BaseModel):
+class ChatCompletionRequest(BaseModel):
     messages: List[Message]
     max_tokens: int = 100
 
@@ -89,7 +89,7 @@ async def chat(request: ChatCompletionRequest):
     return StreamingResponse(format_response(chat_chunks), media_type="text/event-stream")
 
 @app.post("/v0/chat/completions")
-async def chat(request: ChatCompletionRequest, response_mode=None):
+async def chat(request: ChatCompletionRequestV0, response_mode=None):
     tokens = llm.tokenize(request.prompt)
     async def server_sent_events(chat_chunks, llm):
         for chat_chunk in llm.generate(chat_chunks):
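
For context, a minimal sketch of how the renamed models line up after this commit: the unsuffixed ChatCompletionRequest now names the messages-based payload, while the legacy raw-prompt payload becomes ChatCompletionRequestV0 and stays bound to the /v0 route. Everything outside the visible hunks (the llm object, its detokenize call, and the SSE framing) is an assumption stubbed in so the sketch runs on its own; it is not the full main.py.

from typing import List

from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from pydantic import BaseModel

app = FastAPI()


class _StubLLM:
    """Hypothetical stand-in for the model object in main.py (not shown in the diff)."""

    def tokenize(self, text: str) -> List[int]:
        return [ord(ch) for ch in text]

    def generate(self, tokens: List[int]):
        yield from tokens

    def detokenize(self, token: int) -> str:
        return chr(token)


llm = _StubLLM()


class ChatCompletionRequestV0(BaseModel):
    # Legacy raw-prompt payload, now explicitly tied to /v0 by its name.
    prompt: str


class Message(BaseModel):
    role: str
    content: str


class ChatCompletionRequest(BaseModel):
    # The unsuffixed name now refers to the messages-based payload.
    messages: List[Message]
    max_tokens: int = 100


@app.post("/v0/chat/completions")
async def chat(request: ChatCompletionRequestV0, response_mode=None):
    tokens = llm.tokenize(request.prompt)

    async def server_sent_events(chat_chunks, llm):
        # Mirrors the loop in the second hunk; the diff cuts off inside this
        # generator, so the SSE event framing below is assumed.
        for chat_chunk in llm.generate(chat_chunks):
            yield f"data: {llm.detokenize(chat_chunk)}\n\n"
        yield "data: [DONE]\n\n"

    return StreamingResponse(server_sent_events(tokens, llm), media_type="text/event-stream")

Renaming rather than deleting keeps the /v0 prompt-style route working while freeing the plain ChatCompletionRequest name for the newer messages-based endpoint, so clients of either version keep a stable request schema.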