matthoffner committed
Commit 865b816
1 Parent(s): d7501b1

Update main.py

Files changed (1)
  1. main.py +3 -3
main.py CHANGED
@@ -90,7 +90,7 @@ async def chat(request: ChatCompletionRequest):
 
     return StreamingResponse(format_response(chat_chunks), media_type="text/event-stream")
 
-async def stream_response(send: Callable) -> None:
+async def stream_response(tokens: Any) -> None:
     async with send:
         try:
             iterator: Generator = llm.generate(tokens)
@@ -123,7 +123,7 @@ async def chatV2(request: Request, body: ChatCompletionRequest):
     combined_messages = ' '.join([message.content for message in body.messages])
     tokens = llm.tokenize(combined_messages)
 
-    return ResponseGenerator(stream_response)
+    return StreamingResponse(stream_response(tokens))
 
 @app.post("/v2/chat/completions")
 async def chatV2_endpoint(request: Request, body: ChatCompletionRequest):
@@ -141,4 +141,4 @@ async def chat(request: ChatCompletionRequestV0, response_mode=None):
     return EventSourceResponse(server_sent_events(tokens, llm))
 
 if __name__ == "__main__":
-    uvicorn.run(app, host="0.0.0.0", port=8000)
+    uvicorn.run(app, host="0.0.0.0", port=8000)
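
For context, a minimal self-contained sketch of the streaming pattern this commit moves toward: stream_response now takes the tokenized prompt, and the endpoint returns StreamingResponse(stream_response(tokens)), i.e. a called generator rather than the bare function that the old ResponseGenerator(stream_response) line passed. The FakeLLM stub and its tokenize/generate/detokenize methods are hypothetical stand-ins for the real model and are not confirmed by this diff; the committed body also still references the removed send parameter, which the sketch omits.

from typing import Any, AsyncGenerator, List

import uvicorn
from fastapi import FastAPI
from fastapi.responses import StreamingResponse

app = FastAPI()

class FakeLLM:
    """Hypothetical stub standing in for the real model object."""

    def tokenize(self, text: str) -> List[str]:
        return text.split()

    def generate(self, tokens: List[str]):
        # Echoes the prompt token by token in place of real decoding.
        yield from tokens

    def detokenize(self, token: str) -> str:
        return token + " "

llm = FakeLLM()

async def stream_response(tokens: Any) -> AsyncGenerator[str, None]:
    # Yield decoded chunks so StreamingResponse can flush them to the
    # client incrementally instead of buffering the full completion.
    for chunk in llm.generate(tokens):
        yield llm.detokenize(chunk)

@app.post("/v2/chat/completions")
async def chatV2_endpoint(prompt: str):
    tokens = llm.tokenize(prompt)
    # Pass the *called* generator, matching the new
    # `return StreamingResponse(stream_response(tokens))` line above.
    return StreamingResponse(stream_response(tokens), media_type="text/event-stream")

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)

Calling stream_response(tokens) up front is what lets StreamingResponse consume it as an iterator; passing the uncalled function, as the old line did, hands the response class a plain callable it cannot iterate.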