matthoffner committed
Commit
3366fc4
1 Parent(s): 882b099

Update main.py

Files changed (1)
  1. main.py +2 -2
main.py CHANGED
@@ -102,7 +102,7 @@ async def demo():
     return HTMLResponse(content=html_content, status_code=200)
 
 @app.get("/stream")
-async def chat(prompt = "<|user|> Write a simple express style server in golang"):
+async def chat(prompt = "<|user|> Write an express server with server sent events. <|assistant|>"):
     tokens = llm.tokenize(prompt)
     async def server_sent_events(chat_chunks, llm):
         yield prompt
@@ -114,7 +114,7 @@ async def chat(prompt = "<|user|> Write a simple express style server in golang"
 
 @app.post("/v1/chat/completions")
 async def chat(request, response_mode=None):
-    tokens = llm.tokenize(request.messages.join(''))
+    tokens = llm.tokenize(request.messages)
     async def server_sent_events(chat_chunks, llm):
         for token in llm.generate(chat_chunks):
             yield llm.detokenize(token)
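
For context, a minimal sketch of how the two changed endpoints might look after this commit. It assumes a FastAPI app, sse-starlette's EventSourceResponse for the server-sent-events stream, and a ctransformers-style llm object exposing tokenize/generate/detokenize; the Message/ChatRequest models, the message flattening in the POST handler, and the return type of the GET handler are illustrative assumptions, not taken from main.py.

# Sketch only, not the full main.py. `llm` is loaded elsewhere in the app,
# e.g. via ctransformers' AutoModelForCausalLM (model id not shown in this diff).
from typing import List, Optional

from fastapi import FastAPI
from pydantic import BaseModel
from sse_starlette.sse import EventSourceResponse

app = FastAPI()


class Message(BaseModel):       # hypothetical request models, for illustration
    role: str
    content: str


class ChatRequest(BaseModel):
    messages: List[Message]


@app.get("/stream")
async def stream(prompt: str = "<|user|> Write an express server with server sent events. <|assistant|>"):
    # Renamed from `chat` in this sketch to avoid shadowing the POST handler below.
    tokens = llm.tokenize(prompt)

    async def server_sent_events(chat_chunks, llm):
        # Echo the prompt first, then stream generated tokens as they are produced.
        yield prompt
        for chunk in llm.generate(chat_chunks):
            yield llm.detokenize(chunk)

    return EventSourceResponse(server_sent_events(tokens, llm))


@app.post("/v1/chat/completions")
async def chat(request: ChatRequest, response_mode: Optional[str] = None):
    # The commit drops the JavaScript-style `.join('')`; flattening the message
    # contents into one string here is an assumption, since tokenize expects text.
    prompt = "".join(m.content for m in request.messages)
    tokens = llm.tokenize(prompt)

    async def server_sent_events(chat_chunks, llm):
        for token in llm.generate(chat_chunks):
            yield llm.detokenize(token)

    return EventSourceResponse(server_sent_events(tokens, llm))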