refactor: improve code readability and structure in OpenAI integration tests and services, update requirements for consistency
f5c3d9c
import json
from typing import List

import openai
from fastapi import APIRouter
from fastapi.responses import StreamingResponse

from ....schemas.chat import Message, ChatRequest
from ....core.config import settings

router = APIRouter(prefix="/chat", tags=["chat"])

# Initialize the OpenAI client using the same settings as other working endpoints.
# NOTE: this is the synchronous client; openai.AsyncOpenAI would avoid blocking
# the event loop inside the async endpoints below.
client = openai.OpenAI(api_key=settings.OPENAI_API_KEY)
async def stream_text(messages: List[Message]):
    try:
        # Prepend the system prompt, then pass the conversation history through unchanged.
        formatted_messages = [
            {
                "role": "system",
                "content": """You are an AI learning assistant for PlayGo AI,
an educational platform. Your goal is to help students learn and understand various
subjects. Provide clear, concise, and accurate explanations.""",
            },
        ] + [{"role": msg.role, "content": msg.content} for msg in messages]

        stream = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=formatted_messages,
            temperature=0.7,
            stream=True,
            # Required for the usage-only final chunk consumed below; without it
            # the API omits token counts from streaming responses.
            stream_options={"include_usage": True},
        )
        for chunk in stream:
            for choice in chunk.choices:
                # Skip the terminating chunk and role-only deltas that carry no text.
                if choice.finish_reason == "stop" or choice.delta.content is None:
                    continue
                # "0:" marks a text part in the Vercel AI data stream protocol.
                yield f"0:{json.dumps(choice.delta.content)}\n"
            if not chunk.choices and chunk.usage is not None:
                # The usage-only chunk arrives last: emit the "d:" finish part.
                finish = {
                    "finishReason": "stop",
                    "usage": {
                        "promptTokens": chunk.usage.prompt_tokens,
                        "completionTokens": chunk.usage.completion_tokens,
                    },
                }
                yield f"d:{json.dumps(finish)}\n"
    except Exception as e:
        print(f"Error in stream_text: {str(e)}")
        # Yield str, not bytes, so every chunk of the stream has a consistent type.
        yield f"Error: {str(e)}"
@router.post("/stream")
async def chat_stream(request: ChatRequest):
    response = StreamingResponse(
        stream_text(request.messages),
    )
    # Header consumed by the Vercel AI SDK to detect the data stream protocol.
    response.headers["x-vercel-ai-data-stream"] = "v1"
    return response


@router.get("/test")
async def test_chat():
    return {"message": "Chat endpoint is working!"}
@router.get("/test-simple")
async def test_simple_chat():
    """Test simple chat completion without streaming - same as working repo"""
    try:
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {
                    "role": "user",
                    "content": "Say 'Hello from simple chat!' in exactly those words.",
                }
            ],
            max_tokens=20,
        )
        return {
            "status": "success",
            "message": "Simple chat works!",
            "response": response.choices[0].message.content,
            "model": response.model,
        }
    except Exception as e:
        return {
            "status": "error",
            "message": f"Simple chat failed: {str(e)}",
            "error_type": type(e).__name__,
        }
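
For context, a minimal sketch of how this router could be mounted and exercised in-process. Everything outside the module above is an assumption: the /api/v1 prefix, the JSON shape of ChatRequest (inferred from request.messages and the Message fields used in stream_text), and the use of Starlette's httpx-based TestClient.

# Hypothetical wiring sketch -- the prefix and request payload are assumptions.
from fastapi import FastAPI
from fastapi.testclient import TestClient

app = FastAPI()
app.include_router(router, prefix="/api/v1")  # router from the module above

test_client = TestClient(app)
with test_client.stream(
    "POST",
    "/api/v1/chat/stream",
    json={"messages": [{"role": "user", "content": "Explain photosynthesis."}]},
) as resp:
    assert resp.headers["x-vercel-ai-data-stream"] == "v1"
    for line in resp.iter_lines():
        # Expect text parts like 0:"Photosynthesis is..." then a final d:{...} part.
        print(line)

The /test and /test-simple routes can be hit the same way with plain test_client.get(...) calls to confirm the API key and client configuration before debugging the stream itself.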