"""Chat endpoints that stream OpenAI completions in the Vercel AI data stream format."""

import json
from typing import List

from fastapi import APIRouter
from fastapi.responses import StreamingResponse
import openai

from ....schemas.chat import Message, ChatRequest
from ....core.config import settings

router = APIRouter(prefix="/chat", tags=["chat"])

# Initialize the OpenAI client with the same settings as the other working
# endpoints. The async client keeps the streaming endpoint from blocking the
# event loop while tokens are generated.
client = openai.AsyncOpenAI(api_key=settings.OPENAI_API_KEY)

SYSTEM_PROMPT = (
    "You are an AI learning assistant for PlayGo AI, an educational platform. "
    "Your goal is to help students learn and understand various subjects. "
    "Provide clear, concise, and accurate explanations."
)
async def stream_text(messages: List[Message]):
    try:
        formatted_messages = [{"role": "system", "content": SYSTEM_PROMPT}] + [
            {"role": msg.role, "content": msg.content} for msg in messages
        ]

        stream = await client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=formatted_messages,
            temperature=0.7,
            stream=True,
            # Without include_usage the API never sends the final usage-only
            # chunk, so the `d:` finish part below would never be emitted.
            stream_options={"include_usage": True},
        )

        async for chunk in stream:
            for choice in chunk.choices:
                # Skip the terminal chunk and role-only deltas: their content
                # is None, and json.dumps(None) would stream the text "null".
                if choice.finish_reason == "stop" or choice.delta.content is None:
                    continue
                yield "0:{text}\n".format(text=json.dumps(choice.delta.content))

            # The usage-only chunk arrives last, with an empty choices list.
            if not chunk.choices and chunk.usage is not None:
                finish_part = {
                    "finishReason": "stop",
                    "usage": {
                        "promptTokens": chunk.usage.prompt_tokens,
                        "completionTokens": chunk.usage.completion_tokens,
                    },
                }
                yield f"d:{json.dumps(finish_part)}\n"

    except Exception as e:
        # Yield the error as text so the client sees the failure mid-stream.
        print(f"Error in stream_text: {e}")
        yield f"Error: {e}"
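
# For reference, the generator above emits the Vercel AI SDK's data stream
# protocol: each text delta is a `0:`-prefixed JSON string on its own line,
# and a final `d:` part carries the finish reason and token usage. An
# illustrative transcript (actual token boundaries will vary):
#
#     0:"Hello"
#     0:" there"
#     0:"!"
#     d:{"finishReason":"stop","usage":{"promptTokens":12,"completionTokens":3}}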


@router.post("/stream")
async def chat_stream(request: ChatRequest):
    response = StreamingResponse(stream_text(request.messages))
    # The Vercel AI SDK looks for this header before parsing the data stream.
    response.headers["x-vercel-ai-data-stream"] = "v1"
    return response
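
# A minimal sketch of exercising the endpoint from the command line. The full
# path assumes this router is mounted under an `/api/v1` prefix, which is an
# assumption about the app layout rather than something this file defines:
#
#     curl -N -X POST http://localhost:8000/api/v1/chat/stream \
#          -H "Content-Type: application/json" \
#          -d '{"messages": [{"role": "user", "content": "What is photosynthesis?"}]}'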


@router.get("/test")
async def test_chat():
    return {"message": "Chat endpoint is working!"}


@router.get("/test-simple")
async def test_simple_chat():
    """Test a simple chat completion without streaming (same as the working repo)."""
    try:
        response = await client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {
                    "role": "user",
                    "content": "Say 'Hello from simple chat!' in exactly those words.",
                }
            ],
            max_tokens=20,
        )

        return {
            "status": "success",
            "message": "Simple chat works!",
            "response": response.choices[0].message.content,
            "model": response.model,
        }

    except Exception as e:
        return {
            "status": "error",
            "message": f"Simple chat failed: {e}",
            "error_type": type(e).__name__,
        }
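
# A minimal sketch of wiring this router into the application, assuming a
# conventional FastAPI project layout; the import path below is an
# illustrative guess, not something this file defines:
#
#     from fastapi import FastAPI
#     from app.api.v1.endpoints.chat import router as chat_router
#
#     app = FastAPI()
#     app.include_router(chat_router, prefix="/api/v1")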