fda
mysite/interpreter/interpreter.py
CHANGED
@@ -61,7 +61,7 @@ def chat_with_interpreter(
 
 GENERATION_TIMEOUT_SEC = 60
 
-async def completion(message: str, history, c=None, d=None, prompt="あなたは日本語の優秀なアシスタントです。"):
+def completion(message: str, history, c=None, d=None, prompt="あなたは日本語の優秀なアシスタントです。"):
     client = Groq(api_key=os.getenv("api_key"))
     messages = []
     recent_messages = history[-20:]
@@ -78,27 +78,27 @@ async def completion(message: str, history, c=None, d=None, prompt="あなたは
     system_prompt = {"role": "system", "content": prompt}
     messages.insert(0, system_prompt)
 
-    async with async_timeout.timeout(GENERATION_TIMEOUT_SEC):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    #async with async_timeout.timeout(GENERATION_TIMEOUT_SEC):
+    try:
+        response = await client.chat.completions.create(
+            model="llama3-8b-8192",
+            messages=messages,
+            temperature=1,
+            max_tokens=1024,
+            top_p=1,
+            stream=True,
+            stop=None,
+        )
+        all_result = ""
+        for chunk in response:
+            current_content = chunk.choices[0].delta.content or ""
+            all_result += current_content
+            yield current_content
+        yield all_result
+    except asyncio.TimeoutError:
+        raise HTTPException(status_code=504, detail="Stream timed out")
+    except StopAsyncIteration:
+        return
 
 # 例としての使用方法
 if __name__ == "__main__":
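Below is a minimal usage sketch (hypothetical, not part of this commit) showing how a streaming completion() generator like the one added above can be driven from a plain script. It assumes the "api_key" environment variable is set, that history already holds {"role", "content"} message dicts, and that the synchronous Groq client is used, so the await on client.chat.completions.create is dropped.

# Minimal, hypothetical usage sketch: a synchronous variant of completion()
# plus a small driver. Assumes the Groq Python SDK and an "api_key" env var.
import os
from groq import Groq

def completion(message: str, history, prompt="あなたは日本語の優秀なアシスタントです。"):
    client = Groq(api_key=os.getenv("api_key"))
    # history is assumed to already be a list of {"role": ..., "content": ...} dicts
    messages = [{"role": "system", "content": prompt}]
    messages.extend(history[-20:])
    messages.append({"role": "user", "content": message})
    # stream=True makes the sync client return an iterator of chunks;
    # each chunk carries the next text fragment in choices[0].delta.content.
    response = client.chat.completions.create(
        model="llama3-8b-8192",
        messages=messages,
        temperature=1,
        max_tokens=1024,
        stream=True,
    )
    for chunk in response:
        yield chunk.choices[0].delta.content or ""

# 例としての使用方法 (example usage)
if __name__ == "__main__":
    for piece in completion("こんにちは", history=[]):
        print(piece, end="", flush=True)
    print()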