Spaces:
Runtime error
Runtime error
Update appDeepseekCoder.py
Browse files- appDeepseekCoder.py +40 -69
appDeepseekCoder.py
CHANGED
@@ -1,33 +1,44 @@
|
|
1 |
-
|
2 |
-
|
|
|
3 |
from langchain.chains import LLMChain
|
4 |
from prompts import maths_assistant_prompt_template
|
5 |
from langchain.memory.buffer import ConversationBufferMemory
|
6 |
-
from
|
7 |
-
import
|
8 |
import chainlit as cl
|
9 |
-
|
10 |
-
# Load the model and tokenizer
|
11 |
-
model_name = "deepseek-ai/deepseek-math-7b-instruct"
|
12 |
-
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
13 |
-
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, device_map="auto")
|
14 |
-
model.generation_config = GenerationConfig.from_pretrained(model_name)
|
15 |
-
model.generation_config.pad_token_id = model.generation_config.eos_token_id
|
16 |
|
|
|
|
|
17 |
|
18 |
-
|
19 |
-
|
|
|
|
|
20 |
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
|
|
|
|
26 |
cl.user_session.set("llm_chain", llm_chain)
|
27 |
|
28 |
-
|
29 |
-
|
30 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
31 |
actions = [
|
32 |
cl.Action(name="Probability", value="Probability", description="Select Quiz Topic!"),
|
33 |
cl.Action(name="Linear Algebra", value="Linear Algebra", description="Select Quiz Topic!"),
|
@@ -36,57 +47,17 @@ async def start_llm():
|
|
36 |
]
|
37 |
await cl.Message(content="**Pick a Topic and Let the Quiz Adventure Begin!** ππ", actions=actions).send()
|
38 |
|
39 |
-
|
40 |
-
|
41 |
-
@cl.on_message
|
42 |
-
async def query_llm(message: cl.Message):
|
43 |
-
llm_chain = cl.user_session.get("llm_chain")
|
44 |
-
#selected_topic = cl.user_session.get("selected_topic", "probability") # Default to probability if not set
|
45 |
-
print("Message being sent to the LLM is")
|
46 |
-
print(message.content)
|
47 |
-
#response = await llm_chain.ainvoke(message.content,
|
48 |
-
# callbacks=[
|
49 |
-
# cl.AsyncLangchainCallbackHandler()])
|
50 |
-
|
51 |
-
response = await llm_chain.ainvoke({
|
52 |
-
"chat_history": llm_chain.memory.load_memory_variables({})["chat_history"],
|
53 |
-
"question": message.content
|
54 |
-
}, callbacks=[
|
55 |
-
cl.AsyncLangchainCallbackHandler()
|
56 |
-
])
|
57 |
-
await cl.Message(response["text"]).send()
|
58 |
-
|
59 |
-
|
60 |
-
async def send_good_luck_message():
|
61 |
-
await cl.Message(content="Good luck! π", align="bottom").send()
|
62 |
-
|
63 |
-
async def handle_topic_selection(action: cl.Action):
|
64 |
-
llm_chain = cl.user_session.get("llm_chain")
|
65 |
-
#cl.user_session.set("selected_topic", action.value)
|
66 |
-
#await cl.Message(content=f"Selected {action.value}").send()
|
67 |
-
response = await llm_chain.ainvoke({
|
68 |
-
"chat_history": llm_chain.memory.load_memory_variables({})["chat_history"],
|
69 |
-
"question": f"Quiz me on the topic {action.value}."
|
70 |
-
}, callbacks=[
|
71 |
-
cl.AsyncLangchainCallbackHandler()
|
72 |
-
])
|
73 |
-
await cl.Message(response["text"]).send()
|
74 |
-
|
75 |
@cl.action_callback("Linear Algebra")
|
76 |
-
async def on_action(action: cl.Action):
|
77 |
-
await handle_topic_selection(action)
|
78 |
-
|
79 |
@cl.action_callback("Probability")
|
80 |
-
async def on_action(action: cl.Action):
|
81 |
-
await handle_topic_selection(action)
|
82 |
-
|
83 |
@cl.action_callback("Accounts")
|
84 |
-
async def on_action(action: cl.Action):
|
85 |
-
await handle_topic_selection(action)
|
86 |
-
|
87 |
@cl.action_callback("Calculus")
|
88 |
async def on_action(action: cl.Action):
|
89 |
-
|
90 |
-
|
91 |
-
|
|
|
|
|
|
|
92 |
|
|
|
|
|
|
1 |
+
from fastapi import FastAPI, Request
|
2 |
+
from fastapi.responses import JSONResponse
|
3 |
+
from langchain_openai import ChatOpenAI
|
4 |
from langchain.chains import LLMChain
|
5 |
from prompts import maths_assistant_prompt_template
|
6 |
from langchain.memory.buffer import ConversationBufferMemory
|
7 |
+
from dotenv import load_dotenv
|
8 |
+
import os
|
9 |
import chainlit as cl
|
10 |
+
import uvicorn
|
|
|
|
|
|
|
|
|
|
|
|
|
# Load environment variables (expects OPENAI_API_KEY in a local .env file).
load_dotenv()

api_key = os.getenv('OPENAI_API_KEY')
# SECURITY: never print the secret itself — earlier code logged the full key,
# which leaks it into Space/container logs. Report only whether it is set.
print(f"api key is {'set' if api_key else 'MISSING'}")

app = FastAPI()
@app.on_event("startup")
async def startup_event():
    """Build the LLM chain once when the server boots and stash it for handlers."""
    print("Initializing llm...")
    chat_model = ChatOpenAI(model='gpt-4o-mini', temperature=0.5, api_key=api_key)
    print("llm initialized!")
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        max_len=50,
        return_messages=True,
    )
    chain = LLMChain(
        llm=chat_model,
        prompt=maths_assistant_prompt_template,
        memory=memory,
    )
    # NOTE(review): cl.user_session is normally only valid inside an active
    # Chainlit session; calling it from a FastAPI startup hook may be the
    # source of this Space's "Runtime error" — verify against Chainlit docs.
    cl.user_session.set("llm_chain", chain)
@app.post("/query/")
async def query_llm(request: Request):
    """Answer a maths question over HTTP.

    Expects a JSON body of the form ``{"message": "<question>"}`` and returns
    ``{"response": "<answer>"}`` on success.
    """
    data = await request.json()
    message = data.get("message")
    if not message:
        # Previously a missing/empty field flowed into the chain and blew up
        # later as an unhandled 500; fail fast with a clear client error.
        return JSONResponse(status_code=400, content={"error": "missing 'message'"})
    llm_chain = cl.user_session.get("llm_chain")
    if llm_chain is None:
        # NOTE(review): cl.user_session is Chainlit-scoped; outside a Chainlit
        # session this lookup can come back empty — a likely cause of the
        # Space's "Runtime error". Surface it instead of crashing.
        return JSONResponse(status_code=503, content={"error": "llm chain not initialized"})
    response = await llm_chain.ainvoke({
        "chat_history": llm_chain.memory.load_memory_variables({})["chat_history"],
        "question": message,
    }, callbacks=[cl.AsyncLangchainCallbackHandler()])
    return JSONResponse(content={"response": response["text"]})
+
|
40 |
+
@cl.on_chat_start
|
41 |
+
async def on_chat_start():
|
42 |
actions = [
|
43 |
cl.Action(name="Probability", value="Probability", description="Select Quiz Topic!"),
|
44 |
cl.Action(name="Linear Algebra", value="Linear Algebra", description="Select Quiz Topic!"),
|
|
|
47 |
]
|
48 |
await cl.Message(content="**Pick a Topic and Let the Quiz Adventure Begin!** ππ", actions=actions).send()
|
49 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@cl.action_callback("Linear Algebra")
@cl.action_callback("Probability")
@cl.action_callback("Accounts")
@cl.action_callback("Calculus")
async def on_action(action: cl.Action):
    """Shared handler for every quiz-topic button: ask the chain for a quiz."""
    chain = cl.user_session.get("llm_chain")
    history = chain.memory.load_memory_variables({})["chat_history"]
    payload = {
        "chat_history": history,
        "question": f"Quiz me on the topic {action.value}.",
    }
    result = await chain.ainvoke(
        payload,
        callbacks=[cl.AsyncLangchainCallbackHandler()],
    )
    await cl.Message(result["text"]).send()
if __name__ == "__main__":
    # Serve the FastAPI app on all interfaces; 7860 is the conventional
    # Hugging Face Spaces port.
    uvicorn.run(app, host="0.0.0.0", port=7860)