gizemsarsinlar committed
Commit 698d782 • Parent(s): 5d33605

Update app.py

app.py CHANGED
--- a/app.py
+++ b/app.py
@@ -1,17 +1,13 @@
 import os
-import chainlit as cl
-from openai import AsyncOpenAI  # OpenAI API
+import chainlit as cl
 import textract  # For extracting text from documents
 
-# Load OpenAI API key from environment variables
-api_key = os.getenv("OPENAI_API_KEY")
-
 # Function to extract text from uploaded documents
 async def extract_text_from_file(file_path):
     return textract.process(file_path).decode('utf-8')
 
 # Chat initialization
-@cl.on_chat_start
+@cl.on_chat_start
 async def start_chat():
     settings = {
         "model": "gpt-3.5-turbo",
@@ -22,26 +18,27 @@ async def start_chat():
         "presence_penalty": 0,
     }
     cl.user_session.set("settings", settings)
-
     await cl.Message(content="Welcome! Please upload a document to begin.").send()
 
-# Handling
+# Handling file upload
 @cl.on_message
 async def main(message: cl.Message):
-
-
-
+    # Checking if there is an uploaded file
+    if message.files:
+        uploaded_file = message.files[0]  # Accessing the uploaded file
+        file_path = uploaded_file['path']  # Getting the path of the uploaded file
 
-        #
+        # Extracting text from the uploaded file
         file_content = await extract_text_from_file(file_path)
 
-        #
+        # Saving the content of the document in the user session
        cl.user_session.set("document_content", file_content)
 
+        # Informing the user that the document was uploaded successfully
         await cl.Message(content=f"Document '{uploaded_file['name']}' uploaded successfully! You can now ask questions based on the document content.").send()
+
     else:
         document_content = cl.user_session.get("document_content", "")
-
         if not document_content:
             await cl.Message(content="Please upload a document first.").send()
             return
@@ -49,12 +46,11 @@ async def main(message: cl.Message):
         settings = cl.user_session.get("settings")
         client = AsyncOpenAI()
 
-        #
+        # Creating the prompt for OpenAI based on the document content and user query
         prompt = f"Document Content: {document_content}\n\nUser Query: {message.content}"
-
         msg = cl.Message(content="")
 
-        #
+        # Sending prompt to OpenAI and streaming response
         async for stream_resp in await client.chat.completions.create(
             model=settings["model"],
             messages=[{"role": "system", "content": "Answer based on the provided document."},
@@ -67,5 +63,5 @@ async def main(message: cl.Message):
             token = ""
             await msg.stream_token(token)
 
-        #
+        # Sending the final response
         await msg.send()
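A note on the first hunk: the commit drops both the "from openai import AsyncOpenAI" import and the explicit api_key = os.getenv("OPENAI_API_KEY") line, yet the handler still calls AsyncOpenAI() (new line 47). Unless the import survives in one of the regions the diff view leaves collapsed, the first question after an upload would fail with a NameError. Below is a minimal sketch, not part of the commit, of the imports the remaining code appears to rely on; it assumes the key is supplied through the OPENAI_API_KEY environment variable, which the OpenAI client reads when no api_key argument is given.

import os
import chainlit as cl
import textract  # For extracting text from documents
from openai import AsyncOpenAI  # still needed by client = AsyncOpenAI() in the handler

# Assumption: no api_key argument is required because the OpenAI client
# falls back to the OPENAI_API_KEY environment variable.
client = AsyncOpenAI()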
The file after this commit (regions the diff view leaves collapsed are noted in brackets):

import os
import chainlit as cl
import textract  # For extracting text from documents

# Function to extract text from uploaded documents
async def extract_text_from_file(file_path):
    return textract.process(file_path).decode('utf-8')

# Chat initialization
@cl.on_chat_start
async def start_chat():
    settings = {
        "model": "gpt-3.5-turbo",
        [lines 14-17: remaining settings entries, collapsed in the diff view]
        "presence_penalty": 0,
    }
    cl.user_session.set("settings", settings)
    await cl.Message(content="Welcome! Please upload a document to begin.").send()

# Handling file upload
@cl.on_message
async def main(message: cl.Message):
    # Checking if there is an uploaded file
    if message.files:
        uploaded_file = message.files[0]  # Accessing the uploaded file
        file_path = uploaded_file['path']  # Getting the path of the uploaded file

        # Extracting text from the uploaded file
        file_content = await extract_text_from_file(file_path)

        # Saving the content of the document in the user session
        cl.user_session.set("document_content", file_content)

        # Informing the user that the document was uploaded successfully
        await cl.Message(content=f"Document '{uploaded_file['name']}' uploaded successfully! You can now ask questions based on the document content.").send()

    else:
        document_content = cl.user_session.get("document_content", "")
        if not document_content:
            await cl.Message(content="Please upload a document first.").send()
            return
        [line 45: collapsed in the diff view]
        settings = cl.user_session.get("settings")
        client = AsyncOpenAI()

        # Creating the prompt for OpenAI based on the document content and user query
        prompt = f"Document Content: {document_content}\n\nUser Query: {message.content}"
        msg = cl.Message(content="")

        # Sending prompt to OpenAI and streaming response
        async for stream_resp in await client.chat.completions.create(
            model=settings["model"],
            messages=[{"role": "system", "content": "Answer based on the provided document."},
            [lines 57-62: rest of the request arguments, collapsed in the diff view]
            token = ""
            await msg.stream_token(token)

        # Sending the final response
        await msg.send()
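The streaming section is only partly visible: the diff view collapses the rest of the chat.completions.create arguments (new lines 57-62), and the surviving line token = "" would stream empty tokens as written. The helper below is a sketch under assumptions, not code from the commit: it shows how this block is commonly completed with chainlit and the openai v1 async client, enabling stream=True and reading each chunk's delta content. The function name stream_answer and its parameters are hypothetical stand-ins for the variables used inside main().

import chainlit as cl
from openai import AsyncOpenAI

# Hypothetical helper mirroring the collapsed streaming block in main().
async def stream_answer(client: AsyncOpenAI, settings: dict, prompt: str, msg: cl.Message) -> None:
    async for stream_resp in await client.chat.completions.create(
        model=settings["model"],
        messages=[
            {"role": "system", "content": "Answer based on the provided document."},
            {"role": "user", "content": prompt},  # assumed: the user turn carries the combined prompt
        ],
        stream=True,  # assumed: streaming must be enabled to iterate over chunks
    ):
        # The diff only preserves token = ""; reading the chunk's delta content
        # is the usual pattern with the v1 async client.
        token = stream_resp.choices[0].delta.content or ""
        await msg.stream_token(token)

    # Sending the final response
    await msg.send()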