Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -1,147 +1,121 @@
|
|
1 |
-
import
|
2 |
-
import
|
3 |
-
from
|
4 |
-
|
5 |
-
import
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
"
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
60 |
"""
|
61 |
-
High level function that takes in the user inputs and returns the
|
62 |
-
classification results as panel objects.
|
63 |
-
"""
|
64 |
-
try:
|
65 |
-
main.disabled = True
|
66 |
-
if not image_url:
|
67 |
-
yield "##### ⚠️ Provide an image URL"
|
68 |
-
return
|
69 |
-
|
70 |
-
yield "##### ⚙ Fetching image and running model..."
|
71 |
-
try:
|
72 |
-
pil_img = await open_image_url(image_url)
|
73 |
-
img = pn.pane.Image(pil_img, height=400, align="center")
|
74 |
-
except Exception as e:
|
75 |
-
yield f"##### 😔 Something went wrong, please try a different URL!"
|
76 |
-
return
|
77 |
-
|
78 |
-
class_items = class_names.split(",")
|
79 |
-
class_likelihoods = get_similarity_scores(class_items, pil_img)
|
80 |
-
|
81 |
-
# build the results column
|
82 |
-
results = pn.Column("##### 🎉 Here are the results!", img)
|
83 |
-
|
84 |
-
for class_item, class_likelihood in zip(class_items, class_likelihoods):
|
85 |
-
row_label = pn.widgets.StaticText(
|
86 |
-
name=class_item.strip(), value=f"{class_likelihood:.2%}", align="center"
|
87 |
-
)
|
88 |
-
row_bar = pn.indicators.Progress(
|
89 |
-
value=int(class_likelihood * 100),
|
90 |
-
sizing_mode="stretch_width",
|
91 |
-
bar_color="secondary",
|
92 |
-
margin=(0, 10),
|
93 |
-
design=pn.theme.Material,
|
94 |
-
)
|
95 |
-
results.append(pn.Column(row_label, row_bar))
|
96 |
-
yield results
|
97 |
-
finally:
|
98 |
-
main.disabled = False
|
99 |
-
|
100 |
-
|
101 |
-
# create widgets
|
102 |
-
randomize_url = pn.widgets.Button(name="Randomize URL", align="end")
|
103 |
-
|
104 |
-
image_url = pn.widgets.TextInput(
|
105 |
-
name="Image URL to classify",
|
106 |
-
value=pn.bind(random_url, randomize_url),
|
107 |
-
)
|
108 |
-
class_names = pn.widgets.TextInput(
|
109 |
-
name="Comma separated class names",
|
110 |
-
placeholder="Enter possible class names, e.g. cat, dog",
|
111 |
-
value="cat, dog, parrot",
|
112 |
-
)
|
113 |
|
114 |
-
|
115 |
-
|
116 |
-
|
117 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
118 |
)
|
119 |
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
height=600,
|
124 |
)
|
125 |
|
126 |
-
#
|
127 |
-
|
128 |
-
|
129 |
-
href_button = pn.widgets.Button(icon=icon, width=35, height=35)
|
130 |
-
href_button.js_on_click(code=f"window.open('{url}')")
|
131 |
-
footer_row.append(href_button)
|
132 |
-
footer_row.append(pn.Spacer())
|
133 |
-
|
134 |
-
# create dashboard
|
135 |
-
main = pn.WidgetBox(
|
136 |
-
input_widgets,
|
137 |
-
interactive_result,
|
138 |
-
footer_row,
|
139 |
-
)
|
140 |
|
141 |
-
title = "Panel Demo - Image Classification"
|
142 |
-
pn.template.BootstrapTemplate(
|
143 |
-
title=title,
|
144 |
-
main=main,
|
145 |
-
main_max_width="min(50%, 698px)",
|
146 |
-
header_background="#F08080",
|
147 |
-
).servable(title=title)
|
|
|
# Standard library
import os
import platform

# Third-party
import anthropic
import dotenv
import panel
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import DirectoryLoader, PyPDFLoader
from langchain_community.vectorstores import Chroma

panel.extension()

# Load variables from a local .env file, then read the Anthropic API key
# from the environment (None if unset — the client call will fail later).
dotenv.load_dotenv()
ANTHROPIC_API_KEY = os.getenv('ANTHROPIC_API_KEY')
|
12 |
+
|
13 |
+
@panel.cache
def load_vectorstore():
    """Build (or reopen) the Chroma vectorstore over the PDF documents.

    On first run, loads every PDF under ``Docs/``, splits it into chunks,
    embeds the chunks with a HuggingFace sentence-transformer, and persists
    the index to ``./chroma_db``. On subsequent runs the persisted index is
    reopened directly. Cached by ``panel.cache`` so the work happens once
    per server process.

    Returns:
        Chroma: the ready-to-query vectorstore.
    """
    # Prefer the Apple-silicon GPU ("mps") on macOS, otherwise CPU.
    # platform.system() == "Darwin" is the canonical macOS check — more
    # robust than substring-matching the free-form platform.platform().
    device = "mps" if platform.system() == "Darwin" else "cpu"

    # Create the HF embeddings.
    model_name = "sentence-transformers/all-mpnet-base-v2"
    model_kwargs = {'device': device}
    encode_kwargs = {'normalize_embeddings': False}

    hf_embeddings = HuggingFaceEmbeddings(
        model_name=model_name,
        model_kwargs=model_kwargs,
        encode_kwargs=encode_kwargs
    )

    # If the vector embeddings of the documents have not been created yet
    # (detected by the absence of Chroma's on-disk sqlite file).
    if not os.path.isfile('chroma_db/chroma.sqlite3'):
        # Load the documents.
        loader = DirectoryLoader('Docs/', glob="./*.pdf", loader_cls=PyPDFLoader)
        data = loader.load()

        # Split the docs into overlapping chunks for embedding.
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=50
        )
        docs = splitter.split_documents(data)

        # Embed the documents and persist them in a Chroma DB.
        vectorstore = Chroma.from_documents(
            documents=docs,
            embedding=hf_embeddings,
            persist_directory="./chroma_db",
        )
    else:
        # Load the existing ChromaDB from disk.
        vectorstore = Chroma(
            persist_directory="./chroma_db",
            embedding_function=hf_embeddings,
        )

    return vectorstore
|
52 |
+
|
53 |
+
# Module-level conversation history shared across chat turns.
# Each entry is a dict of the form {"role": "user"|"assistant", "content": str}.
chat_history = []
|
55 |
+
|
56 |
+
async def get_response(contents, user, instance):
    """Panel chat callback: answer the user's question with RAG.

    Retrieves the chunks most similar to the question from the Chroma
    vectorstore, folds them together with the running chat history into a
    single prompt, and streams Claude's reply back to the chat interface
    by yielding the growing partial response.
    """
    # Load the (panel.cache-d) vectorstore.
    vectorstore = load_vectorstore()

    question = contents

    # Gather the relevant chunks that form the grounding context.
    relevant_chunks = vectorstore.similarity_search(question)

    context = "\n"
    for chunk in relevant_chunks:
        context += "\n" + chunk.page_content + "\n"

    # Record the user's turn in the shared module-level history.
    global chat_history
    chat_history.append({"role": "user", "content": question})

    # Prompt template combining retrieved context, history and question.
    prompt = f"""
    Here are the Task Context and History

    - Context: {context}
    - Chat History: {chat_history}
    - User Question: {question}
    """

    # Create the Anthropic client (API key read at module import).
    client = anthropic.Anthropic(api_key=ANTHROPIC_API_KEY)

    response = ''
    # Stream the completion, yielding the accumulated text so the chat UI
    # updates incrementally as tokens arrive.
    with client.messages.stream(
        max_tokens=1024,
        messages=[{"role": "user", "content": prompt}],
        model="claude-3-haiku-20240307",
    ) as stream:
        for text in stream.text_stream:
            response += text
            yield response

    # Record the assistant's completed turn in the history.
    chat_history.append({"role": "assistant", "content": response})
|
101 |
+
|
102 |
+
# Assemble the chat UI; `get_response` streams the answer for each
# user message. callback_exception='verbose' surfaces tracebacks in-chat.
chat_interface = panel.chat.ChatInterface(
    callback=get_response,
    callback_user="Sarathi",
    sizing_mode="stretch_width",
    callback_exception='verbose',
    message_params={
        "default_avatars": {"Sarathi": "S", "User": "U"},
        "reaction_icons": {"like": "thumb-up"},
    },
)

# Greeting shown on load; respond=False keeps the callback from firing.
chat_interface.send(
    {"user": "Sarathi", "value": '''Welcome to Sarathi, your personal assistant for Assam Tourism.'''},
    respond=False,
)
|
117 |
|
118 |
+
# Wrap the chat in a Bootstrap template and expose it as the served app.
template = panel.template.BootstrapTemplate(
    title="Sarathi",
    favicon="favicon.png",
    header_background="#000000",
    main=[panel.Tabs(('Chat', chat_interface), dynamic=True)],
)

template.servable()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
121 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|