Upload 2 files
- .gitattributes +1 -0
- HFNWAY.pdf +3 -0
- app.py +55 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+HFNWAY.pdf filter=lfs diff=lfs merge=lfs -text
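The new line routes HFNWAY.pdf through Git LFS, alongside the existing archive and TensorBoard patterns. A minimal sketch of how such patterns select paths, using Python's fnmatch as a rough stand-in for Git's gitattributes matcher (Git's own rules differ slightly, for example for ** patterns):

from fnmatch import fnmatch

# Patterns from the tail of .gitattributes above, plus the new entry.
patterns = ["*.zip", "*.zst", "*tfevents*", "HFNWAY.pdf"]

for path in ["HFNWAY.pdf", "app.py", "model.zip"]:
    lfs = any(fnmatch(path, p) for p in patterns)
    print(f"{path}: {'LFS' if lfs else 'regular git'}")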
HFNWAY.pdf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e432749fc200c3460ad76d7acc2ebd02f16d5f7d588b4ede35225274d40503c
+size 4305099
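Because the PDF is tracked by LFS, the commit stores only this three-line pointer; the 4.3 MB file itself lives in LFS storage, addressed by its SHA-256. A small sketch of reading the pointer's key/value fields (format per the LFS spec URL in the pointer's first line):

# Parse the three "key value" fields of the Git LFS pointer committed above.
pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:8e432749fc200c3460ad76d7acc2ebd02f16d5f7d588b4ede35225274d40503c
size 4305099"""

fields = dict(line.split(" ", 1) for line in pointer.splitlines())
print(fields["oid"])        # sha256 hash of the actual PDF content
print(int(fields["size"]))  # 4305099 bytes, the PDF's real size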
app.py ADDED
@@ -0,0 +1,55 @@
+import openai, os
+
+# Read the API key from the environment (e.g. a Space secret);
+# never commit a hardcoded key.
+openai.api_key = os.environ["OPENAI_API_KEY"]
+
+from llama_index.core import SimpleDirectoryReader
+
+documents = SimpleDirectoryReader(
+    input_files=["./HFNWAY.pdf"]
+).load_data()
+
+from llama_index.core.schema import Document
+
+# Merge the 218 per-page documents loaded above into one Document.
+document = Document(text="\n\n".join([doc.text for doc in documents]))
+
+from llama_index.core import VectorStoreIndex
+from llama_index.core import ServiceContext
+from llama_index.llms.openai import OpenAI
+
+# Quick LLM smoke test:
+# resp = OpenAI().complete("Lord Ram is ")
+# print(resp)
+
+# The embedding model is Hugging Face bge-small; it runs locally and
+# does not need an API key. ServiceContext loads it directly.
+# https://huggingface.co/BAAI/bge-small-en
+# pip install llama-index-embeddings-huggingface
+llm = OpenAI(model="gpt-3.5-turbo", temperature=0.2)
+service_context = ServiceContext.from_defaults(
+    llm=llm, embed_model="local:BAAI/bge-small-en-v1.5"
+)
+
+# This step takes care of chunking, embedding, and indexing.
+index = VectorStoreIndex.from_documents([document], service_context=service_context)
+
+query_engine = index.as_query_engine()
+
+import gradio as gr
+
+def chat_interface(message, history):
+    # Answer the user's message with the query engine.
+    response = str(query_engine.query(message))
+    # gr.ChatInterface manages the displayed history itself; this
+    # append only keeps a local record of the exchange.
+    history.append([message, response])
+    return response
+
+# Create the chat interface using gr.ChatInterface.
+interface = gr.ChatInterface(chat_interface, examples=["Who is DAAJI?", "What is Heartfulness way of meditation all about?"], title="HFN BOT")
+
+# Launch the interface.
+interface.launch()
+
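app.py wires the pipeline end to end: load the PDF, merge its pages into one Document, embed and index it, then serve index.as_query_engine() behind a gr.ChatInterface. Note that ServiceContext was later deprecated in llama-index 0.10; if the Space is ever pinned to a newer release, the same wiring can be expressed through the global Settings object. A minimal sketch, assuming llama-index >= 0.10 with llama-index-llms-openai and llama-index-embeddings-huggingface installed:

# Equivalent setup on llama-index >= 0.10, where ServiceContext is
# replaced by the global Settings object.
from llama_index.core import Settings, SimpleDirectoryReader, VectorStoreIndex
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.openai import OpenAI

# OPENAI_API_KEY is read from the environment (e.g. a Space secret).
Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.2)
Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")

documents = SimpleDirectoryReader(input_files=["./HFNWAY.pdf"]).load_data()
index = VectorStoreIndex.from_documents(documents)  # picks up Settings
query_engine = index.as_query_engine()
print(query_engine.query("Who is DAAJI?"))  # example question from the UI above

Either way, the query engine can also be exercised directly, outside Gradio, which is handy for smoke-testing the index before launching the interface.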