ssm123ssm committed on
Commit
2344212
β€’
1 Parent(s): a14d8ac

Upload 7 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ storage/vector_store.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,12 +1,13 @@
1
  ---
2
- title: LLM Exp
3
- emoji: 🐨
4
- colorFrom: purple
5
- colorTo: blue
6
  sdk: gradio
7
  sdk_version: 3.35.2
8
  app_file: app.py
9
  pinned: false
 
10
  ---
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Pharm
3
+ emoji: πŸ‘
4
+ colorFrom: pink
5
+ colorTo: indigo
6
  sdk: gradio
7
  sdk_version: 3.35.2
8
  app_file: app.py
9
  pinned: false
10
+ duplicated_from: ssm123ssm/pharm
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import openai
3
+ from llama_index import StorageContext, load_index_from_storage
4
+ import logging
5
+ import sys
6
+
7
+ logging.basicConfig(stream=sys.stdout, level=logging.INFO)
8
+ logging.getLogger().handlers = []
9
+ logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
10
+
11
+ os.environ['OPENAI_API_KEY'] = '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'
12
+ openai.api_key = os.environ['OPENAI_API_KEY']
13
+
14
+ from llama_index import load_indices_from_storage
15
+ storage_context = StorageContext.from_defaults(persist_dir='./storage')
16
+ indx = load_indices_from_storage(storage_context)
17
+
18
+ from llama_index.query_engine import CitationQueryEngine
19
+ query_engine = CitationQueryEngine.from_args(index=indx[1], similarity_top_k=4)
20
+ list_query_engine = indx[0].as_query_engine(
21
+ response_mode='tree_summarize',
22
+ use_async=True,
23
+ )
24
+ vector_query_engine = indx[1].as_query_engine(similarity_top_k=4)
25
+
26
+ import gradio as gr
27
+ def chatbot(input_text, is_qen):
28
+ engine = query_engine if is_qen else vector_query_engine
29
+ response = engine.query(input_text)
30
+ return response
31
+
32
+ iface = gr.Interface(fn=chatbot,
33
+ inputs=[gr.inputs.Textbox(lines=7, default="What are examples for short, intermediate and long acting glucocorticoids?", label="Enter your text here"), gr.Checkbox(label="Use Citation Engine instead of Query Engine.")],
34
+ outputs="text",
35
+ title="Project Rang and Dale")
36
+ iface.launch()
37
+
38
+ #%%
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ llama_index
2
+ gradio~=3.35.2
3
+ pypdf
4
+ openai~=0.27.8
storage/docstore.json ADDED
The diff for this file is too large to render. See raw diff
 
storage/graph_store.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"graph_dict": {}}
storage/index_store.json ADDED
The diff for this file is too large to render. See raw diff
 
storage/vector_store.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:faa5fcb34a27825e5c3a5f58813876a51f511b78668623b9fc98cd27e2251e35
3
+ size 59050799