Update app.py
Browse files
app.py
CHANGED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""RAG chat app: index local .epub/.pdf files and answer questions via a local LLM."""

from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.ollama import Ollama

import gradio as gr

# Recursively load every .epub and .pdf under ./test/ into llama-index documents.
loader = SimpleDirectoryReader(
    input_dir="./test/",
    recursive=True,
    required_exts=[".epub", ".pdf"],
)
documents = loader.load_data()

# Local sentence-transformers embedding model — runs on-device, no API key required.
embedding_model = HuggingFaceEmbedding(
    model_name="sentence-transformers/all-MiniLM-L6-v2"
)

# LLM served by a local Ollama instance; generous timeout (seconds) because
# local generation on CPU can be slow.  TODO confirm the unit matches intent.
llama = Ollama(model="phi3", request_timeout=100000)

# Build an in-memory vector index over the loaded documents using the local embeddings.
index = VectorStoreIndex.from_documents(documents, embed_model=embedding_model)
def chatbot(q, history):
    """Answer a user question against the document index.

    Parameters
    ----------
    q : str
        The user's question.
    history : list
        Chat history supplied by gradio's ChatInterface; unused here because
        each query is answered statelessly against the index.

    Returns
    -------
    str
        The query engine's answer, stringified for the chat UI.
    """
    # Bug fix: the original rebuilt the query engine on every message.
    # Build it once on first call and cache it on the function object.
    engine = getattr(chatbot, "_engine", None)
    if engine is None:
        engine = index.as_query_engine(llm=llama)
        chatbot._engine = engine
    answer = engine.query(q)
    return str(answer)
# Wire the chat handler into a Gradio chat UI and start the server.
# NOTE(review): share=True requests a public tunnel URL — confirm exposing
# this app publicly is intended.
demo = gr.ChatInterface(chatbot)
demo.launch(share=True)