DanyaalMajid committed on
Commit
5b00cc2
1 Parent(s): c3d7129

Final Commit

Files changed (1)
  1. app.py +13 -7
app.py CHANGED
@@ -1,8 +1,9 @@
 import time
 import streamlit as st
-from llama_index import ServiceContext, StorageContext, set_global_service_context, VectorStoreIndex, SimpleDirectoryReader, Document
+from llama_index import ServiceContext, StorageContext, set_global_service_context, VectorStoreIndex, Document
 from llama_index.embeddings import LangchainEmbedding
 from langchain.embeddings.huggingface import HuggingFaceEmbeddings
+from llama_index.chat_engine.condense_question import CondenseQuestionChatEngine
 from llama_index.llms import LlamaCPP
 from llama_index.llms.llama_utils import messages_to_prompt, completion_to_prompt
 from PyPDF2 import PdfReader
@@ -62,6 +63,9 @@ def main():
     # Storage Context
     storage_context = StorageContext.from_defaults()
     st.title("Llama-CPP Local LLM with RAG (Phi-2 RAG)")
+    # Credits
+    st.markdown(
+        "Made with ❤️️ By Danyaal Majid & Muhammad Bin Asif Using [HF Spaces](https://huggingface.co/spaces/DanyaalMajid/NLP-Final-LocalLLM-RAG)")
 
     pdf = st.file_uploader("Upload a PDF file", type=["pdf"])
 
@@ -72,8 +76,14 @@ def main():
         storage_context.docstore.add_documents(nodes)
         index = (VectorStoreIndex.from_documents(
             documents, service_context=service_context, storage_context=storage_context, llm=llm))
-        chat_engine = index.as_chat_engine(chat_mode="simple", verbose=True)
-
+        # chat_engine = index.as_chat_engine(chat_mode="simple", verbose=True)
+        custom_prompt = ""
+        query_engine = index.as_query_engine()
+        chat_engine = CondenseQuestionChatEngine.from_defaults(
+            query_engine=query_engine,
+            condense_question_prompt=custom_prompt,
+            verbose=True,
+        )
         # Initialize chat history
         if "messages" not in st.session_state:
             st.session_state.messages = []
@@ -110,10 +120,6 @@ def main():
         st.session_state.messages.append(
             {"role": "assistant", "content": full_response})
 
-    # Credits
-    st.markdown(
-        "Made By Danyaal Majid & Muhammad Bin Asif [HuggingFace](https://huggingface.co/spaces/DanyaalMajid/NLP-Final-LocalLLM-RAG).")
-
 
 if __name__ == "__main__":
     main()
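
Note on the new chat-engine wiring: the commit passes custom_prompt = "" to condense_question_prompt. In the legacy (pre-0.10) llama_index CondenseQuestionChatEngine.from_defaults, a falsy prompt argument typically falls back to the library's built-in default condense prompt, so the empty string here most likely has no effect. Below is a minimal sketch of what a real custom condense prompt could look like, assuming a legacy llama_index where Prompt is importable from llama_index.prompts; the template wording is illustrative and not part of this commit.

from llama_index.prompts import Prompt  # legacy (pre-0.10) import path; assumption
from llama_index.chat_engine.condense_question import CondenseQuestionChatEngine

# Illustrative template: condenses the chat history plus the latest user
# message into a single standalone question before querying the index.
custom_prompt = Prompt(
    "Given a conversation (between Human and Assistant) and a follow-up "
    "message from Human, rewrite the message to be a standalone question "
    "that captures all relevant context from the conversation.\n\n"
    "<Chat History>\n{chat_history}\n\n"
    "<Follow Up Message>\n{question}\n\n"
    "<Standalone question>\n"
)

chat_engine = CondenseQuestionChatEngine.from_defaults(
    query_engine=query_engine,  # the query engine built from the index above
    condense_question_prompt=custom_prompt,
    verbose=True,
)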