durgeshshisode1988 committed on
Commit b9e07ca
1 Parent(s): b218b55

Update llama3.py

Files changed (1)
  1. llama3.py +81 -81
llama3.py CHANGED
@@ -1,81 +1,81 @@
- import streamlit as st
- import os
- from langchain_groq import ChatGroq
- from langchain_openai import OpenAIEmbeddings
- from langchain.text_splitter import RecursiveCharacterTextSplitter
- from langchain.chains.combine_documents import create_stuff_documents_chain
- from langchain_core.prompts import ChatPromptTemplate
- from langchain.chains import create_retrieval_chain
- from langchain_community.vectorstores import FAISS
-
- from langchain_community.document_loaders import PyPDFDirectoryLoader
-
- from dotenv import load_dotenv
-
- load_dotenv()
-
- ## load the GroqAPI Key
- os.environ['OPENAI_API_KEY']=os.getenv("OPENAI_API_KEY")
- groq_api_key = os.getenv('GROQ_API_KEY')
-
- st.title("ChatBot Demo for Error Codes")
-
- llm=ChatGroq(groq_api_key=groq_api_key,
-              model="Llama3-8b-8192")
-
-
- prompt = ChatPromptTemplate.from_template(
- """
- Answer the question based on the provided context only.
- Please provide the most accurate response based on the question.
- <context>
- {context}
- <context>
- Question: {input}
- """
- )
-
-
- def vector_embedding():
-
-     if "vectors" not in st.session_state:
-
-         st.session_state.embeddings = OpenAIEmbeddings()
-         st.session_state.loader = PyPDFDirectoryLoader("./data")
-         st.session_state.docs = st.session_state.loader.load()
-         st.session_state.text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
-         st.session_state.final_documents = st.session_state.text_splitter.split_documents(st.session_state.docs[:20])
-         st.session_state.vectors = FAISS.from_documents(st.session_state.final_documents, st.session_state.embeddings)
-
-
-
-
-
- prompt1=st.text_input("Enter your question from Documents")
-
- if st.button("Documents Embedding"):
-     vector_embedding()
-     st.write("VectorStore DB is ready")
-
- import time
-
-
-
-
- if prompt1:
-     start = time.process_time()
-     document_chain = create_stuff_documents_chain(llm, prompt)
-     retriever = st.session_state.vectors.as_retriever()
-     retrieval_chain = create_retrieval_chain(retriever, document_chain)
-     response = retrieval_chain.invoke({'input': prompt1})
-     print("Response time : ", time.process_time() - start)
-     st.write(response['answer'])
-
-     # With a Streamlit expander
-     with st.expander("Document Similarity Search"):
-         # Find the relevant chunks
-         for i, doc in enumerate(response["context"]):
-             st.write(doc.page_content)
-             st.write("------------------------------------")
-
-
+ import streamlit as st
+ import os
+ from langchain_groq import ChatGroq
+ from langchain_openai import OpenAIEmbeddings
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain.chains.combine_documents import create_stuff_documents_chain
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain.chains import create_retrieval_chain
+ from langchain_community.vectorstores import FAISS
+
+ from langchain_community.document_loaders import PyPDFDirectoryLoader
+
+ from dotenv import load_dotenv
+
+ load_dotenv()
+
+ ## load the GroqAPI Key
+ os.environ['OPENAI_API_KEY']=os.getenv("OPENAI_API_KEY")
+ groq_api_key = os.getenv('GROQ_API_KEY')
+
+ st.title("ChatBot Demo for Error Codes")
+
+ llm=ChatGroq(groq_api_key=groq_api_key,
+              model="Llama3-8b-8192")
+
+
+ prompt = ChatPromptTemplate.from_template(
+ """
+ Answer the question based on the provided context only.
+ Please provide the most accurate response based on the question.
+ <context>
+ {context}
+ <context>
+ Question: {input}
+ """
+ )
+
+
+ def vector_embedding():
+
+     if "vectors" not in st.session_state:
+
+         st.session_state.embeddings = OpenAIEmbeddings()
+         st.session_state.loader = PyPDFDirectoryLoader("/*.pdf")
+         st.session_state.docs = st.session_state.loader.load()
+         st.session_state.text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
+         st.session_state.final_documents = st.session_state.text_splitter.split_documents(st.session_state.docs[:20])
+         st.session_state.vectors = FAISS.from_documents(st.session_state.final_documents, st.session_state.embeddings)
+
+
+
+
+
+ prompt1=st.text_input("Enter your question from Documents")
+
+ if st.button("Documents Embedding"):
+     vector_embedding()
+     st.write("VectorStore DB is ready")
+
+ import time
+
+
+
+
+ if prompt1:
+     start = time.process_time()
+     document_chain = create_stuff_documents_chain(llm, prompt)
+     retriever = st.session_state.vectors.as_retriever()
+     retrieval_chain = create_retrieval_chain(retriever, document_chain)
+     response = retrieval_chain.invoke({'input': prompt1})
+     print("Response time : ", time.process_time() - start)
+     st.write(response['answer'])
+
+     # With a Streamlit expander
+     with st.expander("Document Similarity Search"):
+         # Find the relevant chunks
+         for i, doc in enumerate(response["context"]):
+             st.write(doc.page_content)
+             st.write("------------------------------------")
+
+