Stanlito committed
Commit e5b5f0b
1 Parent(s): 27dc228

Upload 8 files

__pycache__/gpt.cpython-311.pyc ADDED
Binary file (1.41 kB).
 
__pycache__/gradio_pr.cpython-311.pyc ADDED
Binary file (3.72 kB).
 
app.py ADDED
@@ -0,0 +1,69 @@
+ import os
+ import pathlib
+ import re
+ import gradio as gr
+ from langchain.docstore.document import Document
+ from langchain.document_loaders import TextLoader
+ from langchain.text_splitter import CharacterTextSplitter
+ from langchain.embeddings.openai import OpenAIEmbeddings
+ from langchain.vectorstores import FAISS
+
+ os.environ["OPENAI_API_KEY"] = "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"  # hardcoded key: better to read this from the environment
+
+ from langchain.prompts.chat import (
+     ChatPromptTemplate,
+     SystemMessagePromptTemplate,
+     HumanMessagePromptTemplate,
+ )
+ from langchain.chat_models import ChatOpenAI
+ from langchain.chains import RetrievalQAWithSourcesChain
+
+ # Set the data store directory
+ DATA_STORE_DIR = "data_store"
+
+ if os.path.exists(DATA_STORE_DIR):
+     vector_store = FAISS.load_local(
+         DATA_STORE_DIR,
+         OpenAIEmbeddings()
+     )
+ else:
+     raise SystemExit(f"Missing files. Upload index.faiss and index.pkl files to {DATA_STORE_DIR} directory first")  # abort: the chain below needs vector_store
+
+ system_template = """Use the following pieces of context to answer the user's question.
+ Take note of the sources and include them in the answer in the format: "SOURCES: source1", use "SOURCES" in capital letters regardless of the number of sources.
+ If you don't know the answer, just say "I don't know", don't try to make up an answer.
+ ----------------
+ {summaries}"""
+
+ messages = [
+     SystemMessagePromptTemplate.from_template(system_template),
+     HumanMessagePromptTemplate.from_template("{question}")
+ ]
+ prompt = ChatPromptTemplate.from_messages(messages)
+
+ llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0,
+                  max_tokens=256)  # Modify model_name if you have access to GPT-4
+
+ chain_type_kwargs = {"prompt": prompt}
+ chain = RetrievalQAWithSourcesChain.from_chain_type(
+     llm=llm,
+     chain_type="stuff",
+     retriever=vector_store.as_retriever(),
+     return_source_documents=True,
+     chain_type_kwargs=chain_type_kwargs
+ )
+
+
+ def chatbot_interface(query):
+     result = chain(query)
+     return result['answer']
+
+
+ # Create a Gradio interface
+ gr.Interface(
+     fn=chatbot_interface,
+     inputs="text",
+     outputs="text",
+     title="LLM Chatbot",
+     description="Chat with the LLM Chatbot on Custom Data"
+ ).launch()
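
app.py assumes a prebuilt FAISS index in data_store/. For context, here is a minimal sketch of how such an index could be built with the same LangChain APIs the file already imports; the source file name "docs.txt" and the chunking parameters are assumptions, not part of this commit:

import os
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS

# Assumes OPENAI_API_KEY is already set in the environment.
# "docs.txt" is a hypothetical source file; any plain-text document works.
documents = TextLoader("docs.txt").load()
chunks = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0).split_documents(documents)

# Embed the chunks and persist the index; save_local() writes the
# index.faiss and index.pkl files that app.py expects in data_store/.
vector_store = FAISS.from_documents(chunks, OpenAIEmbeddings())
vector_store.save_local("data_store")
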
data_store/index.faiss ADDED
Binary file (79.9 kB).
 
data_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:83b6ab5883ed5446048cac782f8be8027281e3f68ed1ae5f829d6b3cf3ee1de6
+ size 36695
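
index.pkl is committed as a Git LFS pointer: the repository stores only the SHA-256 of the real file (the oid) and its byte size. A quick sketch to confirm that a downloaded copy is the actual pickle rather than the pointer text:

import hashlib
import os

path = "data_store/index.pkl"
expected_oid = "83b6ab5883ed5446048cac782f8be8027281e3f68ed1ae5f829d6b3cf3ee1de6"
expected_size = 36695

assert os.path.getsize(path) == expected_size, "size mismatch: file may still be an LFS pointer"
digest = hashlib.sha256(open(path, "rb").read()).hexdigest()
assert digest == expected_oid, "content does not match the LFS oid"
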
gradio_pr.py ADDED
@@ -0,0 +1,89 @@
+ import gradio as gr
+ import os
+ import pathlib
+ import random
+ # import torch
+ # import transformers
+ from langchain.document_loaders import TextLoader
+ from langchain.text_splitter import CharacterTextSplitter
+ from langchain.embeddings.openai import OpenAIEmbeddings
+ from langchain.vectorstores import FAISS
+ from langchain.prompts.chat import (
+     ChatPromptTemplate,
+     SystemMessagePromptTemplate,
+     HumanMessagePromptTemplate,
+ )
+ from langchain.chat_models import ChatOpenAI
+ from langchain.chains import RetrievalQAWithSourcesChain
+
+ os.environ["OPENAI_API_KEY"] = "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"  # hardcoded key: better to read this from the environment
+
+ # Set the data store directory
+ DATA_STORE_DIR = "data_store"
+
+ if os.path.exists(DATA_STORE_DIR):
+     vector_store = FAISS.load_local(
+         DATA_STORE_DIR,
+         OpenAIEmbeddings()
+     )
+ else:
+     raise SystemExit(f"Missing files. Upload index.faiss and index.pkl files to {DATA_STORE_DIR} directory first")  # abort: the chain below needs vector_store
+
+ system_template = """Use the following pieces of context to answer the user's question.
+ Take note of the sources and include them in the answer in the format: "SOURCES: source1", use "SOURCES" in capital letters regardless of the number of sources.
+ If you don't know the answer, just say "I don't know", don't try to make up an answer.
+ ----------------
+ {summaries}"""
+
+ messages = [
+     SystemMessagePromptTemplate.from_template(system_template),
+     HumanMessagePromptTemplate.from_template("{question}")
+ ]
+ prompt = ChatPromptTemplate.from_messages(messages)
+
+ llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, max_tokens=256)
+
+ chain_type_kwargs = {"prompt": prompt}
+ chain = RetrievalQAWithSourcesChain.from_chain_type(
+     llm=llm,
+     chain_type="stuff",
+     retriever=vector_store.as_retriever(),
+     return_source_documents=True,
+     chain_type_kwargs=chain_type_kwargs
+ )
+
+
+ class Chatbot:
+     def __init__(self):
+         self.query = None
+
+     def chat(self, query):
+         self.query = query
+         result = chain(query)
+         return result['answer']
+
+
+ chatbot = Chatbot()
+
+
+ # Create a Gradio interface
+ def chat_interface(message, history):  # gr.ChatInterface calls fn with (message, history)
+     response = chatbot.chat(message)
+     return response
+
+
+ # inputs = gr.inputs.Textbox(lines=2, placeholder="Enter your message here...")
+ # outputs = gr.outputs.Textbox()
+
+ # chat_interface = gr.ChatInterface(chat_interface, inputs=inputs, outputs=outputs)
+ #
+ # chat_interface.launch()
+ gr.ChatInterface(
+     chat_interface,
+     chatbot=gr.Chatbot(height=300),
+     textbox=gr.Textbox(placeholder="Ask me a yes or no question"),
+     description="Ask Yes Man any question",
+     theme="soft",
+     cache_examples=False,  # True would require `examples` to be set
+     clear_btn="Clear",
+ ).launch()
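
Both app.py and gradio_pr.py hand only result['answer'] to the UI, even though the prompt asks the model to cite sources and the chain is built with return_source_documents=True. For reference, a short sketch of the extra fields RetrievalQAWithSourcesChain returns (the query string here is made up):

result = chain("What does the document say about pricing?")  # hypothetical query
print(result["answer"])       # the model's reply
print(result["sources"])      # the "SOURCES: ..." string the system prompt requests
for doc in result["source_documents"]:  # raw chunks, present because return_source_documents=True
    print(doc.metadata.get("source"), doc.page_content[:80])
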
main.py ADDED
@@ -0,0 +1,21 @@
+ import time
+ import gradio as gr
+
+
+ def slow_echo(message, history):  # a generator: each yield streams a longer prefix to the chat window
+     for i in range(len(message)):
+         time.sleep(0.3)
+         yield "You typed: " + message[: i + 1]
+
+
+ gr.ChatInterface(
+     slow_echo,
+     # chatbot=gr.Chatbot(height=200),
+     # textbox=gr.Textbox(placeholder="Ask me a yes or no question"),
+     description="Ask Yes Man any question",
+     theme="soft",
+     css="",
+     examples=["Hello", "Am I cool?", "Are tomatoes vegetables?"],
+     cache_examples=True,
+     clear_btn="Clear",
+ ).queue().launch()
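
main.py is a self-contained streaming demo: because slow_echo is a generator, gr.ChatInterface streams each yielded prefix, and .queue() is what enables generator responses. The same pattern could stream answers from the retrieval chain defined in gradio_pr.py; a sketch, assuming that chain object is in scope:

import gradio as gr

def stream_answer(message, history):
    # `chain` is the RetrievalQAWithSourcesChain built in gradio_pr.py (assumption).
    answer = chain(message)["answer"]
    # Yield progressively longer prefixes, mirroring slow_echo above.
    for i in range(0, len(answer), 10):
        yield answer[: i + 10]

gr.ChatInterface(stream_answer).queue().launch()
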
streamlit_app.py ADDED
@@ -0,0 +1,100 @@
+ import os
+ import pathlib
+ import re
+ import streamlit as st
+ from streamlit_chat import message  # not used below; see the sketch after this diff
+ from langchain.docstore.document import Document
+ from langchain.document_loaders import TextLoader
+ from langchain.text_splitter import CharacterTextSplitter
+ from langchain.embeddings.openai import OpenAIEmbeddings
+ from langchain.vectorstores import FAISS
+
+ st.set_page_config(
+     page_title="LLM Chatbot"
+ )
+ st.header("LLM Chatbot on Custom Data")
+ st.sidebar.header("Instructions")
+ st.sidebar.info(
+     '''This is a web application that allows you to interact with
+     your custom data.
+     '''
+ )
+ st.sidebar.info('''Enter a query in the text box and press enter
+ to receive a response.''')
+
+ st.sidebar.info('''
+ This app works on your own data as well.
+ ''')
+
+ os.environ["OPENAI_API_KEY"] = "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"  # hardcoded key: better to read this from the environment
+
+ from langchain.prompts.chat import (
+     ChatPromptTemplate,
+     SystemMessagePromptTemplate,
+     HumanMessagePromptTemplate,
+ )
+ from langchain.chat_models import ChatOpenAI
+ from langchain.chains import RetrievalQAWithSourcesChain
+
+ # Initialize Streamlit
+ st.title("Stanlito AI Chatbot")
+
+ # Set the data store directory
+ DATA_STORE_DIR = "data_store"
+
+ # Upload the files `$DATA_STORE_DIR/index.faiss` and `$DATA_STORE_DIR/index.pkl` to local
+ if os.path.exists(DATA_STORE_DIR):
+     vector_store = FAISS.load_local(
+         DATA_STORE_DIR,
+         OpenAIEmbeddings()
+     )
+ else:
+     st.error(f"Missing files. Upload index.faiss and index.pkl files to {DATA_STORE_DIR} directory first")
+     st.stop()  # halt the script so the chain below never sees an undefined vector_store
+
+ # Define system template
+ system_template = """Use the following pieces of context to answer the user's question.
+ Take note of the sources and include them in the answer in the format: "SOURCES: source1", use "SOURCES" in capital letters regardless of the number of sources.
+ If you don't know the answer, just say "I don't know", don't try to make up an answer.
+ ----------------
+ {summaries}"""
+
+ # Create the prompt
+ messages = [
+     SystemMessagePromptTemplate.from_template(system_template),
+     HumanMessagePromptTemplate.from_template("{question}")
+ ]
+ prompt = ChatPromptTemplate.from_messages(messages)
+
+ # Load the language model
+ llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0,
+                  max_tokens=256)  # Modify model_name if you have access to GPT-4
+
+ # Create the chain
+ chain_type_kwargs = {"prompt": prompt}
+ chain = RetrievalQAWithSourcesChain.from_chain_type(
+     llm=llm,
+     chain_type="stuff",
+     retriever=vector_store.as_retriever(),
+     return_source_documents=True,
+     chain_type_kwargs=chain_type_kwargs
+ )
+
+
+ # Define function to print the result
+ def print_result(result):  # uses the module-level `query` defined below
+     output_text = f"""### Question:
+     {query}
+     Answer:
+     {result['answer']}
+     """
+     st.markdown(output_text)
+
+
+ # Get user input
+ query = st.text_input("Ask a question")
+
+ # Process user input
+ if query:
+     result = chain(query)
+     print_result(result)
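
streamlit_app.py imports streamlit_chat's message but renders the answer with st.markdown instead, and each rerun forgets the previous exchange. A sketch of how the unused import could display a persistent chat history, assuming the same chain object built above:

import streamlit as st
from streamlit_chat import message

# st.session_state survives Streamlit reruns, so the conversation accumulates.
if "history" not in st.session_state:
    st.session_state.history = []

query = st.text_input("Ask a question")
if query:
    result = chain(query)  # the chain built in streamlit_app.py (assumption)
    st.session_state.history.append((query, result["answer"]))

# Render the conversation as chat bubbles.
for i, (q, a) in enumerate(st.session_state.history):
    message(q, is_user=True, key=f"q{i}")
    message(a, key=f"a{i}")
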