rishisim committed
Commit 91a0713 · verified · 1 Parent(s): 5fcd144

Update app.py

Files changed (1)
  1. app.py +108 -35
app.py CHANGED
@@ -1,5 +1,4 @@
 import gradio as gr
-from langchain.schema import AIMessage, HumanMessage
 
 
 import os
@@ -13,8 +12,8 @@ llm = HuggingFaceEndpoint(repo_id = repo_id, max_new_tokens = 128, temperature =
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.prompts import ChatPromptTemplate
 
-# prompt = ChatPromptTemplate.from_template("tell me a joke about {topic}")
-# chain = prompt | llm | StrOutputParser()
+prompt = ChatPromptTemplate.from_template("tell me a joke about {topic}")
+chain = prompt | llm | StrOutputParser()
 
 # from langchain.document_loaders.csv_loader import CSVLoader
 from langchain_community.document_loaders.csv_loader import CSVLoader
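
The two lines enabled in the hunk above use LangChain Expression Language (LCEL): each pipe feeds one runnable's output into the next (prompt, then model, then string parser). As a minimal sketch of how that chain is exercised, assuming llm is the HuggingFaceEndpoint configured earlier in app.py (the invocation below is illustrative only, not part of the commit):

    from langchain_core.output_parsers import StrOutputParser
    from langchain_core.prompts import ChatPromptTemplate

    # Template with a single {topic} variable; invoke() fills it from a dict.
    prompt = ChatPromptTemplate.from_template("tell me a joke about {topic}")
    chain = prompt | llm | StrOutputParser()  # prompt -> model -> plain string

    print(chain.invoke({"topic": "vector databases"}))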
@@ -31,55 +30,129 @@ from langchain_huggingface.embeddings import HuggingFaceEndpointEmbeddings
 model = "BAAI/bge-m3"
 embeddings = HuggingFaceEndpointEmbeddings(model = model)
 
+vectorstore = Chroma.from_documents(documents = data, embedding = embeddings)
+retriever = vectorstore.as_retriever()
+
+# from langchain.prompts import PromptTemplate
+
+from langchain_core.prompts import ChatPromptTemplate
+
+prompt = ChatPromptTemplate.from_template("""Given the following context and a question, generate an answer based on the context only.
+In the answer try to provide as much text as possible from "response" section in the source document context without making much changes.
+If somebody asks "Who are you?" or a similar phrase, state "I am Rishi's assistant built using a Large Language Model!"
+If the answer is not found in the context, kindly state "I don't know. Please ask Rishi on Discord. Discord Invite Link: https://discord.gg/6ezpZGeCcM. Or email at rishi@aiotsmartlabs.com" Don't try to make up an answer.
+CONTEXT: {context}
+HISTORY: {history}
+QUESTION: {question}""")
+
+from langchain_core.runnables import RunnablePassthrough
+
+rag_chain = (
+    {"context": retriever, "history": RunnablePassthrough(), "question": RunnablePassthrough()}
+    | prompt
+    | llm
+    | StrOutputParser()
+)
 
 # Define the chat response function
 def chatresponse(message, history):
-    # history_langchain_format = []
-    # for human, ai in history:
-    #     history_langchain_format.append(HumanMessage(content=human))
-    #     history_langchain_format.append(AIMessage(content=ai))
-    # history_langchain_format.append(HumanMessage(content=message))
-
-    data_vectorstore = Chroma.from_documents(documents = data, embedding = embeddings)
-    # history_vectorstore = Chroma.from_documents(documents = history, embedding = embeddings)
-    # vectorstore = data_vectorstore + history_vectorstore
-    vectorstore = data_vectorstore
-    retriever = vectorstore.as_retriever()
-
-    history_str = "\n".join([f"Human: {h[0]}\nAI: {h[1]}" for h in history])
-
-    # from langchain.prompts import PromptTemplate
-
-    from langchain_core.prompts import ChatPromptTemplate
-
-    prompt = ChatPromptTemplate.from_template("""Given the following history, context and a question, generate an answer based on the context only.
-
-    In the answer try to provide as much text as possible from "response" section in the source document context without making much changes.
-    If somebody asks "Who are you?" or a similar phrase, state "I am Rishi's assistant built using a Large Language Model!"
-    If the answer is not found in the context, kindly state "I don't know. Please ask Rishi on Discord. Discord Invite Link: https://discord.gg/6ezpZGeCcM. Or email at rishi@aiotsmartlabs.com" Don't try to make up an answer.
-
-    HISTORY: {history}
-
-    CONTEXT: {context}
-
-    QUESTION: {question}""")
-
-    from langchain_core.runnables import RunnablePassthrough
-    rag_chain = (
-        {"history": history_str, "context": retriever, "question": RunnablePassthrough()}
-        | prompt
-        | llm
-        | StrOutputParser()
-    )
-
-    output = rag_chain.invoke(message)
-    response = output.split('ANSWER: ')[-1].strip()
-    return response
-
-# Launch the Gradio chat interface
-gr.ChatInterface(chatresponse).launch()
+    history_text = "\n".join([f"User: {h[0]}\nAssistant: {h[1]}" for h in history])
+
+    inputs = {
+        "context": "",  # context will be retrieved by the retriever
+        "history": history_text,
+        "question": message
+    }
+
+    output = rag_chain.invoke(inputs)
+    response = output.split('ANSWER: ')[-1].strip()
+    return response
+
+# Launch the Gradio chat interface
+gr.ChatInterface(chatresponse).launch()
+
+# import gradio as gr
+# from langchain.schema import AIMessage, HumanMessage
+
+# import os
+# hftoken = os.environ["hftoken"]
+
+# from langchain_huggingface import HuggingFaceEndpoint
+
+# repo_id = "mistralai/Mistral-7B-Instruct-v0.3"
+# llm = HuggingFaceEndpoint(repo_id = repo_id, max_new_tokens = 128, temperature = 0.7, huggingfacehub_api_token = hftoken)
+
+# from langchain_core.output_parsers import StrOutputParser
+# from langchain_core.prompts import ChatPromptTemplate
+
+# # prompt = ChatPromptTemplate.from_template("tell me a joke about {topic}")
+# # chain = prompt | llm | StrOutputParser()
+
+# # from langchain.document_loaders.csv_loader import CSVLoader
+# from langchain_community.document_loaders.csv_loader import CSVLoader
+
+# loader = CSVLoader(file_path='aiotsmartlabs_faq.csv', source_column = 'prompt')
+# data = loader.load()
+
+# from langchain_huggingface import HuggingFaceEmbeddings
+# from langchain_chroma import Chroma
+# from langchain_huggingface.embeddings import HuggingFaceEndpointEmbeddings
+
+# # CHECK MTEB LEADERBOARD & FIND BEST EMBEDDING MODEL
+# model = "BAAI/bge-m3"
+# embeddings = HuggingFaceEndpointEmbeddings(model = model)
+
+# # Define the chat response function
+# def chatresponse(message, history):
+#     # history_langchain_format = []
+#     # for human, ai in history:
+#     #     history_langchain_format.append(HumanMessage(content=human))
+#     #     history_langchain_format.append(AIMessage(content=ai))
+#     # history_langchain_format.append(HumanMessage(content=message))
+
+#     data_vectorstore = Chroma.from_documents(documents = data, embedding = embeddings)
+#     # history_vectorstore = Chroma.from_documents(documents = history, embedding = embeddings)
+#     # vectorstore = data_vectorstore + history_vectorstore
+#     vectorstore = data_vectorstore
+#     retriever = vectorstore.as_retriever()
+
+#     history_str = "\n".join([f"Human: {h[0]}\nAI: {h[1]}" for h in history])
+
+#     # from langchain.prompts import PromptTemplate
+
+#     from langchain_core.prompts import ChatPromptTemplate
+
+#     prompt = ChatPromptTemplate.from_template("""Given the following history, context and a question, generate an answer based on the context only.
+
+#     In the answer try to provide as much text as possible from "response" section in the source document context without making much changes.
+#     If somebody asks "Who are you?" or a similar phrase, state "I am Rishi's assistant built using a Large Language Model!"
+#     If the answer is not found in the context, kindly state "I don't know. Please ask Rishi on Discord. Discord Invite Link: https://discord.gg/6ezpZGeCcM. Or email at rishi@aiotsmartlabs.com" Don't try to make up an answer.
+
+#     HISTORY: {history}
+
+#     CONTEXT: {context}
+
+#     QUESTION: {question}""")
+
+#     from langchain_core.runnables import RunnablePassthrough
+#     rag_chain = (
+#         {"history": history_str, "context": retriever, "question": RunnablePassthrough()}
+#         | prompt
+#         | llm
+#         | StrOutputParser()
+#     )
+
+#     output = rag_chain.invoke(message)
+#     response = output.split('ANSWER: ')[-1].strip()
+#     return response
+
+# # Launch the Gradio chat interface
+# gr.ChatInterface(chatresponse).launch()
 
 # import gradio as gr
 # from langchain.schema import AIMessage, HumanMessage
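
One wiring detail in the new rag_chain is worth flagging: since chatresponse now calls rag_chain.invoke(inputs) with a dict, the bare retriever entry in the parallel map receives that entire dict rather than just the question text, and each RunnablePassthrough() forwards the whole dict into the {history} and {question} slots of the prompt. A common LCEL idiom for routing individual keys is operator.itemgetter; the following is a minimal, hypothetical sketch of that variant, reusing the retriever, prompt, and llm objects from the diff (a suggestion under those assumptions, not what the commit ships):

    from operator import itemgetter
    from langchain_core.output_parsers import StrOutputParser

    # Route each input key explicitly: the retriever sees only the question
    # string, while history and question pass through into the prompt.
    rag_chain = (
        {
            "context": itemgetter("question") | retriever,
            "history": itemgetter("history"),
            "question": itemgetter("question"),
        }
        | prompt
        | llm
        | StrOutputParser()
    )

    def chatresponse(message, history):
        # Gradio's tuple-format history: a list of [user, assistant] pairs.
        history_text = "\n".join(f"User: {u}\nAssistant: {a}" for u, a in history)
        return rag_chain.invoke({"history": history_text, "question": message}).strip()

With per-key routing, the placeholder "context": "" entry in inputs becomes unnecessary, because the retriever itself fills the CONTEXT slot from the question.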