Zwea Htet committed
Commit b821729 • 1 Parent(s): 06ea9a8

integrated langchain openai model
bloomLlama.json ADDED
The diff for this file is too large to render.

data/updated_calregs.txt ADDED
The diff for this file is too large to render.

main.py CHANGED
@@ -13,14 +13,16 @@ from fastapi.security import APIKeyHeader
 from fastapi.staticfiles import StaticFiles
 from fastapi.templating import Jinja2Templates
 
-from models.bloom import initialize_index
+from models import bloom, langOpen
 
 load_dotenv()
 app = FastAPI()
 app.secret_key = os.environ.get('SECRET_KEY', 'my-secret-key')
 # openai.api_key = os.environ.get("OPENAI_API_KEY")
 openai_api_key_header = APIKeyHeader(name="token")
-index = None
+
+bloomLlama = None
+langChainAI = None
 # index = initialize_index("index.json")
 OPENAI_API_KEY = ""
 
@@ -117,8 +119,15 @@ async def login(token: str, response: Response):
 @app.post("/initLlamaIndex")
 async def initLlamaIndex(token: str):
     openai.api_key = token
-    global index
-    index = initialize_index("index.json")
+    global bloomLlama
+    bloomLlama = bloom.initialize_index("bloomLlama.json")
+    return {"success": True}
+
+@app.post("/initLangOpen")
+async def initLangOpen(token: str):
+    openai.api_key = token
+    global langChainAI
+    langChainAI = langOpen.initialize_index("langOpen")
     return {"success": True}
 
 # Chatbot endpoint
@@ -128,20 +137,15 @@ async def chatbot(request: Request):
     context = {"request": request, "startTime": startTime}
     return templates.TemplateResponse("chatbot.html", context=context)
 
-
-# @app.get("/chatbot", response_class=HTMLResponse)
-# async def chatbot(request: Request, username: str = Cookie(None)):
-#     if not username:
-#         # Redirect back to the login page if the user is not authenticated
-#         return RedirectResponse(url="/login")
-#     else:
-#         # Render the chatbot page with the username passed to the template
-#         return templates.TemplateResponse("chatbot.html", {"request": request, "username": username})
-
 @app.get("/reply")
-def reply(input: str):
-    bot_reply = index.query(input)
-    return {"bot_reply": bot_reply}
-
+def reply(input: str, modelName: str):
+    if (modelName == "bloom"):
+        bot_reply = bloomLlama.query(input)
+        return {"bot_reply": bot_reply}
+    else:
+        bot_reply = langOpen.answer_question(langChainAI, input)
+        print("bot reply: ", bot_reply)
+        return {"bot_reply": bot_reply}
+
 if __name__ == "__main__":
     uvicorn.run("main:app", reload=True)
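
Note: a minimal client-side sketch of the endpoint flow introduced above, using httpx (already listed in requirements.txt). The base URL, token value, and question text are placeholders, and it assumes the app is running locally on uvicorn's default port.

import httpx

BASE_URL = "http://127.0.0.1:8000"  # assumed local uvicorn default
TOKEN = "sk-..."                    # placeholder OpenAI API key

with httpx.Client(base_url=BASE_URL, timeout=120) as client:
    # Build both indexes once, mirroring the two calls login.js makes after login.
    client.post("/initLlamaIndex", params={"token": TOKEN})
    client.post("/initLangOpen", params={"token": TOKEN})

    # /reply routes on modelName: "bloom" hits the LlamaIndex store,
    # any other value falls through to the LangChain/FAISS path.
    resp = client.get(
        "/reply",
        params={"input": "What does the regulation cover?", "modelName": "langChain"},
    )
    print(resp.json()["bot_reply"])
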
models/bloom.py CHANGED
@@ -15,10 +15,11 @@ llm_predictor = LLMPredictor(llm=CustomLLM())
 service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
 
 def initialize_index(index_name):
-    if os.path.exists(index_name):
-        return GPTSimpleVectorIndex.load_from_disk(index_name)
+    path = f"./vectorStores/{index_name}"
+    if os.path.exists(path):
+        return GPTSimpleVectorIndex.load_from_disk(path)
     else:
         documents = [Document(d) for d in data]
         index = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context)
-        index.save_to_disk(index_name)
+        index.save_to_disk(path)
     return index
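
Note: a hedged usage sketch of the relocated persistence path in models/bloom.py. It assumes openai.api_key is already set (main.py does this in initLlamaIndex before calling initialize_index) and that the process runs from the repo root so the ./vectorStores relative path resolves; the question string is a placeholder.

from models import bloom

# First call builds the index from the loaded documents and writes
# ./vectorStores/bloomLlama.json; subsequent calls reload it from disk.
index = bloom.initialize_index("bloomLlama.json")

result = index.query("Summarize the licensing requirements.")
print(result.response)  # chatbot.js reads this same .response field for the "bloom" model
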
models/langOpen.py CHANGED
@@ -1,14 +1,44 @@
 import os
 
+import openai
+from dotenv import load_dotenv
+from langchain.chains import LLMChain
 from langchain.embeddings.openai import OpenAIEmbeddings
+from langchain.llms import OpenAI
+from langchain.prompts import PromptTemplate
 from langchain.vectorstores import FAISS
 
-embedding_function = OpenAIEmbeddings()
+load_dotenv()
+
+embeddings = OpenAIEmbeddings()
+
+prompt_template = """Answer the question using the given context to the best of your ability.
+If you don't know, answer I don't know.
+Context: {context}
+Topic: {topic}"""
+
+PROMPT = PromptTemplate(
+    template=prompt_template, input_variables=["context", "topic"]
+)
+
+llm = OpenAI(temperature=0)
+
+chain = LLMChain(llm=llm, prompt=PROMPT)
 
 def initialize_index(index_name):
-    if os.path.exists(index_name):
-        return FAISS.load_local("./", embedding_function, index_name=index_name)
+    path = f"./vectorStores/{index_name}"
+    if os.path.exists(path=path):
+        return FAISS.load_local(folder_path=path, embeddings=embeddings)
     else:
-        faiss = FAISS.from_texts("./data/calregs.txt")
-        faiss.save_local("./")
+        faiss = FAISS.from_texts("./data/updated_calregs.txt", embedding=embeddings)
+        faiss.save_local(path)
     return faiss
+
+
+# faiss = initialize_index("langOpen")
+
+def answer_question(index, input):
+    docs = index.similarity_search(input, k=4)
+    inputs = [{"context": doc.page_content, "topic": input} for doc in docs]
+    result = chain.apply(inputs)[0]['text']
+    return result
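
Note: the same kind of sketch for the new LangChain path. OPENAI_API_KEY is assumed to be present in the environment (the load_dotenv() call above picks it up for OpenAIEmbeddings and OpenAI), and the question string is a placeholder.

from models import langOpen

# Loads the FAISS store from ./vectorStores/langOpen, or builds and saves it
# on first use, then retrieves the top-4 chunks and runs them through the
# module-level prompt/LLMChain.
store = langOpen.initialize_index("langOpen")
answer = langOpen.answer_question(store, "What records must be kept on site?")
print(answer)
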
models/vectorStores/langOpen/index.faiss ADDED
Binary file (160 kB).

models/vectorStores/langOpen/index.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6fc144adcdf1a75f209600074611237befd5bcab89913f0ed301f67765547e02
+size 2330
requirements.txt CHANGED
@@ -16,4 +16,5 @@ langchain
 python-dotenv
 python-multipart
 httpx
-openai
+openai
+faiss-cpu
static/scripts/chatbot.js CHANGED
@@ -1,13 +1,15 @@
 const msgerForm = get(".msger-inputarea");
 const msgerInput = get(".msger-input");
 const msgerChat = get(".msger-chat");
+const modelSelect = get(".msger-select");
 const isUserVerified = false;
 
 // Icons made by Freepik from www.flaticon.com
 const BOT_IMG = "../static/icons/dialogflow-insights-svgrepo-com.svg";
 const PERSON_IMG = "";
-const BOT_NAME = " ChatBot";
-const PERSON_NAME = "You";
+
+let BOT_NAME = " ChatBot";
+let PERSON_NAME = "You";
 
 msgerForm.addEventListener("submit", async (event) => {
   event.preventDefault();
@@ -15,11 +17,12 @@ msgerForm.addEventListener("submit", async (event) => {
   const msgText = msgerInput.value;
   if (!msgText) return;
 
+  const selectedModel = modelSelect.value;
+
   appendMessage(PERSON_NAME, PERSON_IMG, "right", msgText);
   msgerInput.value = "";
 
-  botResponse(msgText);
-  // authenticate(msgText);
+  botResponse(msgText, selectedModel);
 });
 
 function appendMessage(name, img, side, text) {
@@ -44,17 +47,22 @@ function appendMessage(name, img, side, text) {
   msgerChat.scrollTop += 500;
 }
 
-async function botResponse(text) {
+async function botResponse(text, selectedModel) {
   // Bot Response
   try {
-    const response = await fetch(`reply?input=${text}`);
+    const response = await fetch(
+      `reply?input=${text}&modelName=${selectedModel}`
+    );
     const data = await response.json();
-    const reply = data.bot_reply;
-    appendMessage(BOT_NAME, BOT_IMG, "left", reply.response);
+    let reply = data.bot_reply;
+    if (selectedModel == "bloom") {
+      reply = reply.response;
+    }
+    appendMessage(BOT_NAME + " - " + selectedModel, BOT_IMG, "left", reply);
   } catch (error) {
     console.error(error);
     appendMessage(
-      BOT_NAME,
+      BOT_NAME + "- " + selectedModel,
       BOT_IMG,
       "left",
       "Sorry, I'm not able to respond at the moment."
static/scripts/login.js CHANGED
@@ -23,13 +23,18 @@ loginForm.addEventListener("submit", async (event) => {
 
   if ("redirect" in loginData) {
     // if the login response contains a "redirect" property, it means the login was successful
-    const initResp = await fetch(`initLlamaIndex?token=${token}`, {
+    const initLlamaRep = await fetch(`initLlamaIndex?token=${token}`, {
       method: "POST",
     });
 
-    const initData = await initResp.json();
+    const initLangOpenRep = await fetch(`initLangOpen?token=${token}`, {
+      method: "POST",
+    });
+
+    const initLlamaData = await initLlamaRep.json();
+    const initLangOpenData = await initLangOpenRep.json();
 
-    if ("success" in initData) {
+    if ("success" in initLlamaData || "success" in initLangOpenData) {
       window.location.href = loginData.redirect;
     }
   } else if ("error" in loginData) {
static/styles/chatbot.css CHANGED
@@ -147,6 +147,13 @@ body {
   background: #ddd;
 }
 
+.msger-select {
+  margin-right: 10px;
+  background: #ddd;
+  font-weight: bold;
+  cursor: pointer;
+}
+
 .msger-send-btn {
   margin-left: 10px;
   background: rgb(0, 196, 65);
templates/chatbot.html CHANGED
@@ -31,6 +31,10 @@
   </main>
 
   <form class="msger-inputarea">
+    <select id="modelSelect" class="msger-select">
+      <option value="bloom" selected>Llama Bloom</option>
+      <option value="langChain">LangChain OpenAI</option>
+    </select>
     <input type="text" class="msger-input" id="textInput" placeholder="Enter your message...">
     <button type="submit" class="msger-send-btn">Send</button>
   </form>
index.json → vectorStores/bloomLlama.json RENAMED
File without changes

vectorStores/langOpen/index.faiss ADDED
Binary file (160 kB).

vectorStores/langOpen/index.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a46ff99c3fefe49dcfe253ffc702d6358be36780af555e716f8b1f86cb86deb8
+size 2330