souravmighty committed on
Commit
1187c2e
1 Parent(s): fc7c4e4

Remove persist directory

Browse files
Files changed (1) hide show
  1. app.py +23 -20
app.py CHANGED
@@ -34,13 +34,34 @@ async def on_chat_start():
34
  [
35
  Select(
36
  id="Model",
37
- label="Open Source Model",
38
  values=["llama3-8b-8192", "llama3-70b-8192", "mixtral-8x7b-32768", "gemma-7b-it"],
39
  initial_index=0,
40
  )
41
  ]
42
  ).send()
43
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
  files = None #Initialize variable to store uploaded files
45
 
46
  # Wait for the user to upload a file
@@ -77,32 +98,14 @@ async def on_chat_start():
77
  embeddings = SentenceTransformerEmbeddings(model_name = "sentence-transformers/all-MiniLM-L6-v2")
78
  #embeddings = OllamaEmbeddings(model="llama2:7b")
79
  docsearch = await cl.make_async(Chroma.from_texts)(
80
- texts, embeddings, metadatas=metadatas, persist_directory='./chroma_db'
81
  )
82
- docsearch.persist()
83
 
84
  # Let the user know that the system is ready
85
  msg.content = f"Processing `{file.name}` done. You can now ask questions!"
86
  await msg.update()
87
 
88
- await setup_agent(settings)
89
-
90
-
91
- @cl.on_settings_update
92
- async def setup_agent(settings):
93
- print("Setup agent with settings:", settings)
94
-
95
- user_env = cl.user_session.get("env")
96
- os.environ["GROQ_API_KEY"] = user_env.get("GROQ_API_KEY")
97
-
98
- # embeddings = OllamaEmbeddings(model="nomic-embed-text")
99
- embeddings = SentenceTransformerEmbeddings(model_name = "sentence-transformers/all-MiniLM-L6-v2")
100
  memory=get_memory()
101
-
102
- docsearch = await cl.make_async(Chroma)(
103
- persist_directory="./chroma_db",
104
- embedding_function=embeddings
105
- )
106
 
107
 
108
  # Create a chain that uses the Chroma vector store
 
34
  [
35
  Select(
36
  id="Model",
37
+ label="Choose your favorite LLM:",
38
  values=["llama3-8b-8192", "llama3-70b-8192", "mixtral-8x7b-32768", "gemma-7b-it"],
39
  initial_index=0,
40
  )
41
  ]
42
  ).send()
43
 
44
+ await setup_agent(settings)
45
+
46
+
47
+ @cl.on_settings_update
48
+ async def setup_agent(settings):
49
+
50
+ user_env = cl.user_session.get("env")
51
+ os.environ["GROQ_API_KEY"] = user_env.get("GROQ_API_KEY")
52
+
53
+ # embeddings = OllamaEmbeddings(model="nomic-embed-text")
54
+ # embeddings = SentenceTransformerEmbeddings(model_name = "sentence-transformers/all-MiniLM-L6-v2")
55
+ # memory=get_memory()
56
+
57
+ # docsearch = await cl.make_async(Chroma)(
58
+ # persist_directory="./chroma_db",
59
+ # embedding_function=embeddings
60
+ # )
61
+
62
+ msg = cl.Message(content = f"You are using '{settings['Model']}' as LLM.")
63
+ await msg.send()
64
+
65
  files = None #Initialize variable to store uploaded files
66
 
67
  # Wait for the user to upload a file
 
98
  embeddings = SentenceTransformerEmbeddings(model_name = "sentence-transformers/all-MiniLM-L6-v2")
99
  #embeddings = OllamaEmbeddings(model="llama2:7b")
100
  docsearch = await cl.make_async(Chroma.from_texts)(
101
+ texts, embeddings, metadatas=metadatas
102
  )
 
103
 
104
  # Let the user know that the system is ready
105
  msg.content = f"Processing `{file.name}` done. You can now ask questions!"
106
  await msg.update()
107
 
 
 
 
 
 
 
 
 
 
 
 
 
108
  memory=get_memory()
 
 
 
 
 
109
 
110
 
111
  # Create a chain that uses the Chroma vector store