ryanrwatkins committed on
Commit
2da8950
1 Parent(s): b75f96c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -14
app.py CHANGED
@@ -7,13 +7,21 @@ import langchain
7
  import chromadb
8
  #import faiss
9
 
 
 
 
 
 
 
10
 
11
 
12
- prompt_templates = {"All Needs Gurus": "I want you to act as a needs assessment expert."}
13
-
14
  def get_empty_state():
15
  return {"total_tokens": 0, "messages": []}
16
 
 
 
 
 
17
  def download_prompt_templates():
18
  url = "https://huggingface.co/spaces/ryanrwatkins/needs/raw/main/gurus.txt"
19
  try:
@@ -34,14 +42,12 @@ def download_prompt_templates():
34
  choices = choices[:1] + sorted(choices[1:])
35
  return gr.update(value=choices[0], choices=choices)
36
 
37
-
38
-
39
-
40
-
41
  def on_prompt_template_change(prompt_template):
42
  if not isinstance(prompt_template, str): return
43
  return prompt_templates[prompt_template]
44
 
 
 
45
  def submit_message(prompt, prompt_template, temperature, max_tokens, context_length, state):
46
 
47
  openai.api_key = os.environ['openai_key']
@@ -60,17 +66,14 @@ def submit_message(prompt, prompt_template, temperature, max_tokens, context_len
60
 
61
 
62
  try:
63
- completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + history[-context_length*2:] + [prompt_msg], temperature=temperature, max_tokens=max_tokens)
64
 
65
  # completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + history[-context_length*2:] + [prompt_msg], temperature=temperature, max_tokens=max_tokens)
66
 
67
- # vectordb = "./embeddings"
68
-
69
-
70
-
71
- # completion = ChatVectorDBChain.from_llm(OpenAI(temperature=0, model_name="gpt-3.5-turbo"), vectordb, return_source_documents=True)
72
- # query = "Have Romeo and Juliet spent the night together? Provide a verbose answer, referencing passages from the book."
73
- # result = completion({"question": query, "chat_history": chat_history})
74
  # from https://blog.devgenius.io/chat-with-document-s-using-openai-chatgpt-api-and-text-embedding-6a0ce3dc8bc8
75
 
76
  history.append(prompt_msg)
 
7
  import chromadb
8
  #import faiss
9
 
10
+ # Used Chroma in Colab to create vector embeddings; I then saved them to HuggingFace, so now I have to set it to use them here.
11
+ from chromadb.config import Settings
12
+ client = chromadb.Client(Settings(
13
+ chroma_db_impl="duckdb+parquet",
14
+ persist_directory="./embeddings" # Optional, defaults to .chromadb/ in the current directory
15
+ ))
16
 
17
 
 
 
18
  def get_empty_state():
19
  return {"total_tokens": 0, "messages": []}
20
 
21
+
22
+ #Initial prompt template, others added below from TXT file
23
+ prompt_templates = {"All Needs Gurus": "I want you to act as a needs assessment expert."}
24
+
25
  def download_prompt_templates():
26
  url = "https://huggingface.co/spaces/ryanrwatkins/needs/raw/main/gurus.txt"
27
  try:
 
42
  choices = choices[:1] + sorted(choices[1:])
43
  return gr.update(value=choices[0], choices=choices)
44
 
 
 
 
 
45
  def on_prompt_template_change(prompt_template):
46
  if not isinstance(prompt_template, str): return
47
  return prompt_templates[prompt_template]
48
 
49
+
50
+
51
  def submit_message(prompt, prompt_template, temperature, max_tokens, context_length, state):
52
 
53
  openai.api_key = os.environ['openai_key']
 
66
 
67
 
68
  try:
69
+ #completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + history[-context_length*2:] + [prompt_msg], temperature=temperature, max_tokens=max_tokens)
70
 
71
  # completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + history[-context_length*2:] + [prompt_msg], temperature=temperature, max_tokens=max_tokens)
72
 
73
+ persist_directory = "./embeddings"
74
+ vectordb = Chroma.from_documents(romeoandjuliet_doc, embeddings, persist_directory=persist_directory)
75
+ completion = ChatVectorDBChain.from_llm(OpenAI(temperature=temperature, max_tokens=max_tokens, model_name="gpt-3.5-turbo"), vectordb, return_source_documents=True)
76
+ result = completion({"question": system_prompt + history[-context_length*2:] + [prompt_msg]})
 
 
 
77
  # from https://blog.devgenius.io/chat-with-document-s-using-openai-chatgpt-api-and-text-embedding-6a0ce3dc8bc8
78
 
79
  history.append(prompt_msg)