ryanrwatkins committed
Commit 9f50f0d
Parent: 0247216

Update app.py

Files changed (1): app.py (+2, -2)
app.py CHANGED
@@ -101,7 +101,7 @@ def submit_message(prompt, prompt_template, temperature, max_tokens, context_len
     # completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + history[-context_length*2:] + [prompt_msg], temperature=temperature, max_tokens=max_tokens)
 
     completion = ChatVectorDBChain.from_llm(OpenAI(temperature=temperature, max_tokens=max_tokens, model_name="gpt-3.5-turbo"), vectordb, return_source_documents=True)
-    result = completion({"question": system_prompt + history[-context_length*2:] + [prompt_msg], "chat_history": chat_history})
+    result = completion({"question": system_prompt + [prompt_msg], "chat_history": history[-context_length*2:]})
     # from https://blog.devgenius.io/chat-with-document-s-using-openai-chatgpt-api-and-text-embedding-6a0ce3dc8bc8
 
     history.append(prompt_msg)
@@ -162,7 +162,7 @@ with gr.Blocks(css=css) as demo:
     prompt_template_preview = gr.Markdown(elem_id="prompt_template_preview")
     with gr.Accordion("Advanced parameters", open=False):
         temperature = gr.Slider(minimum=0, maximum=2.0, value=0.7, step=0.1, label="Flexibility", info="Higher = more creative/chaotic, Lower = just the guru")
-        max_tokens = gr.Slider(minimum=100, maximum=400, value=400, step=1, label="Max tokens per response")
+        max_tokens = gr.Slider(minimum=100, maximum=400, value=200, step=1, label="Max tokens per response")
         context_length = gr.Slider(minimum=1, maximum=5, value=2, step=1, label="Context length", info="Number of previous questions you have asked. Be careful with high values, it can blow up the token budget quickly.")

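For readers unfamiliar with the legacy LangChain API being called in the first hunk: ChatVectorDBChain takes the new question and the running conversation as two separate inputs, which is what this change restores by moving the trimmed history out of "question" and into "chat_history". Below is a minimal sketch of the corrected call pattern; the vector store, questions, and history entries are hypothetical stand-ins for the app's real objects, and it assumes an old LangChain release that still ships ChatVectorDBChain (later versions replace it with ConversationalRetrievalChain).

```python
from langchain.chains import ChatVectorDBChain
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.vectorstores import Chroma

# Hypothetical stand-in for the app's pre-built `vectordb`.
vectordb = Chroma(persist_directory="db", embedding_function=OpenAIEmbeddings())

chain = ChatVectorDBChain.from_llm(
    OpenAI(temperature=0.7, max_tokens=200, model_name="gpt-3.5-turbo"),
    vectordb,
    return_source_documents=True,
)

# "question" carries only the current turn; prior exchanges go under
# "chat_history". The pre-fix code had folded the history into the
# question and passed an unrelated variable as the history.
result = chain({
    "question": "What does the document say about X?",          # current turn
    "chat_history": [("Earlier question?", "Earlier answer.")],  # prior turns
})
print(result["answer"])
print(result["source_documents"])
```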
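The second hunk is a pure default change: the max-tokens slider keeps its 100-400 range but now starts at 200 instead of 400, halving the default response budget. A minimal standalone Gradio sketch of the same control (the surrounding layout is illustrative, not the app's full UI):

```python
import gradio as gr

with gr.Blocks() as demo:
    with gr.Accordion("Advanced parameters", open=False):
        # Same 100-400 range as before; only the starting value changes,
        # so responses default to a smaller token budget.
        max_tokens = gr.Slider(
            minimum=100,
            maximum=400,
            value=200,
            step=1,
            label="Max tokens per response",
        )

demo.launch()
```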