wop committed on
Commit
72f42a0
1 Parent(s): 820263c

Update app.py

Files changed (1)
  1. app.py +97 -95
app.py CHANGED
@@ -7,132 +7,134 @@ import datetime
  import json
 
  _ = load_dotenv(find_dotenv())
- st.set_page_config(page_icon="", layout="wide", page_title="...")
 
  def icon(emoji: str):
-     """Shows an emoji as a Notion-style page icon."""
-     st.write(
-         f'<span style="font-size: 78px; line-height: 1">{emoji}</span>',
-         unsafe_allow_html=True,
-     )
- 
 
  icon("⚡")
 
  st.subheader("Chatbot", divider="rainbow", anchor=False)
 
  client = Groq(
-     api_key=os.environ['GROQ_API_KEY'],
  )
 
- # Read saved prompts from file
- with open("saved_prompts.txt", "r") as f:
-     saved_prompts = f.read().split("<|>")
- 
- prompt_names = [p.split(" ", 1)[0] for p in saved_prompts]
- prompt_map = {name: prompt for name, prompt in zip(prompt_names, saved_prompts)}
- 
  # Initialize chat history and selected model
  if "messages" not in st.session_state:
-     st.session_state.messages = []
 
  if "selected_model" not in st.session_state:
-     st.session_state.selected_model = None
 
  # Define model details
  models = {
-     "mixtral-8x7b-32768": {
-         "name": "Mixtral-8x7b-Instruct-v0.1",
-         "tokens": 32768,
-         "developer": "Mistral",
-     },
-     "gemma-7b-it": {"name": "Gemma-7b-it", "tokens": 8192, "developer": "Google"},
-     "llama2-70b-4096": {"name": "LLaMA2-70b-chat", "tokens": 4096, "developer": "Meta"},
-     "llama3-70b-8192": {"name": "LLaMA3-70b-8192", "tokens": 8192, "developer": "Meta"},
-     "llama3-8b-8192": {"name": "LLaMA3-8b-8192", "tokens": 8192, "developer": "Meta"},
  }
 
  # Layout for model selection and max_tokens slider
- col1, col2 = st.columns(2)
 
  with col1:
-     def update_prompt(selected_prompt):  # Callback function for dropdown
-         global prompt
-         prompt = prompt_map[selected_prompt]
- 
-     prompt_option = st.selectbox(
-         "Choose a prompt:",
-         options=list(models.keys()),
-         format_func=lambda x: models[x]["name"],
-         index=0,  # Default to the first model in the list
-         on_change=update_prompt,  # Call update_prompt on selection change
-     )
- 
- # Chat input without value argument
- if prompt := st.chat_input("Enter your prompt here..."):
-     st.session_state.messages.append({"role": "user", "content": prompt})
- 
-     with st.chat_message("user", avatar="❓"):
-         st.markdown(prompt)
 
  # Detect model change and clear chat history if model has changed
  if st.session_state.selected_model != model_option:
-     st.session_state.messages = []
-     st.session_state.selected_model = model_option
 
  max_tokens_range = models[model_option]["tokens"]
 
  with col2:
-     # Adjust max_tokens slider dynamically based on the selected model
-     max_tokens = st.slider(
-         "Max Tokens:",
-         min_value=512,  # Minimum value to allow some flexibility
-         max_value=max_tokens_range,
-         # Default value or max allowed if less
-         value=min(32768, max_tokens_range),
-         step=512,
-         help=f"Adjust the maximum number of tokens (words) for the model's response. Max for selected model: {max_tokens_range}",
-     )
 
  # Display chat messages from history on app rerun
  for message in st.session_state.messages:
-     avatar = "" if message["role"] == "assistant" else "❓"
-     with st.chat_message(message["role"], avatar=avatar):
-         st.markdown(message["content"])
 
  def generate_chat_responses(chat_completion) -> Generator[str, None, None]:
-     """Yield chat response content from the Groq API response."""
-     for chunk in chat_completion:
-         if chunk.choices[0].delta.content:
-             yield chunk.choices[0].delta.content
- 
- # Fetch response from Groq API
- try:
-     chat_completion = client.chat.completions.create(
-         model=model_option,
-         messages=[
-             {"role": m["role"], "content": m["content"]}
-             for m in st.session_state.messages
-         ],
-         max_tokens=max_tokens,
-         stream=True,
-     )
- 
-     # Use the generator function with st.write_stream
-     with st.chat_message("assistant", avatar=""):
-         chat_responses_generator = generate_chat_responses(chat_completion)
-         full_response = st.write_stream(chat_responses_generator)
- except Exception as e:
-     st.error(e, icon="")
- 
- # Append the full response to session_state.messages
- if isinstance(full_response, str):
-     st.session_state.messages.append(
-         {"role": "assistant", "content": full_response}
-     )
- else:
-     # Handle the case where full_response is not a string
-     combined_response = "\n".join(str(item) for item in full_response)
-     st.session_state.messages.append(
-         {"role": "assistant", "content": combined_response}
-     )
 
  import json
 
  _ = load_dotenv(find_dotenv())
+ st.set_page_config(page_icon="💬", layout="wide", page_title="...")
 
  def icon(emoji: str):
+     """Shows an emoji as a Notion-style page icon."""
+     st.write(
+         f'<span style="font-size: 78px; line-height: 1">{emoji}</span>',
+         unsafe_allow_html=True,
+     )
 
  icon("⚡")
 
  st.subheader("Chatbot", divider="rainbow", anchor=False)
 
  client = Groq(
+     api_key=os.environ['GROQ_API_KEY'],
  )
 
  # Initialize chat history and selected model
  if "messages" not in st.session_state:
+     st.session_state.messages = []
 
  if "selected_model" not in st.session_state:
+     st.session_state.selected_model = None
+ 
+ # prompts
+ prompts = {
+     "none": "",
+     "python interpreter": "emulate the output of this program like you are the python interpreter, only answer with the result of this emulation. Ask the user for each missing input, sequentially and only once per message, in the same way a python interpreter would. Do not fill in for my inputs. Take my inputs from the message directly after you ask for input."
+ }
 
  # Define model details
  models = {
+     "mixtral-8x7b-32768": {
+         "name": "Mixtral-8x7b-Instruct-v0.1",
+         "tokens": 32768,
+         "developer": "Mistral",
+     },
+     "gemma-7b-it": {"name": "Gemma-7b-it", "tokens": 8192, "developer": "Google"},
+     "llama2-70b-4096": {"name": "LLaMA2-70b-chat", "tokens": 4096, "developer": "Meta"},
+     "llama3-70b-8192": {"name": "LLaMA3-70b-8192", "tokens": 8192, "developer": "Meta"},
+     "llama3-8b-8192": {"name": "LLaMA3-8b-8192", "tokens": 8192, "developer": "Meta"},
  }
 
  # Layout for model selection and max_tokens slider
+ col1, col2, col3 = st.columns(3)
 
  with col1:
+     model_option = st.selectbox(
+         "Choose a model:",
+         options=list(models.keys()),
+         format_func=lambda x: models[x]["name"],
+         index=0,  # Default to the first model in the list
+     )
 
  # Detect model change and clear chat history if model has changed
  if st.session_state.selected_model != model_option:
+     st.session_state.messages = []
+     st.session_state.selected_model = model_option
 
  max_tokens_range = models[model_option]["tokens"]
 
  with col2:
+     # Adjust max_tokens slider dynamically based on the selected model
+     max_tokens = st.slider(
+         "Max Tokens:",
+         min_value=512,  # Minimum value to allow some flexibility
+         max_value=max_tokens_range,
+         # Default value or max allowed if less
+         value=min(32768, max_tokens_range),
+         step=512,
+         help=f"Adjust the maximum number of tokens (words) for the model's response. Max for selected model: {max_tokens_range}",
+     )
+ 
+ with col3:
+     prompt_selection = st.selectbox(
+         "Choose a prompt:",
+         options=list(prompts.keys()),
+         format_func=lambda x: x,  # prompts maps display names to plain strings
+         index=0,
+     )
 
  # Display chat messages from history on app rerun
  for message in st.session_state.messages:
+     avatar = "🧠" if message["role"] == "assistant" else "❓"
+     with st.chat_message(message["role"], avatar=avatar):
+         st.markdown(message["content"])
 
  def generate_chat_responses(chat_completion) -> Generator[str, None, None]:
+     """Yield chat response content from the Groq API response."""
+     for chunk in chat_completion:
+         if chunk.choices[0].delta.content:
+             yield chunk.choices[0].delta.content
+ 
+ selected_prompt_text = prompts.get(prompt_selection)
+ 
+ if prompt := (st.chat_input("Enter your prompt here...") or selected_prompt_text):
+     st.session_state.messages.append({"role": "user", "content": prompt})
+ 
+ with st.chat_message("user", avatar=""):
109
+         st.markdown(prompt)
+ 
+     # Fetch response from Groq API
+     try:
+         chat_completion = client.chat.completions.create(
+             model=model_option,
+             messages=[
+                 {"role": m["role"], "content": m["content"]}
+                 for m in st.session_state.messages
+             ],
+             max_tokens=max_tokens,
+             stream=True,
+         )
+ 
+         # Use the generator function with st.write_stream
+         with st.chat_message("assistant", avatar="🧠"):
+             chat_responses_generator = generate_chat_responses(chat_completion)
+             full_response = st.write_stream(chat_responses_generator)
+     except Exception as e:
+         st.error(e, icon="🚨")
+ 
+     # Append the full response to session_state.messages
+     if isinstance(full_response, str):
+         st.session_state.messages.append(
+             {"role": "assistant", "content": full_response}
+         )
+     else:
+         # Handle the case where full_response is not a string
+         combined_response = "\n".join(str(item) for item in full_response)
+         st.session_state.messages.append(
+             {"role": "assistant", "content": combined_response}
+         )
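
A minimal sketch, not part of this commit, of an alternative way to apply the selected prompt: send it to the Groq API as a system message instead of substituting it for the user's chat input. It assumes the `client`, `prompts`, `prompt_selection`, `model_option`, and `max_tokens` objects defined in app.py above.

import streamlit as st

# Assumes `client`, `prompts`, `prompt_selection`, `model_option`, and
# `max_tokens` already exist, as in the app.py shown above.
system_prompt = prompts.get(prompt_selection, "")

if user_input := st.chat_input("Enter your prompt here..."):
    st.session_state.messages.append({"role": "user", "content": user_input})

    # Prepend the selected prompt once per request, without storing it in the
    # visible chat history.
    request_messages = []
    if system_prompt:
        request_messages.append({"role": "system", "content": system_prompt})
    request_messages += [
        {"role": m["role"], "content": m["content"]}
        for m in st.session_state.messages
    ]

    chat_completion = client.chat.completions.create(
        model=model_option,
        messages=request_messages,
        max_tokens=max_tokens,
        stream=True,
    )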