LVKinyanjui committed on
Commit
e35ef75
·
1 Parent(s): 5e068a9

Renamed the right main app file

Browse files
Files changed (3) hide show
  1. app.py +75 -19
  2. st_image_chat.py +0 -93
  3. st_long_context_basic.py +37 -0
app.py CHANGED
@@ -1,37 +1,93 @@
1
- # CREDITS
2
- # https://gist.github.com/truevis/f31706b8af60e8c73d62b281bddb988f
3
-
4
  import streamlit as st
5
  from groq import Groq
6
-
7
  import os
 
 
 
 
8
  client = Groq(
9
  api_key=os.environ.get("GROQ_API_KEY"),
10
  )
11
 
12
- def generate_response(user_input):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
  stream = client.chat.completions.create(
14
- model="llama-3.2-3b-preview", #128K model
15
- messages=[
16
- {"role": "system", "content": "You are a helpful assistant"},
17
- {"role": "user", "content": user_input},
18
- ],
19
  temperature=0.1,
20
- # max_tokens=128000,
21
  top_p=1,
22
  stream=True,
23
  stop=None,
24
  )
25
-
26
  for chunk in stream:
27
  content = chunk.choices[0].delta.content
28
  if content:
29
- yield content # Yield content for streaming
 
 
 
 
 
 
 
30
 
31
- st.title("Groq API Response Streaming")
 
 
 
32
  user_input = st.chat_input('Message to Assistant...', key='prompt_input')
33
- if user_input: # Get user input
34
- with st.spinner("Generating response..."):
35
- st.write_stream(generate_response(user_input)) # Use st.write_stream to display streamed content
36
- st.markdown("Message: " + user_input)
37
- st.markdown("---") # Add a newline after the
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Dependencies
import streamlit as st
from groq import Groq

import os
import base64
from io import BytesIO

# Groq API client; authenticates with the GROQ_API_KEY environment variable.
client = Groq(
    api_key=os.environ.get("GROQ_API_KEY"),
)

# Chat history must live in session state so it survives Streamlit reruns.
if 'messages' not in st.session_state:
    st.session_state.messages = []
15
+
16
def encode_image(uploaded_file):
    """Encode an uploaded image as a base64 data URL.

    The MIME subtype is taken from the uploader-reported content type
    (e.g. ``image/png`` -> ``png``).
    """
    raw_bytes = uploaded_file.getvalue()
    subtype = uploaded_file.type.split('/')[-1]
    b64_payload = base64.b64encode(raw_bytes).decode('utf-8')
    return f"data:image/{subtype};base64,{b64_payload}"
20
+
21
def generate_response(messages, current_image=None):
    """Stream an assistant reply for the given chat history.

    When ``current_image`` (a base64 data URL) is supplied, the final
    user message is rewritten into multimodal form and the vision model
    is used; otherwise the text-only model handles the request.

    Yields response text chunks as they arrive from the Groq API.
    """
    # Work on a copy so the caller's history list is left untouched.
    payload = messages.copy()

    if current_image:
        # Promote the last user turn to text + image content parts.
        last_text = payload[-1]["content"]
        payload[-1] = {
            "role": "user",
            "content": [
                {"type": "text", "text": last_text},
                {
                    "type": "image_url",
                    "image_url": {"url": current_image},
                },
            ],
        }
        model_name = "llama-3.2-11b-vision-preview"
    else:
        model_name = "llama-3.2-3b-preview"

    stream = client.chat.completions.create(
        model=model_name,
        messages=payload,
        temperature=0.1,
        top_p=1,
        stream=True,
        stop=None,
    )

    for chunk in stream:
        piece = chunk.choices[0].delta.content
        if piece:
            yield piece
53
+
54
st.title("Groq Chat")

# Replay the stored conversation so it reappears after each rerun.
for past in st.session_state.messages:
    with st.chat_message(past["role"]):
        st.markdown(past["content"])

# Optional image attachment for the next message.
uploaded_file = st.file_uploader("Upload an image (optional)", type=['png', 'jpg', 'jpeg'])

user_input = st.chat_input('Message to Assistant...', key='prompt_input')

if user_input:
    # Persist only the text portion of the user turn in history.
    st.session_state.messages.append({"role": "user", "content": user_input})

    # Echo the user turn, including the attached image when present.
    with st.chat_message("user"):
        st.markdown(user_input)
        if uploaded_file:
            st.image(uploaded_file)

    # Stream the assistant reply into a single placeholder.
    with st.chat_message("assistant"):
        response_placeholder = st.empty()
        full_response = ""

        # Encode the attachment for the API call only when one exists.
        current_image = encode_image(uploaded_file) if uploaded_file else None

        with st.spinner("Generating response..."):
            for content in generate_response(st.session_state.messages, current_image):
                full_response += content
                # Trailing block cursor simulates live typing.
                response_placeholder.markdown(full_response + "▌")
        response_placeholder.markdown(full_response)

    # Store the completed reply (text only) in history.
    st.session_state.messages.append({"role": "assistant", "content": full_response})
st_image_chat.py DELETED
@@ -1,93 +0,0 @@
1
- import streamlit as st
2
- from groq import Groq
3
- import os
4
- import base64
5
- from io import BytesIO
6
-
7
- # Initialize Groq client
8
- client = Groq(
9
- api_key=os.environ.get("GROQ_API_KEY"),
10
- )
11
-
12
- # Initialize session state for chat history if it doesn't exist
13
- if 'messages' not in st.session_state:
14
- st.session_state.messages = []
15
-
16
- def encode_image(uploaded_file):
17
- bytes_data = uploaded_file.getvalue()
18
- base64_image = base64.b64encode(bytes_data).decode('utf-8')
19
- return f"data:image/{uploaded_file.type.split('/')[-1]};base64,{base64_image}"
20
-
21
- def generate_response(messages, current_image=None):
22
- # Create a copy of messages for the API call
23
- api_messages = messages.copy()
24
-
25
- # If there's a current image, add it to the last user message
26
- if current_image:
27
- api_messages[-1] = {
28
- "role": "user",
29
- "content": [
30
- {"type": "text", "text": api_messages[-1]["content"]},
31
- {
32
- "type": "image_url",
33
- "image_url": {"url": current_image}
34
- }
35
- ]
36
- }
37
-
38
- model = "llama-3.2-11b-vision-preview" if current_image else "llama-3.2-3b-preview"
39
-
40
- stream = client.chat.completions.create(
41
- model=model,
42
- messages=api_messages,
43
- temperature=0.1,
44
- top_p=1,
45
- stream=True,
46
- stop=None,
47
- )
48
-
49
- for chunk in stream:
50
- content = chunk.choices[0].delta.content
51
- if content:
52
- yield content
53
-
54
- st.title("Groq Chat")
55
-
56
- # Display chat history
57
- for message in st.session_state.messages:
58
- with st.chat_message(message["role"]):
59
- st.markdown(message["content"])
60
-
61
- # File uploader for images
62
- uploaded_file = st.file_uploader("Upload an image (optional)", type=['png', 'jpg', 'jpeg'])
63
-
64
- # Get user input
65
- user_input = st.chat_input('Message to Assistant...', key='prompt_input')
66
-
67
- if user_input:
68
- # Add user message to chat history (text only)
69
- st.session_state.messages.append({"role": "user", "content": user_input})
70
-
71
- # Display user message with image if present
72
- with st.chat_message("user"):
73
- st.markdown(user_input)
74
- if uploaded_file:
75
- st.image(uploaded_file)
76
-
77
- # Generate and display assistant response
78
- with st.chat_message("assistant"):
79
- response_placeholder = st.empty()
80
- full_response = ""
81
-
82
- # Prepare image for API call if present
83
- current_image = encode_image(uploaded_file) if uploaded_file else None
84
-
85
- # Stream the response
86
- with st.spinner("Generating response..."):
87
- for content in generate_response(st.session_state.messages, current_image):
88
- full_response += content
89
- response_placeholder.markdown(full_response + "▌")
90
- response_placeholder.markdown(full_response)
91
-
92
- # Add assistant response to chat history (text only)
93
- st.session_state.messages.append({"role": "assistant", "content": full_response})
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
st_long_context_basic.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# CREDITS
# https://gist.github.com/truevis/f31706b8af60e8c73d62b281bddb988f

import streamlit as st
from groq import Groq

import os

# Groq client; reads the API key from the GROQ_API_KEY environment variable.
client = Groq(
    api_key=os.environ.get("GROQ_API_KEY"),
)
11
+
12
def generate_response(user_input):
    """Stream a chat completion for a single user prompt.

    Yields text chunks from the Groq API as they arrive.
    """
    completion_stream = client.chat.completions.create(
        model="llama-3.2-3b-preview",  # 128K-context model
        messages=[
            {"role": "system", "content": "You are a helpful assistant"},
            {"role": "user", "content": user_input},
        ],
        temperature=0.1,
        # max_tokens=128000,
        top_p=1,
        stream=True,
        stop=None,
    )

    for chunk in completion_stream:
        delta_text = chunk.choices[0].delta.content
        if delta_text:
            # Skip empty deltas; yield only real text for streaming display.
            yield delta_text
30
+
31
st.title("Groq API Response Streaming")

user_input = st.chat_input('Message to Assistant...', key='prompt_input')

if user_input:
    with st.spinner("Generating response..."):
        # st.write_stream renders the generator output incrementally.
        st.write_stream(generate_response(user_input))
    st.markdown("Message: " + user_input)
    st.markdown("---")  # horizontal rule separating this exchange from the next