towardsinnovationlab commited on
Commit
88a3d14
·
verified ·
1 Parent(s): 17bea4a

Upload 2_chatbot.py

Browse files
Files changed (1) hide show
  1. pages/2_chatbot.py +108 -0
pages/2_chatbot.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Streamlit chat page: one conversation against a user-selected LLM.

The user picks a model in the sidebar, pastes the matching provider API key,
and chats; the full message history lives in ``st.session_state["messages"]``
and is replayed on every Streamlit rerun.
"""

from openai import OpenAI
from mistralai import Mistral
from llamaapi import LlamaAPI  # NOTE(review): unused — Llama models are served via the OpenAI-compatible endpoint below
import streamlit as st
from IPython.display import display, Math  # NOTE(review): unused on this page — candidate for removal

# Greeting seeded into a fresh (or freshly reset) conversation.
_GREETING = {"role": "assistant", "content": "How can I help you?"}

# Sidebar label -> (provider, model id). ``provider`` picks the client:
#   "openai"  -> OpenAI()
#   "mistral" -> Mistral()
#   "llama"   -> OpenAI() pointed at the OpenAI-compatible llama-api endpoint
# Dict order defines the selectbox order (matches the original tuple).
# NOTE(review): a dead, unselectable 'Mathstral' branch (model
# "mistralai/mathstral-7B-v0.1") was removed — it was unreachable and used a
# HuggingFace repo id, not a Mistral API model id.
_MODELS = {
    'o3-mini': ("openai", "o3-mini-2025-01-31"),
    'o3': ("openai", "o3"),
    'GPT-5': ("openai", "gpt-5-2025-08-07"),
    'GPT-4o': ("openai", "gpt-4o"),
    'GPT-4o-mini': ("openai", "gpt-4o-mini"),
    # BUG FIX: previously called "gpt-4-turbo-2024-04-09" (a GPT-4 Turbo id).
    'GPT-4.1': ("openai", "gpt-4.1"),
    # BUG FIX: previously called "gpt-3.5-turbo" — the label promised o4-mini.
    'o4-mini': ("openai", "o4-mini"),
    'Mixtral 8x7B': ("mistral", "open-mixtral-8x7b"),
    'Mixtral 8x22B': ("mistral", "open-mixtral-8x22b"),
    'Mistral Large 2': ("mistral", "mistral-large-2407"),
    'Mistral NeMo': ("mistral", "open-mistral-nemo-2407"),
    'Llama-3.1-405B': ("llama", "llama3.1-405b"),
    'Llama-3.2-3B': ("llama", "llama3.2-3b"),
    'Llama-3.3-70B': ("llama", "llama3.3-70b"),
}

# Sidebar: model selection, API key entry, and conversation reset.
with st.sidebar:
    option = st.selectbox('Please select your model', tuple(_MODELS))
    st.write('You selected:', option)

    # API Key input (masked; stored under a stable session-state key).
    api_key = st.text_input("Please Copy & Paste your API_KEY", key="chatbot_api_key", type="password")

    # Reset button.
    # BUG FIX: previously reset to [] — because the greeting is only seeded
    # when the "messages" key is absent, the assistant greeting never
    # reappeared after a reset. Re-seed it explicitly instead.
    if st.button('Reset Conversation'):
        st.session_state["messages"] = [dict(_GREETING)]
        st.info("Please change your API_KEY if you change model.")

# Title and caption
st.title("💬 AI Chatbot")
st.caption("🚀 Your Personal AI Assistant powered by Streamlit and LLMs")

# Seed the conversation on the very first run of this session.
if 'messages' not in st.session_state:
    st.session_state["messages"] = [dict(_GREETING)]

# Replay the whole history — Streamlit reruns the script on every interaction.
for msg in st.session_state.messages:
    st.chat_message(msg["role"]).write(msg["content"])

# Chat input: falsy (empty) input skips the turn entirely.
if prompt := st.chat_input():
    if not api_key:
        st.info("Please add your API_KEY to go ahead.")
        st.stop()

    # Record and echo the user's message before calling the provider.
    st.session_state.messages.append({"role": "user", "content": prompt})
    st.chat_message("user").write(prompt)

    provider, model_id = _MODELS.get(option, (None, None))
    if provider is None:
        st.error("Selected model is not supported.")
        st.stop()

    # Dispatch to the right client. Wrapped so a bad key / network failure
    # shows an error instead of crashing the page mid-render.
    try:
        if provider == "mistral":
            client = Mistral(api_key=api_key)
            response = client.chat.complete(model=model_id, messages=st.session_state.messages)
        elif provider == "llama":
            # Llama models are exposed through an OpenAI-compatible API.
            client = OpenAI(api_key=api_key, base_url="https://api.llama-api.com")
            response = client.chat.completions.create(model=model_id, messages=st.session_state.messages, max_tokens=1000)
        else:
            client = OpenAI(api_key=api_key)
            response = client.chat.completions.create(model=model_id, messages=st.session_state.messages)
    except Exception as err:  # surface provider/network errors to the user
        st.error(f"Request failed: {err}")
        st.stop()

    # Both OpenAI and Mistral clients expose .choices[0].message.content.
    msg = response.choices[0].message.content
    st.session_state.messages.append({"role": "assistant", "content": msg})
    st.chat_message("assistant").write(msg)