Sagar23p commited on
Commit
cbfb0d8
1 Parent(s): 4bed33f

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +164 -0
app.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ### app.py code is taken from https://huggingface.co/spaces/ngebodh/SimpleChatbot-Backup/blob/main/app.py
2
+ ### https://medium.com/@nigelgebodh/large-language-models-chatting-with-ai-chatbots-from-google-mistral-ai-and-hugging-face-b33efedea38d
3
+
4
+ """ Simple Chatbot
5
+ @author: Sagar Padhiyar
6
+ @email: spadhiyar230595@gmail.com
7
+ """
8
+
9
+ import streamlit as st
10
+ from openai import OpenAI
11
+ import os
12
+ import sys
13
+ from dotenv import load_dotenv, dotenv_values
14
+ load_dotenv()
15
+
16
+
17
+
18
+
19
+
20
# OpenAI-compatible client pointed at the Hugging Face Inference API.
# The token is read from the HUGGINGFACEHUB_API_TOKEN environment variable
# (populated above by load_dotenv()).
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN'),
)
25
+
26
+
27
+
28
+
29
# Supported models: sidebar display name -> Hugging Face repository id.
model_links = {
    "Mistral": "mistralai/Mistral-7B-Instruct-v0.2",
    "Gemma-7B": "google/gemma-7b-it",
    "Gemma-2B": "google/gemma-2b-it",
    "Zephyr-7B-β": "HuggingFaceH4/zephyr-7b-beta",
    # "Llama-2": "meta-llama/Llama-2-7b-chat-hf",
}
38
+
39
# Sidebar display info per model: markdown description + logo image URL.
# Fixes two user-visible typos from the original data: "team as has over"
# -> "and has over", and the duplicated word in "trained on on a mix".
# NOTE(review): the "Zephyr-7B" entry has no counterpart in model_links, so
# it is never selectable and this entry is currently unused — confirm intent.
model_info = {
    "Mistral": {
        'description':
            "The Mistral model is a **Large Language Model (LLM)** that's able "
            "to have question and answer interactions.\n"
            "\nIt was created by the "
            "[**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) "
            "team and has over **7 billion parameters.**\n",
        'logo': 'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp',
    },
    "Gemma-7B": {
        'description':
            "The Gemma model is a **Large Language Model (LLM)** that's able "
            "to have question and answer interactions.\n"
            "\nIt was created by the "
            "[**Google's AI Team**](https://blog.google/technology/developers/gemma-open-models/) "
            "and has over **7 billion parameters.**\n",
        'logo': 'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg',
    },
    "Gemma-2B": {
        'description':
            "The Gemma model is a **Large Language Model (LLM)** that's able "
            "to have question and answer interactions.\n"
            "\nIt was created by the "
            "[**Google's AI Team**](https://blog.google/technology/developers/gemma-open-models/) "
            "and has over **2 billion parameters.**\n",
        'logo': 'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg',
    },
    "Zephyr-7B": {
        'description':
            "The Zephyr model is a **Large Language Model (LLM)** that's able "
            "to have question and answer interactions.\n"
            "\nFrom Huggingface: \n"
            "Zephyr is a series of language models that are trained to act as "
            "helpful assistants. "
            "[Zephyr 7B Gemma](https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1) "
            "is the third model in the series, and is a fine-tuned version of "
            "google/gemma-7b that was trained on a mix of publicly available, "
            "synthetic datasets using Direct Preference Optimization (DPO)\n",
        'logo': 'https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1/resolve/main/thumbnail.png',
    },
    "Zephyr-7B-β": {
        'description':
            "The Zephyr model is a **Large Language Model (LLM)** that's able "
            "to have question and answer interactions.\n"
            "\nFrom Huggingface: \n"
            "Zephyr is a series of language models that are trained to act as "
            "helpful assistants. "
            "[Zephyr-7B-β](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta) "
            "is the second model in the series, and is a fine-tuned version of "
            "mistralai/Mistral-7B-v0.1 that was trained on a mix of publicly "
            "available, synthetic datasets using Direct Preference Optimization "
            "(DPO)\n",
        'logo': 'https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha/resolve/main/thumbnail.png',
    },
}
71
+
72
def reset_conversation():
    """Clear the stored conversation and message history in session state."""
    st.session_state.conversation = []
    st.session_state.messages = []
79
+
80
+
81
+
82
+
83
# Names offered in the model-selection dropdown (the keys of model_links).
# list(model_links) replaces the redundant [key for key in ...keys()] form.
models = list(model_links)

# Sidebar: model picker.
selected_model = st.sidebar.selectbox("Select Model", models)

# Sampling temperature in [0.0, 1.0]; defaults to 0.5.
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))

# Button that wipes the conversation via reset_conversation().
st.sidebar.button('Reset Chat', on_click=reset_conversation)
95
+
96
+
97
# Sidebar: show the active model's name, blurb, logo, and a disclaimer.
st.sidebar.write(f"You're now chatting with **{selected_model}**")
info = model_info[selected_model]
st.sidebar.markdown(info['description'])
st.sidebar.image(info['logo'])
st.sidebar.markdown("*Generated content may be inaccurate or false.*")
st.sidebar.markdown("\nLearn how to build this chatbot [here](https://ngebodh.github.io/projects/2024-03-05/).")
103
+
104
+
105
+
106
# Remember the previously selected model so the chat history can be wiped
# when the user switches models (old messages would otherwise be replayed
# against the new model).
if "prev_option" not in st.session_state:
    st.session_state.prev_option = selected_model

if st.session_state.prev_option != selected_model:
    st.session_state.prev_option = selected_model
    # reset_conversation() already clears st.session_state.messages, so the
    # original's separate `st.session_state.messages = []` was redundant.
    reset_conversation()
114
+
115
+
116
+
117
# Hugging Face repository id for the model the user picked.
repo_id = model_links[selected_model]

st.subheader(f'AI - {selected_model}')

# Cache the repo id in session state under the model's own display name.
# NOTE(review): nothing below reads this key back — presumably legacy.
if selected_model not in st.session_state:
    st.session_state[selected_model] = model_links[selected_model]

# First run: start with an empty chat history.
if "messages" not in st.session_state:
    st.session_state.messages = []
131
+
132
+
133
# Replay the stored conversation so it survives Streamlit's rerun-per-event
# execution model.
for past in st.session_state.messages:
    with st.chat_message(past["role"]):
        st.markdown(past["content"])
137
+
138
+
139
+
140
# Handle one chat turn; the walrus skips the branch entirely when there is
# no new input this rerun.
if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):

    # Echo the user's message and persist it in the history.
    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Stream the assistant's reply into the chat area as it is generated.
    with st.chat_message("assistant"):
        history = [
            {"role": m["role"], "content": m["content"]}
            for m in st.session_state.messages
        ]
        stream = client.chat.completions.create(
            model=model_links[selected_model],
            messages=history,
            temperature=temp_values,
            stream=True,
            max_tokens=3000,
        )
        response = st.write_stream(stream)
    st.session_state.messages.append({"role": "assistant", "content": response})