Satyam-Singh committed on
Commit
f7dfc7b
1 Parent(s): 6ed8cb1

Update app.py

Files changed (1)
  1. app.py +5 -79
app.py CHANGED
@@ -112,10 +112,10 @@ convo = model.start_chat(history=[
 
 
 def gemini_chat(message, history):
-    response = chat.send_message(message)
+    response = convo.send_message(message)
     return response.text
 
-chat = model.start_chat()
+#chat = model.start_chat()
 
 gr.ChatInterface(
     fn=gemini_chat,
@@ -126,81 +126,7 @@ gr.ChatInterface(
         likeable=True,
         layout="panel"
     ),
-    title="PaLM-2",
-    description="This is unofficial demo of ```PaLM-2``` based on ```Google API```. ```History/context``` memory does not work in this demo.",
+    title="LLAVA: Large Language Virtual Assistant",
+    description="Official Demo Of ```LLAVA``` based on ```Large Language Virtual Assistant ```.",
     concurrency_limit=20,
-).launch(show_api=False)
-
-
-
-
-
-
-
-
-
-"""import PIL.Image
-import gradio as gr
-import base64
-import time
-import os
-import google.generativeai as genai
-
-# Set Google API key
-genai.configure(api_key = os.environ['GOOGLE_PALM_KEY'])
-
-# Create the Model
-txt_model = genai.GenerativeModel('gemini-pro')
-vis_model = genai.GenerativeModel('gemini-pro-vision')
-
-# Image to Base 64 Converter
-def image_to_base64(image_path):
-    with open(image_path, 'rb') as img:
-        encoded_string = base64.b64encode(img.read())
-    return encoded_string.decode('utf-8')
-
-# Function that takes User Inputs and displays it on ChatUI
-def query_message(history,txt,img):
-    if not img:
-        history += [(txt,None)]
-        return history
-    base64 = image_to_base64(img)
-    data_url = f"data:image/jpeg;base64,{base64}"
-    history += [(f"{txt} ![]({data_url})", None)]
-    return history
-
-# Function that takes User Inputs, generates Response and displays on Chat UI
-def llm_response(history,text,img):
-    if not img:
-        response = txt_model.generate_content(text)
-        history += [(None,response.text)]
-        return history
-
-    else:
-        img = PIL.Image.open(img)
-        response = vis_model.generate_content([text,img])
-        history += [(None,response.text)]
-        return history
-
-# Interface Code
-with gr.Blocks() as app:
-    with gr.Row():
-        chatbot = gr.Chatbot(
-            scale = 2,
-            height=750
-        )
-    text_box = gr.Textbox(
-        placeholder="Enter text and press enter, or upload an image",
-        container=False,
-    )
-
-    btn = gr.Button("Submit")
-    clicked = btn.click(query_message,
-                        [chatbot,text_box],
-                        chatbot
-                        ).then(llm_response,
-                               [chatbot,text_box],
-                               chatbot
-                               )
-app.queue()
-app.launch(debug=True)"""
+).launch(show_api=True)
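
For reference, below is a minimal, self-contained sketch of the flow this commit settles on: one persistent chat session created with `model.start_chat(history=[...])` (the `convo` object visible in the hunk header), whose `send_message()` result backs a `gr.ChatInterface`. The `GOOGLE_PALM_KEY` environment variable and the `gemini-pro` model name are taken from the commented-out block removed above; the rest mirrors the diff and is illustrative rather than the exact contents of app.py.

```python
# Minimal sketch of the post-commit flow (illustrative; not the full app.py).
import os

import google.generativeai as genai
import gradio as gr

# API key variable name taken from the block removed in this diff.
genai.configure(api_key=os.environ['GOOGLE_PALM_KEY'])

# One model and one persistent chat session, created once at import time,
# so every call to gemini_chat() shares the same `convo` history.
model = genai.GenerativeModel('gemini-pro')
convo = model.start_chat(history=[])

def gemini_chat(message, history):
    # gr.ChatInterface supplies `history`, but the Gemini-side context
    # lives in `convo`, which is why the commit routes messages through it.
    response = convo.send_message(message)
    return response.text

gr.ChatInterface(
    fn=gemini_chat,
    chatbot=gr.Chatbot(likeable=True, layout="panel"),
    title="LLAVA: Large Language Virtual Assistant",
    description="Official Demo Of ```LLAVA``` based on ```Large Language Virtual Assistant```.",
    concurrency_limit=20,
).launch(show_api=True)
```

Because `convo` is module-level, every visitor to the Space shares a single conversation history; that matches the single-session behaviour implied by the diff, but is worth keeping in mind before reusing this pattern elsewhere.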