jykoh committed on
Commit
bd7215a
1 Parent(s): 2feda0d

Fix bug with input_image

Browse files
Files changed (1)
  1. app.py +8 -15
app.py CHANGED
@@ -64,19 +64,18 @@ def upload_image(state, image_input):
64
  input_image = Image.open(image_input.name).resize(
65
  (224, 224)).convert('RGB')
66
  input_image.save(image_input.name) # Overwrite with smaller image.
67
- conversation += [(f"![](/file={image_input.name})", "")]
68
- return [conversation, chat_history, input_image], conversation
69
 
70
 
71
  def reset():
72
- return [[], [], None], []
73
 
74
 
75
  def reset_last(state):
76
  conversation = state[0][:-1]
77
  chat_history = state[1][:-2]
78
- input_image = state[2]
79
- return [conversation, chat_history, input_image], conversation
80
 
81
 
82
  def save_image_to_local(image: Image.Image):
@@ -94,16 +93,10 @@ def generate_for_prompt(input_text, state, ret_scale_factor, max_num_rets, num_w
94
  input_prompt = 'Q: ' + input_text + '\nA:'
95
  conversation = state[0]
96
  chat_history = state[1]
97
- input_image = state[2]
98
  print('Generating for', chat_history, flush=True)
99
 
100
  # If an image was uploaded, prepend it to the model.
101
- model_inputs = None
102
- if input_image is not None:
103
- model_inputs = chat_history + [input_image]
104
- else:
105
- model_inputs = chat_history
106
-
107
  model_inputs.append(input_prompt)
108
 
109
  top_p = 1.0
@@ -144,8 +137,8 @@ def generate_for_prompt(input_text, state, ret_scale_factor, max_num_rets, num_w
144
 
145
  # Set input image to None.
146
  print('state', state, flush=True)
147
- print('updated state', [conversation, chat_history, None], flush=True)
148
- return [conversation, chat_history, None], conversation, gr.update(visible=True), gr.update(visible=True)
149
 
150
 
151
  with gr.Blocks(css=css) as demo:
@@ -157,7 +150,7 @@ with gr.Blocks(css=css) as demo:
157
  For faster inference without waiting in queue, you may duplicate the space and use your own GPU. <a href="https://huggingface.co/spaces/haoheliu/audioldm-text-to-audio-generation?duplicate=true"><img style="margin-top: 0em; margin-bottom: 0em" src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a>
158
  """)
159
 
160
- gr_state = gr.State([[], [], None]) # chat_history, input_image
161
 
162
  with gr.Row():
163
  with gr.Column(scale=0.7, min_width=500):
64
  input_image = Image.open(image_input.name).resize(
65
  (224, 224)).convert('RGB')
66
  input_image.save(image_input.name) # Overwrite with smaller image.
67
+ conversation += [(f'<img src="/file={image_input.name}" style="display: inline-block;">', "")]
68
+ return [conversation, chat_history + [input_image, ""]], conversation
69
 
70
 
71
  def reset():
72
+ return [[], []], []
73
 
74
 
75
  def reset_last(state):
76
  conversation = state[0][:-1]
77
  chat_history = state[1][:-2]
78
+ return [conversation, chat_history], conversation
 
79
 
80
 
81
  def save_image_to_local(image: Image.Image):
93
  input_prompt = 'Q: ' + input_text + '\nA:'
94
  conversation = state[0]
95
  chat_history = state[1]
 
96
  print('Generating for', chat_history, flush=True)
97
 
98
  # If an image was uploaded, prepend it to the model.
99
+ model_inputs = chat_history
 
 
 
 
 
100
  model_inputs.append(input_prompt)
101
 
102
  top_p = 1.0
137
 
138
  # Set input image to None.
139
  print('state', state, flush=True)
140
+ print('updated state', [conversation, chat_history], flush=True)
141
+ return [conversation, chat_history], conversation, gr.update(visible=True), gr.update(visible=True)
142
 
143
 
144
  with gr.Blocks(css=css) as demo:
150
  For faster inference without waiting in queue, you may duplicate the space and use your own GPU. <a href="https://huggingface.co/spaces/haoheliu/audioldm-text-to-audio-generation?duplicate=true"><img style="margin-top: 0em; margin-bottom: 0em" src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a>
151
  """)
152
 
153
+ gr_state = gr.State([[], []]) # conversation, chat_history
154
 
155
  with gr.Row():
156
  with gr.Column(scale=0.7, min_width=500):