Penghao Wu committed on
Commit
323cabb
1 Parent(s): aa506f8

add empty cache

Browse files
Files changed (1) hide show
  1. app.py +3 -0
app.py CHANGED
@@ -116,6 +116,7 @@ def inference(input_str, input_image):
116
  ## input valid check
117
  if not re.match(r"^[A-Za-z ,.!?\'\"]+$", input_str) or len(input_str) < 1:
118
  output_str = "[Error] Invalid input: ", input_str
 
119
  return output_str, None
120
 
121
  # Model Inference
@@ -133,6 +134,7 @@ def inference(input_str, input_image):
133
  missing_objects = [missing_object.strip() for missing_object in missing_objects]
134
 
135
  if len(missing_objects) == 0:
 
136
  return prediction, None, None, None
137
 
138
  search_result = []
@@ -210,6 +212,7 @@ def inference(input_str, input_image):
210
  if len(failed_objects) > 0:
211
  search_result_str += "Targets unable to locate after search: {}.".format(', '.join(failed_objects))
212
 
 
213
  return "Need to conduct visual search to search for: {}.".format(', '.join(missing_objects)), search_result_str, search_result_image, response
214
 
215
  demo = gr.Interface(
 
116
  ## input valid check
117
  if not re.match(r"^[A-Za-z ,.!?\'\"]+$", input_str) or len(input_str) < 1:
118
  output_str = "[Error] Invalid input: ", input_str
119
+ torch.cuda.empty_cache()
120
  return output_str, None
121
 
122
  # Model Inference
 
134
  missing_objects = [missing_object.strip() for missing_object in missing_objects]
135
 
136
  if len(missing_objects) == 0:
137
+ torch.cuda.empty_cache()
138
  return prediction, None, None, None
139
 
140
  search_result = []
 
212
  if len(failed_objects) > 0:
213
  search_result_str += "Targets unable to locate after search: {}.".format(', '.join(failed_objects))
214
 
215
+ torch.cuda.empty_cache()
216
  return "Need to conduct visual search to search for: {}.".format(', '.join(missing_objects)), search_result_str, search_result_image, response
217
 
218
  demo = gr.Interface(