Yw22 committed on
Commit b29b979
1 Parent(s): 1822775
Files changed (1):
  1. helpers.py +96 -95
helpers.py CHANGED
@@ -305,102 +305,103 @@ class Examples:
         """
         Caches all of the examples so that their predictions can be shown immediately.
         """
-        if Context.root_block is None:
-            raise ValueError("Cannot cache examples if not in a Blocks context")
-        if Path(self.cached_file).exists():
-            print(
-                f"Using cache from '{utils.abspath(self.cached_folder)}' directory. If method or examples have changed since last caching, delete this folder to clear cache.\n"
-            )
-        else:
-            print(f"Caching examples at: '{utils.abspath(self.cached_folder)}'")
-            cache_logger = CSVLogger()
-
-            generated_values = []
-            if inspect.isgeneratorfunction(self.fn):
-
-                def get_final_item(*args):  # type: ignore
-                    x = None
-                    generated_values.clear()
-                    for x in self.fn(*args):  # noqa: B007 # type: ignore
-                        generated_values.append(x)
-                    return x
-
-                fn = get_final_item
-            elif inspect.isasyncgenfunction(self.fn):
-
-                async def get_final_item(*args):
-                    x = None
-                    generated_values.clear()
-                    async for x in self.fn(*args):  # noqa: B007 # type: ignore
-                        generated_values.append(x)
-                    return x
-
-                fn = get_final_item
-            else:
-                fn = self.fn
-
-            # create a fake dependency to process the examples and get the predictions
-            from gradio.events import EventListenerMethod
-
-            dependency, fn_index = Context.root_block.set_event_trigger(
-                [EventListenerMethod(Context.root_block, "load")],
-                fn=fn,
-                inputs=self.inputs_with_examples,  # type: ignore
-                outputs=self.outputs,  # type: ignore
-                preprocess=self.preprocess and not self._api_mode,
-                postprocess=self.postprocess and not self._api_mode,
-                batch=self.batch,
-            )
-
-            assert self.outputs is not None
-            cache_logger.setup(self.outputs, self.cached_folder)
-            for example_id, _ in enumerate(self.examples):
-                print(f"Caching example {example_id + 1}/{len(self.examples)}")
-                processed_input = self.processed_examples[example_id]
-                if self.batch:
-                    processed_input = [[value] for value in processed_input]
-                with utils.MatplotlibBackendMananger():
-                    prediction = await Context.root_block.process_api(
-                        fn_index=fn_index,
-                        inputs=processed_input,
-                        request=None,
-                    )
-                output = prediction["data"]
-                if len(generated_values):
-                    output = merge_generated_values_into_output(
-                        self.outputs, generated_values, output
-                    )
-
-                if self.batch:
-                    output = [value[0] for value in output]
-                cache_logger.flag(output)
-            # Remove the "fake_event" to prevent bugs in loading interfaces from spaces
-            Context.root_block.dependencies.remove(dependency)
-            Context.root_block.fns.pop(fn_index)
-
-            # Remove the original load_input_event and replace it with one that
-            # also populates the input. We do it this way to allow the cache()
-            # method to be called independently of the create() method
-            index = Context.root_block.dependencies.index(self.load_input_event)
-            Context.root_block.dependencies.pop(index)
-            Context.root_block.fns.pop(index)
-
-            def load_example(example_id):
-                processed_example = self.non_none_processed_examples[
-                    example_id
-                ] + self.load_from_cache(example_id)
-                return utils.resolve_singleton(processed_example)
-
-            self.load_input_event = self.dataset.click(
-                load_example,
-                inputs=[self.dataset],
-                outputs=self.inputs_with_examples + self.outputs,  # type: ignore
-                show_progress="hidden",
-                postprocess=False,
-                queue=False,
-                api_name=self.api_name,  # type: ignore
-            )
+        # if Context.root_block is None:
+        #     raise ValueError("Cannot cache examples if not in a Blocks context")
+        # if Path(self.cached_file).exists():
+        #     print(
+        #         f"Using cache from '{utils.abspath(self.cached_folder)}' directory. If method or examples have changed since last caching, delete this folder to clear cache.\n"
+        #     )
+        # else:
+        #     print(f"Caching examples at: '{utils.abspath(self.cached_folder)}'")
+        #     cache_logger = CSVLogger()
+
+        #     generated_values = []
+        #     if inspect.isgeneratorfunction(self.fn):
+
+        #         def get_final_item(*args):  # type: ignore
+        #             x = None
+        #             generated_values.clear()
+        #             for x in self.fn(*args):  # noqa: B007 # type: ignore
+        #                 generated_values.append(x)
+        #             return x
+
+        #         fn = get_final_item
+        #     elif inspect.isasyncgenfunction(self.fn):
+
+        #         async def get_final_item(*args):
+        #             x = None
+        #             generated_values.clear()
+        #             async for x in self.fn(*args):  # noqa: B007 # type: ignore
+        #                 generated_values.append(x)
+        #             return x
+
+        #         fn = get_final_item
+        #     else:
+        #         fn = self.fn
+
+        #     # create a fake dependency to process the examples and get the predictions
+        #     from gradio.events import EventListenerMethod
+
+        #     dependency, fn_index = Context.root_block.set_event_trigger(
+        #         [EventListenerMethod(Context.root_block, "load")],
+        #         fn=fn,
+        #         inputs=self.inputs_with_examples,  # type: ignore
+        #         outputs=self.outputs,  # type: ignore
+        #         preprocess=self.preprocess and not self._api_mode,
+        #         postprocess=self.postprocess and not self._api_mode,
+        #         batch=self.batch,
+        #     )
+
+        #     assert self.outputs is not None
+        #     cache_logger.setup(self.outputs, self.cached_folder)
+        #     for example_id, _ in enumerate(self.examples):
+        #         print(f"Caching example {example_id + 1}/{len(self.examples)}")
+        #         processed_input = self.processed_examples[example_id]
+        #         if self.batch:
+        #             processed_input = [[value] for value in processed_input]
+        #         with utils.MatplotlibBackendMananger():
+        #             prediction = await Context.root_block.process_api(
+        #                 fn_index=fn_index,
+        #                 inputs=processed_input,
+        #                 request=None,
+        #             )
+        #         output = prediction["data"]
+        #         if len(generated_values):
+        #             output = merge_generated_values_into_output(
+        #                 self.outputs, generated_values, output
+        #             )
+
+        #         if self.batch:
+        #             output = [value[0] for value in output]
+        #         cache_logger.flag(output)
+        #     # Remove the "fake_event" to prevent bugs in loading interfaces from spaces
+        #     Context.root_block.dependencies.remove(dependency)
+        #     Context.root_block.fns.pop(fn_index)
+
+        #     # Remove the original load_input_event and replace it with one that
+        #     # also populates the input. We do it this way to allow the cache()
+        #     # method to be called independently of the create() method
+        #     index = Context.root_block.dependencies.index(self.load_input_event)
+        #     Context.root_block.dependencies.pop(index)
+        #     Context.root_block.fns.pop(index)
+
+        #     def load_example(example_id):
+        #         processed_example = self.non_none_processed_examples[
+        #             example_id
+        #         ] + self.load_from_cache(example_id)
+        #         return utils.resolve_singleton(processed_example)
+
+        #     self.load_input_event = self.dataset.click(
+        #         load_example,
+        #         inputs=[self.dataset],
+        #         outputs=self.inputs_with_examples + self.outputs,  # type: ignore
+        #         show_progress="hidden",
+        #         postprocess=False,
+        #         queue=False,
+        #         api_name=self.api_name,  # type: ignore
+        #     )
+        pass
 
     def load_from_cache(self, example_id: int) -> list[Any]:
         """Loads a particular cached example for the interface.
         Parameters:
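
Net effect of the hunk above, as a minimal sketch: the commit comments out the entire body of Examples.cache() and leaves a bare pass, so calling cache() no longer registers the temporary "load" event, runs examples through Context.root_block.process_api, or writes outputs with CSVLogger. The async keyword below is an assumption inferred from the removed body's use of await; the docstring is verbatim from the diff.

class Examples:
    async def cache(self):  # assumed async: the removed body awaited process_api
        """
        Caches all of the examples so that their predictions can be shown immediately.
        """
        # The caching pipeline (temporary "load" event, per-example
        # process_api calls, CSVLogger flagging) is left commented out
        # in the source, so this method is now a no-op.
        pass

One consequence worth noting: since the replacement of self.load_input_event is also commented out, the dataset click handler keeps whatever behavior create() gave it, and cached predictions are never loaded into the outputs.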