pseudotensor committed
Commit 31cc3ef
1 Parent(s): 374efab

Update with h2oGPT hash 61628d335bdb685fdcc63ca9821cf5607f41a9e3

Files changed:
- generate.py +0 -1
- gradio_runner.py +4 -1
- utils.py +4 -0
generate.py CHANGED
@@ -865,7 +865,6 @@ def evaluate(
     outputs = ""
     for new_text in streamer:
         outputs += new_text
-        print('.', end='', flush=True)
         yield prompter.get_response(outputs, prompt=inputs_decoded,
                                     sanitize_bot_response=sanitize_bot_response)
     decoded_output = outputs
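The loop above is the usual streaming-generation pattern: accumulate chunks from a token streamer and yield the cleaned-up partial response after each one so the UI can re-render incrementally; the commit simply drops the per-chunk print('.') progress dots from the loop. A minimal runnable sketch of the pattern, not part of the commit (fake_streamer is a hypothetical stand-in for h2oGPT's transformers streamer, and stream_response for the body of evaluate()):

import time

def fake_streamer(text, chunk=4):
    # Stand-in for a transformers TextIteratorStreamer: yields the text
    # a few characters at a time, like tokens arriving from generation.
    for i in range(0, len(text), chunk):
        time.sleep(0.01)
        yield text[i:i + chunk]

def stream_response(streamer):
    # Mirror of the evaluate() loop: accumulate chunks and yield the
    # full partial output on every chunk so a consumer can re-render it.
    outputs = ""
    for new_text in streamer:
        outputs += new_text
        yield outputs

if __name__ == "__main__":
    for partial in stream_response(fake_streamer("Hello, streamed world!")):
        print(partial)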
gradio_runner.py CHANGED
@@ -4,7 +4,8 @@ import os
 import sys
 
 from gradio_themes import H2oTheme, SoftTheme, get_h2o_title, get_simple_title, get_dark_js
-from utils import get_githash, flatten_list, zip_data, s3up, clear_torch_cache, get_torch_allocated, system_info_print
+from utils import get_githash, flatten_list, zip_data, s3up, clear_torch_cache, get_torch_allocated, system_info_print, \
+    ping
 from finetune import prompt_type_to_model_name, prompt_types_strings, generate_prompt, inv_prompt_type_to_model_lower
 from generate import get_model, languages_covered, evaluate, eval_func_param_names, score_qa
 
@@ -872,6 +873,8 @@ def go_gradio(**kwargs):
 
     scheduler = BackgroundScheduler()
     scheduler.add_job(func=clear_torch_cache, trigger="interval", seconds=20)
+    if is_public:
+        scheduler.add_job(func=ping, trigger="interval", seconds=60)
     scheduler.start()
 
     demo.launch(share=kwargs['share'], server_name="0.0.0.0", show_error=True,
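The scheduler here is APScheduler's BackgroundScheduler, which runs jobs on a background thread pool alongside the Gradio server; the commit adds a second interval job so public deployments emit a periodic ping. A minimal runnable sketch of the same pattern, not part of the commit, assuming APScheduler is installed (pip install apscheduler); the job bodies and the is_public flag are simplified stand-ins for the real ones in go_gradio():

import time
from datetime import datetime
from apscheduler.schedulers.background import BackgroundScheduler

def clear_cache():
    # Stand-in for clear_torch_cache() from utils.py.
    print('clearing cache', flush=True)

def ping():
    # Same body as the new ping() added to utils.py in this commit.
    print('Ping: %s' % str(datetime.now()), flush=True)

scheduler = BackgroundScheduler()
scheduler.add_job(func=clear_cache, trigger="interval", seconds=20)
is_public = True  # stand-in for the flag computed inside go_gradio()
if is_public:
    scheduler.add_job(func=ping, trigger="interval", seconds=60)
scheduler.start()
try:
    time.sleep(5)  # in gradio_runner.py this is where demo.launch(...) blocks
finally:
    scheduler.shutdown()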
utils.py CHANGED
@@ -52,6 +52,10 @@ def clear_torch_cache():
     gc.collect()
 
 
+def ping():
+    print('Ping: %s' % str(datetime.now()), flush=True)
+
+
 def get_torch_allocated():
    import torch
    return torch.cuda.memory_allocated()
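The new ping() is a heartbeat: it prints a timestamped line, flushed so it reaches the container logs immediately, presumably to show liveness on public deployments. A standalone version for reference, not part of the commit; note the real helper relies on datetime already being imported at the top of utils.py, which is why the diff does not add an import:

from datetime import datetime

def ping():
    # Timestamped heartbeat; flush=True pushes the line to the logs
    # immediately rather than waiting for stdout buffering.
    print('Ping: %s' % str(datetime.now()), flush=True)

if __name__ == "__main__":
    ping()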