Update app.py
app.py
CHANGED
@@ -3,9 +3,9 @@ import gradio as gr
 from random import randint
 from all_models import models
 from datetime import datetime
-from concurrent.futures import TimeoutError, ThreadPoolExecutor
+#from concurrent.futures import TimeoutError, ThreadPoolExecutor
 import time
-
+import logging
 import traceback  # For better error reporting
 
 #os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
@@ -13,7 +13,7 @@ import traceback  # For better error reporting
 #for handler in logger.handlers[:]:
 #    handler.close()
 #    logger.removeHandler(handler)
-
+logging.basicConfig(level=logging.DEBUG)
 
 now2 = 0
 index_tracker = 0
@@ -142,14 +142,7 @@ def gen_fn(model_str, prompt):
             processed_models_count = 0
         return response
 
-
-        ##print(f"TimeoutError: Model '{model_str}' did not respond within 150 seconds.")
-        processed_models_count += 1
-        if processed_models_count == len(models):
-            ## print("\nCycle Complete! Updated Scores:")
-            ## print(model_scores)
-            processed_models_count = 0
-        return None
+
 
     except Exception as e:
         if processed_models_count == 0: