Update app.py
app.py
CHANGED
@@ -11,11 +11,17 @@ from sklearn.metrics import precision_score, recall_score, f1_score
 import requests
 from datasets import load_dataset
 
+# --- Logging Setup ---
+logging.basicConfig(filename="chat_log.txt", level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
+logging.debug("Logging setup complete.")
+
 # Load the model
 ner_pipeline = pipeline("ner", model="Sevixdd/roberta-base-finetuned-ner")
+logging.debug("NER pipeline loaded.")
 
 # Load the dataset
 dataset = load_dataset("surrey-nlp/PLOD-filtered")
+logging.debug("Dataset loaded.")
 
 # --- Prometheus Metrics Setup ---
 REQUEST_COUNT = Counter('gradio_request_count', 'Total number of requests')
@@ -25,9 +31,7 @@ RESPONSE_SIZE = Histogram('gradio_response_size_bytes', 'Size of responses in bytes')
 CPU_USAGE = Gauge('system_cpu_usage_percent', 'System CPU usage in percent')
 MEM_USAGE = Gauge('system_memory_usage_percent', 'System memory usage in percent')
 QUEUE_LENGTH = Gauge('chat_queue_length', 'Length of the chat queue')
-
-# --- Logging Setup ---
-logging.basicConfig(filename="chat_log.txt", level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
+logging.debug("Prometheus metrics setup complete.")
 
 # --- Queue and Metrics ---
 chat_queue = Queue() # Define chat_queue globally
@@ -89,7 +93,7 @@ def chat_function(index):
         return full_response
     except Exception as e:
         ERROR_COUNT.inc()
-        logging.error(f"Error in chat processing: {e}")
+        logging.error(f"Error in chat processing: {e}", exc_info=True)
         return f"An error occurred. Please try again. Error: {e}"
 
 # Function to simulate stress test
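
Note on the last hunk: passing exc_info=True makes logging.error attach the active exception's full traceback to the record written to chat_log.txt, instead of only the formatted message. A minimal, self-contained sketch of that behaviour (the ZeroDivisionError is just an illustrative failure, not part of app.py):

import logging

logging.basicConfig(filename="chat_log.txt", level=logging.DEBUG,
                    format='%(asctime)s - %(levelname)s - %(message)s')

try:
    1 / 0  # stand-in for the failing chat processing step
except Exception as e:
    # With exc_info=True the full traceback follows the message in the log file;
    # without it only "Error in chat processing: division by zero" is written.
    logging.error(f"Error in chat processing: {e}", exc_info=True)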
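
The Counter/Gauge/Histogram objects in the first two hunks look like prometheus_client metrics, and this diff only declares them. A hedged sketch of one way they could be refreshed and exposed for scraping; the port (8000), the update_system_metrics helper, and the 5-second interval are assumptions, not part of this commit, and psutil plus the CPU_USAGE, MEM_USAGE, QUEUE_LENGTH, and chat_queue objects defined above are assumed available:

import threading
import time

import psutil
from prometheus_client import start_http_server


def update_system_metrics(interval=5.0):
    # Hypothetical helper: refresh the system gauges so each Prometheus
    # scrape sees current CPU, memory, and queue readings.
    while True:
        CPU_USAGE.set(psutil.cpu_percent(interval=None))
        MEM_USAGE.set(psutil.virtual_memory().percent)
        QUEUE_LENGTH.set(chat_queue.qsize())
        time.sleep(interval)


start_http_server(8000)  # assumed port; serves the /metrics endpoint
threading.Thread(target=update_system_metrics, daemon=True).start()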