lobrien001 committed on
Commit
7079c07
1 Parent(s): fc5d17b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -41
app.py CHANGED
@@ -9,7 +9,7 @@ import random
9
  from transformers import pipeline, AutoConfig
10
 
11
  # Load the model and its configuration
12
- model_name = "Sevixdd/roberta-base-finetuned-ner"
13
  ner_pipeline = pipeline("ner", model=model_name)
14
  config = AutoConfig.from_pretrained(model_name)
15
 
@@ -29,7 +29,7 @@ def chat_function(message, history):
29
  REQUEST_COUNT.inc()
30
  try:
31
  if chat_queue.full():
32
- return "The model is currently processing a request. Please wait."
33
 
34
  chat_queue.put(message)
35
  logging.info(f"User: {message}")
@@ -38,23 +38,23 @@ def chat_function(message, history):
38
  response = f"Response from NER model: {ner_result}"
39
  logging.info(f"Bot: {response}")
40
 
41
- time.sleep(random.uniform(0.5, 2.5)) # Simulate processing
42
 
43
  chat_queue.get()
44
  return response
45
  except Exception as e:
46
  logging.error(f"Error: {e}")
47
- return "An error occurred. Please try again."
48
 
49
  # --- Gradio Interface ---
50
  with gr.Blocks(
51
  css="""
52
  body {
53
- background-image: url("stag.jpeg");
54
- background-size: cover;
55
  background-repeat: no-repeat;
56
  }
57
- """,
58
  title="PLOD Filtered with Monitoring"
59
  ) as demo:
60
  with gr.Tab("Chat"):
@@ -68,41 +68,11 @@ with gr.Blocks(
68
  # ... other tabs (Performance Metrics, Infrastructure, Logs) ...
69
 
70
  # --- Update Functions ---
71
- def update_metrics(request_count_display, avg_latency_display):
72
- # ... (implementation) ...
73
-
74
- def update_usage(cpu_usage_display, mem_usage_display):
75
- # ... (implementation) ...
76
-
77
- def update_logs(logs_display):
78
- # ... (implementation) ...
79
-
80
-
81
- # --- Start Threads ---
82
- threading.Thread(target=start_http_server, args=(8000,), daemon=True).start()
83
- threading.Thread(target=update_metrics, args=(request_count_display, avg_latency_display), daemon=True).start()
84
- threading.Thread(target=update_usage, args=(cpu_usage_display, mem_usage_display), daemon=True).start()
85
- threading.Thread(target=update_logs, args=(logs_display,), daemon=True).start()
86
-
87
-
88
- # --- Simulate Chat Interactions (Optional) ---
89
- def simulate_interactions():
90
- # ... (implementation) ...
91
-
92
- # threading.Thread(target=simulate_interactions, daemon=True).start() # Uncomment to enable
93
 
 
 
 
94
 
95
  # Launch the app
96
  demo.launch(share=True)
97
-
98
-
99
-
100
-
101
-
102
-
103
-
104
-
105
-
106
-
107
-
108
-
 
9
  from transformers import pipeline, AutoConfig
10
 
11
  # Load the model and its configuration
12
+ model_name = "Sevixdd/roberta-base-finetuned-ner" # Make sure this model is available
13
  ner_pipeline = pipeline("ner", model=model_name)
14
  config = AutoConfig.from_pretrained(model_name)
15
 
 
29
  REQUEST_COUNT.inc()
30
  try:
31
  if chat_queue.full():
32
+ return "The model is busy. Please wait..." # More user-friendly message
33
 
34
  chat_queue.put(message)
35
  logging.info(f"User: {message}")
 
38
  response = f"Response from NER model: {ner_result}"
39
  logging.info(f"Bot: {response}")
40
 
41
+ time.sleep(random.uniform(0.5, 2.5)) # Simulate processing (adjust as needed)
42
 
43
  chat_queue.get()
44
  return response
45
  except Exception as e:
46
  logging.error(f"Error: {e}")
47
+ return "An error occurred. Please try again later." # More helpful error message
48
 
49
  # --- Gradio Interface ---
50
  with gr.Blocks(
51
  css="""
52
  body {
53
+ background-image: url("stag.jpeg");
54
+ background-size: cover;
55
  background-repeat: no-repeat;
56
  }
57
+ """,
58
  title="PLOD Filtered with Monitoring"
59
  ) as demo:
60
  with gr.Tab("Chat"):
 
68
  # ... other tabs (Performance Metrics, Infrastructure, Logs) ...
69
 
70
  # --- Update Functions ---
71
+ # ... (Implement update functions for metrics, usage, and logs here)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
 
73
+ # --- Background Threads ---
74
+ threading.Thread(target=start_http_server, args=(8000,), daemon=True).start()
75
+ # ... (Threads for metrics, usage, and logs update)
76
 
77
  # Launch the app
78
  demo.launch(share=True)