lobrien001 committed on
Commit
fc4df2e
·
verified ·
1 Parent(s): ef7e3f3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -5
app.py CHANGED
@@ -5,9 +5,11 @@ import time
5
  from prometheus_client import start_http_server, Counter, Histogram
6
  import threading
7
  import psutil
 
 
8
 
9
  # Load the model
10
- gr.load("models/Sevixdd/roberta-base-finetuned-ner").launch()
11
 
12
  # --- Prometheus Metrics Setup ---
13
  REQUEST_COUNT = Counter('gradio_request_count', 'Total number of requests')
@@ -29,10 +31,12 @@ def chat_function(message, history):
29
  chat_queue.put(message)
30
  logging.info(f"User: {message}")
31
 
32
- # ... (Your chatbot processing logic here) ...
33
- time.sleep(2) # Simulate processing delay
34
- response = chat_queue.get()
 
35
  logging.info(f"Bot: {response}")
 
36
 
37
  return response
38
  except Exception as e:
@@ -65,13 +69,22 @@ with gr.Blocks(title="PLOD Filtered with Monitoring") as demo:
65
  while True:
66
  cpu_usage_display.update(psutil.cpu_percent())
67
  mem_usage_display.update(psutil.virtual_memory().percent)
68
- time.sleep(5)
69
 
70
  # --- Start Threads ---
71
  threading.Thread(target=start_http_server, args=(8000,), daemon=True).start()
72
  threading.Thread(target=update_metrics, daemon=True).start()
73
  threading.Thread(target=update_usage, daemon=True).start()
74
 
 
 
 
 
 
 
 
 
 
75
  # Launch the app
76
  demo.launch(share=True)
77
 
@@ -81,3 +94,4 @@ demo.launch(share=True)
81
 
82
 
83
 
 
 
5
  from prometheus_client import start_http_server, Counter, Histogram
6
  import threading
7
  import psutil
8
+ import random
9
+ from transformers import pipeline
10
 
11
  # Load the model
12
+ ner_pipeline = pipeline("ner", model="Sevixdd/roberta-base-finetuned-ner")
13
 
14
  # --- Prometheus Metrics Setup ---
15
  REQUEST_COUNT = Counter('gradio_request_count', 'Total number of requests')
 
31
  chat_queue.put(message)
32
  logging.info(f"User: {message}")
33
 
34
+ # Simulate some processing using the NER pipeline
35
+ ner_result = ner_pipeline(message)
36
+
37
+ response = f"Response from NER model: {ner_result}"
38
  logging.info(f"Bot: {response}")
39
+ time.sleep(random.uniform(0.5, 2.5)) # Simulate processing time
40
 
41
  return response
42
  except Exception as e:
 
69
  while True:
70
  cpu_usage_display.update(psutil.cpu_percent())
71
  mem_usage_display.update(psutil.virtual_memory().percent)
72
+ time.sleep(5) # Update every 5 seconds
73
 
74
  # --- Start Threads ---
75
  threading.Thread(target=start_http_server, args=(8000,), daemon=True).start()
76
  threading.Thread(target=update_metrics, daemon=True).start()
77
  threading.Thread(target=update_usage, daemon=True).start()
78
 
79
+ # --- Simulate Chat Interactions ---
80
+ def simulate_interactions():
81
+ messages = ["Hello bot!", "What's your name?", "Tell me a joke.", "Who are you?"]
82
+ for msg in messages:
83
+ chat_function(msg, [])
84
+ time.sleep(random.uniform(1, 5)) # Random interval between messages
85
+
86
+ threading.Thread(target=simulate_interactions, daemon=True).start() # Start simulation
87
+
88
  # Launch the app
89
  demo.launch(share=True)
90
 
 
94
 
95
 
96
 
97
+