oceansweep committed on
Commit 62cadc2 · verified · 1 Parent(s): 790d9fe

Upload 2 files

App_Function_Libraries/Metrics/logger_config.py ADDED
@@ -0,0 +1,58 @@
+ # logger_config.py
+ #
+ # Imports
+ import logging
+ from logging.handlers import RotatingFileHandler
+ from pythonjsonlogger import jsonlogger
+ import os
+ #
+ ############################################################################################################
+ #
+ # Functions:
+
+ def setup_logger(log_file_path="tldw_app_logs.json"):
+     """
+     Sets up the logger with both StreamHandler and FileHandler, formatted in JSON.
+
+     Parameters:
+         log_file_path (str): Path to the JSON log file.
+
+     Returns:
+         logging.Logger: Configured logger instance.
+     """
+     logger = logging.getLogger("tldw_app_logs")
+     logger.setLevel(logging.DEBUG)  # Set to DEBUG for detailed logs
+
+     # Prevent adding multiple handlers if the logger is already configured
+     if not logger.handlers:
+         # StreamHandler for console output
+         stream_handler = logging.StreamHandler()
+         stream_formatter = jsonlogger.JsonFormatter(
+             '%(asctime)s %(levelname)s %(name)s event %(event)s type %(type)s value %(value)s labels %(labels)s timestamp %(timestamp)s'
+         )
+         stream_handler.setFormatter(stream_formatter)
+         logger.addHandler(stream_handler)
+
+         # Ensure the directory for log_file_path exists
+         log_dir = os.path.dirname(log_file_path)
+         if log_dir and not os.path.exists(log_dir):
+             os.makedirs(log_dir, exist_ok=True)
+
+         # RotatingFileHandler for writing logs to a JSON file with rotation
+         file_handler = RotatingFileHandler(
+             log_file_path, maxBytes=10*1024*1024, backupCount=5  # 10 MB per file, keep 5 backups
+         )
+         file_formatter = jsonlogger.JsonFormatter(
+             '%(asctime)s %(levelname)s %(name)s event %(event)s type %(type)s value %(value)s labels %(labels)s timestamp %(timestamp)s'
+         )
+         file_handler.setFormatter(file_formatter)
+         logger.addHandler(file_handler)
+
+     return logger
+
+ # Initialize the logger
+ logger = setup_logger()
+
+ #
+ # End of Functions
+ ############################################################################################################
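
For orientation (not part of the committed diff): a minimal sketch of how other modules in the app would pick up this shared logger. The event name and label set below are illustrative assumptions, not actual call sites.

# illustrative usage only -- assumes App_Function_Libraries is importable
from App_Function_Libraries.Metrics.logger_config import logger

# Each record is emitted as one JSON object to the console and to tldw_app_logs.json
logger.info(
    "metric",
    extra={
        "event": "video_processed",           # hypothetical event name
        "type": "counter",
        "value": 1,
        "labels": {"whisper_model": "base"},  # hypothetical label set
        "timestamp": "2024-01-01T00:00:00Z",  # normally supplied by metrics_logger
    },
)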
App_Function_Libraries/Metrics/metrics_logger.py ADDED
@@ -0,0 +1,98 @@
+ # metrics_logger.py
+ #
+ # Imports
+ from datetime import datetime, timezone
+ #
+ # Third-party Imports
+ #
+ # Local Imports
+ from App_Function_Libraries.Metrics.logger_config import logger
+ #
+ ############################################################################################################
+ #
+ # Functions:
+
+ def log_counter(metric_name, labels=None, value=1):
+     log_entry = {
+         "event": metric_name,
+         "type": "counter",
+         "value": value,
+         "labels": labels or {},
+         # Timezone-aware UTC timestamp; isoformat() already ends in "+00:00",
+         # so swap that for "Z" instead of appending a second UTC suffix
+         "timestamp": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
+     }
+     logger.info("metric", extra=log_entry)
+
+ def log_histogram(metric_name, value, labels=None):
+     log_entry = {
+         "event": metric_name,
+         "type": "histogram",
+         "value": value,
+         "labels": labels or {},
+         # Timezone-aware UTC timestamp; isoformat() already ends in "+00:00",
+         # so swap that for "Z" instead of appending a second UTC suffix
+         "timestamp": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
+     }
+     logger.info("metric", extra=log_entry)
+
+ #
+ # End of Functions
+ ############################################################################################################
+
+ # # Prometheus
+ # # metrics_logger.py (Prometheus version)
+ # from prometheus_client import Counter, Histogram, start_http_server
+ # import logging
+ # from functools import wraps
+ # import time
+ #
+ # # Initialize Prometheus metrics
+ # VIDEOS_PROCESSED = Counter('videos_processed_total', 'Total number of videos processed', ['whisper_model', 'api_name'])
+ # VIDEOS_FAILED = Counter('videos_failed_total', 'Total number of videos failed to process', ['whisper_model', 'api_name'])
+ # TRANSCRIPTIONS_GENERATED = Counter('transcriptions_generated_total', 'Total number of transcriptions generated', ['whisper_model'])
+ # SUMMARIES_GENERATED = Counter('summaries_generated_total', 'Total number of summaries generated', ['whisper_model'])
+ # VIDEO_PROCESSING_TIME = Histogram('video_processing_time_seconds', 'Time spent processing videos', ['whisper_model', 'api_name'])
+ # TOTAL_PROCESSING_TIME = Histogram('total_processing_time_seconds', 'Total time spent processing all videos', ['whisper_model', 'api_name'])
+ #
+ # def init_metrics_server(port=8000):
+ #     start_http_server(port)
+ #
+ # def log_counter(metric_name, labels=None, value=1):
+ #     if metric_name == "videos_processed_total":
+ #         VIDEOS_PROCESSED.labels(**(labels or {})).inc(value)
+ #     elif metric_name == "videos_failed_total":
+ #         VIDEOS_FAILED.labels(**(labels or {})).inc(value)
+ #     elif metric_name == "transcriptions_generated_total":
+ #         TRANSCRIPTIONS_GENERATED.labels(**(labels or {})).inc(value)
+ #     elif metric_name == "summaries_generated_total":
+ #         SUMMARIES_GENERATED.labels(**(labels or {})).inc(value)
+ #
+ # def log_histogram(metric_name, value, labels=None):
+ #     if metric_name == "video_processing_time_seconds":
+ #         VIDEO_PROCESSING_TIME.labels(**(labels or {})).observe(value)
+ #     elif metric_name == "total_processing_time_seconds":
+ #         TOTAL_PROCESSING_TIME.labels(**(labels or {})).observe(value)
+
+
+ # # main.py or equivalent entry point
+ # from metrics_logger import init_metrics_server
+ #
+ #
+ # def main():
+ #     # Start Prometheus metrics server on port 8000
+ #     init_metrics_server(port=8000)
+ #
+ #     # Initialize and launch your Gradio app
+ #     create_video_transcription_tab()
+ #
+ #
+ # if __name__ == "__main__":
+ #     main()
+
+ # prometheus.yml
+ # scrape_configs:
+ #   - job_name: 'video_transcription_app'
+ #     static_configs:
+ #       - targets: ['localhost:8000'] # Replace with your application's host and port
+
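
For completeness (also not part of the diff): a hedged sketch of how the JSON-based log_counter / log_histogram above might be called from processing code. The metric names and labels simply mirror the commented-out Prometheus variant and are assumptions, not confirmed call sites.

from App_Function_Libraries.Metrics.metrics_logger import log_counter, log_histogram

# Illustrative call sites; label keys follow the Prometheus sketch above
log_counter("videos_processed_total", labels={"whisper_model": "base", "api_name": "openai"})
log_histogram("video_processing_time_seconds", 42.7, labels={"whisper_model": "base", "api_name": "openai"})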