from huggingface_hub import HfFileSystem
import pandas as pd
from utils import logger
from datetime import datetime, timedelta
import threading
import traceback
import json
import re
import random
from typing import List, Tuple, Optional, Dict

# NOTE: if caching is an issue, try adding `use_listings_cache=False`
fs = HfFileSystem()

IMPORTANT_MODELS = [
    "auto",
    "bert",  # old but dominant (encoder only)
    "gpt2",  # old (decoder)
    "t5",  # old (encoder-decoder)
    "modernbert",  # (encoder only)
    "vit",  # old (vision) - fixed comma
    "clip",  # old but dominant (vision)
    "detr",  # objection detection, segmentation (vision)
    "table_transformer",  # objection detection (visioin) - maybe just detr?
    "got_ocr2",  # ocr (vision)
    "whisper",  # old but dominant (audio)
    "wav2vec2",  # old (audio)
    "qwen2_audio",  # (audio)
    "speech_t5",  # (audio)
    "csm",  # (audio)
    "llama",  # new and dominant (meta)
    "gemma3",  # new (google)
    "qwen2",  # new (Alibaba)
    "mistral3",  # new (Mistral) - added missing comma
    "qwen2_5_vl",  # new (vision)
    "llava",  # many models from it (vision)
    "smolvlm",  # new (video)
    "internvl",  # new (video)
    "gemma3n",  # new (omnimodal models)
    "qwen2_5_omni",  # new (omnimodal models)
    # "gpt_oss",  # new (quite used)
    "qwen2_5_omni",  # new (omnimodal models)
]

KEYS_TO_KEEP = [
    "success_amd",
    "success_nvidia",
    "skipped_amd",
    "skipped_nvidia",
    "failed_multi_no_amd",
    "failed_multi_no_nvidia",
    "failed_single_no_amd",
    "failed_single_no_nvidia",
    "failures_amd",
    "failures_nvidia",
    "job_link_amd",
    "job_link_nvidia",
]

# ============================================================================
# HELPER FUNCTIONS
# ============================================================================

def generate_fake_dates(num_days: int = 7) -> List[str]:
    """Generate fake dates for the last N days."""
    today = datetime.now()
    return [(today - timedelta(days=i)).strftime("%Y-%m-%d") for i in range(num_days)]

def parse_json_field(value) -> dict:
    """Safely parse a JSON field that might be a string or dict."""
    if isinstance(value, str):
        try:
            return json.loads(value)
        except json.JSONDecodeError:
            return {}
    return value if isinstance(value, dict) else {}

def extract_date_from_path(path: str, pattern: str) -> Optional[str]:
    """Extract date from file path using regex pattern."""
    match = re.search(pattern, path)
    return match.group(1) if match else None

def get_test_names(tests: list) -> set:
    """Extract test names from a list of test dictionaries."""
    return {test.get('line', '') for test in tests}

def safe_extract(row: pd.Series, key: str) -> int:
    """Safely extract an integer value from a DataFrame row."""
    return int(row.get(key, 0)) if pd.notna(row.get(key, 0)) else 0

# ============================================================================
# DATA LOADING FUNCTIONS
# ============================================================================

def log_dataframe_link(link: str) -> str:
    """
    Logs the link to the dataset, rewrites it into a clickable URL, and returns the date of the
    report.
    """
    if link.startswith("sample_"):
        return "9999-99-99"
    logger.info(f"Reading df located at {link}")
    # Make sure the link starts with an https address
    if link.startswith("hf://"):
        link = "https://huggingface.co/" + link.removeprefix("hf://")
    # Pattern to match transformers_daily_ci followed by any path, then a date (YYYY-MM-DD format)
    pattern = r'transformers_daily_ci(.*?)/(\d{4}-\d{2}-\d{2})'
    match = re.search(pattern, link)
    # Failure case: 
    if not match:
        logger.error("Could not find transformers_daily_ci and.or date in the link")
        return "9999-99-99"
    # Replace the matched path segment with blob/main
    path_between = match.group(1)
    link = link.replace("transformers_daily_ci" + path_between, "transformers_daily_ci/blob/main")
    logger.info(f"Link to data source: {link}")
    # Return the date
    return match.group(2)
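
# Example of the rewrite performed above (illustrative only; the date and run id are placeholders):
#   hf://datasets/optimum-amd/transformers_daily_ci/2025-01-01/runs/12345/ci_results_run_models_gpu/model_results.json
# is logged as
#   https://huggingface.co/datasets/optimum-amd/transformers_daily_ci/blob/main/2025-01-01/runs/12345/ci_results_run_models_gpu/model_results.json
# and "2025-01-01" is returned as the report date.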

def infer_latest_update_msg(date_df_amd: str, date_df_nvidia: str) -> str:
    # Early return if one of the dates is invalid
    if date_df_amd.startswith("9999") and date_df_nvidia.startswith("9999"):
        return "could not find last update time"
    # Warn if dates are not the same
    if date_df_amd != date_df_nvidia:
        logger.warning(f"Different dates found: {date_df_amd} (AMD) vs {date_df_nvidia} (NVIDIA)")
    # Take the latest date and format it
    try:
        latest_date = max(date_df_amd, date_df_nvidia)
        yyyy, mm, dd = latest_date.split("-")
        return f"last updated {mm}/{dd}/{yyyy}"
    except Exception as e:
        logger.error(f"When trying to infer latest date, got error {e}")
        return "could not find last update time"

def read_one_dataframe(json_path: str, device_label: str) -> tuple[pd.DataFrame, str]:
    df_upload_date = log_dataframe_link(json_path)
    df = pd.read_json(json_path, orient="index")
    df.index.name = "model_name"
    df[f"failed_multi_no_{device_label}"] = df["failures"].apply(lambda x: len(x["multi"]) if "multi" in x else 0)
    df[f"failed_single_no_{device_label}"] = df["failures"].apply(lambda x: len(x["single"]) if "single" in x else 0)
    return df, df_upload_date
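
# Expected shape of one entry in model_results.json, inferred from the fields used in this
# module (illustrative sketch only; real reports may carry additional keys):
#   "models_bert": {
#       "success": 120,
#       "skipped": 12,
#       "failures": {
#           "single": [{"line": "tests/models/bert/...::test_foo"}],
#           "multi": []
#       },
#       "job_link": {...}
#   }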

def get_available_dates() -> List[str]:
    """Get list of available dates from both AMD and NVIDIA datasets."""
    try:
        # Get file lists
        amd_src = "hf://datasets/optimum-amd/transformers_daily_ci/**/runs/**/ci_results_run_models_gpu/model_results.json"
        nvidia_src = "hf://datasets/hf-internal-testing/transformers_daily_ci/*/ci_results_run_models_gpu/model_results.json"
        
        files_amd = sorted(fs.glob(amd_src, refresh=True), reverse=True)
        files_nvidia = sorted(fs.glob(nvidia_src, refresh=True), reverse=True)
        
        logger.info(f"Found {len(files_amd)} AMD files, {len(files_nvidia)} NVIDIA files")
        
        # Extract dates using patterns
        amd_pattern = r'transformers_daily_ci/(\d{4}-\d{2}-\d{2})/runs/[^/]+/ci_results_run_models_gpu/model_results\.json'
        nvidia_pattern = r'transformers_daily_ci/(\d{4}-\d{2}-\d{2})/ci_results_run_models_gpu/model_results\.json'
        
        amd_dates = {extract_date_from_path(f, amd_pattern) for f in files_amd}
        amd_dates.discard(None)  # Remove None values
        
        nvidia_dates = {extract_date_from_path(f, nvidia_pattern) for f in files_nvidia}
        nvidia_dates.discard(None)
        
        logger.info(f"AMD dates: {sorted(amd_dates, reverse=True)[:5]}...")
        logger.info(f"NVIDIA dates: {sorted(nvidia_dates, reverse=True)[:5]}...")
        
        # Return intersection of both datasets
        common_dates = sorted(amd_dates.intersection(nvidia_dates), reverse=True)
        logger.info(f"Common dates: {len(common_dates)} dates where both AMD and NVIDIA have data")
        
        if common_dates:
            return common_dates[:30]  # Limit to last 30 days
        
        # No real dates available - log warning and return empty list
        # This will allow the system to fall back to sample data properly
        logger.warning("No common dates found between AMD and NVIDIA datasets")
        return []
        
    except Exception as e:
        logger.error(f"Error getting available dates: {e}")
        return []


def get_data_for_date(target_date: str) -> tuple[pd.DataFrame, str]:
    """Get data for a specific date."""
    try:
        # For AMD, we need to find the specific run file for the date
        # AMD structure: YYYY-MM-DD/runs/{run_id}/ci_results_run_models_gpu/model_results.json
        amd_src = f"hf://datasets/optimum-amd/transformers_daily_ci/{target_date}/runs/*/ci_results_run_models_gpu/model_results.json"
        amd_files = fs.glob(amd_src, refresh=True)
        
        if not amd_files:
            raise FileNotFoundError(f"No AMD data found for date {target_date}")
        
        # Use the first (most recent) run for the date
        amd_file = amd_files[0]
        # Ensure the AMD file path has the hf:// prefix
        if not amd_file.startswith("hf://"):
            amd_file = f"hf://{amd_file}"
        
        # NVIDIA structure: YYYY-MM-DD/ci_results_run_models_gpu/model_results.json
        nvidia_src = f"hf://datasets/hf-internal-testing/transformers_daily_ci/{target_date}/ci_results_run_models_gpu/model_results.json"
        
        # Read dataframes - try each platform independently
        df_amd = pd.DataFrame()
        df_nvidia = pd.DataFrame()
        
        try:
            df_amd, _ = read_one_dataframe(amd_file, "amd")
            logger.info(f"Successfully loaded AMD data for {target_date}")
        except Exception as e:
            logger.warning(f"Failed to load AMD data for {target_date}: {e}")
        
        try:
            df_nvidia, _ = read_one_dataframe(nvidia_src, "nvidia")
            logger.info(f"Successfully loaded NVIDIA data for {target_date}")
        except Exception as e:
            logger.warning(f"Failed to load NVIDIA data for {target_date}: {e}")
        
        # If both failed, return empty dataframe
        if df_amd.empty and df_nvidia.empty:
            logger.warning(f"No data available for either platform on {target_date}")
            return pd.DataFrame(), target_date
        
        # Join both dataframes (outer join to include data from either platform)
        if not df_amd.empty and not df_nvidia.empty:
            joined = df_amd.join(df_nvidia, rsuffix="_nvidia", lsuffix="_amd", how="outer")
        elif not df_amd.empty:
            joined = df_amd.copy()
        else:
            joined = df_nvidia.copy()
        
        joined = joined[KEYS_TO_KEEP]
        joined.index = joined.index.str.replace("^models_", "", regex=True)
        
        # Filter out all but important models
        important_models_lower = [model.lower() for model in IMPORTANT_MODELS]
        filtered_joined = joined[joined.index.str.lower().isin(important_models_lower)]
        
        return filtered_joined, target_date
        
    except Exception as e:
        logger.error(f"Error getting data for date {target_date}: {e}")
        # Return empty dataframe instead of sample data for historical functionality
        return pd.DataFrame(), target_date


def get_historical_data(start_date: str, end_date: str, sample_data: bool = False) -> pd.DataFrame:
    """Get historical data for a date range."""
    if sample_data:
        return get_fake_historical_data(start_date, end_date)
    
    try:
        start_dt = datetime.strptime(start_date, "%Y-%m-%d")
        end_dt = datetime.strptime(end_date, "%Y-%m-%d")
        historical_data = []
        
        # Load data for each day in range
        current_dt = start_dt
        while current_dt <= end_dt:
            date_str = current_dt.strftime("%Y-%m-%d")
            try:
                df, _ = get_data_for_date(date_str)
                if not df.empty:
                    df['date'] = date_str
                    historical_data.append(df)
                    logger.info(f"Loaded data for {date_str}")
            except Exception as e:
                logger.warning(f"Could not load data for {date_str}: {e}")
            current_dt += timedelta(days=1)
        
        return pd.concat(historical_data, ignore_index=False) if historical_data else pd.DataFrame()
        
    except Exception as e:
        logger.error(f"Error getting historical data: {e}")
        return get_fake_historical_data(start_date, end_date)


def get_distant_data() -> tuple[pd.DataFrame, str]:
    # Retrieve AMD dataframe
    amd_src = "hf://datasets/optimum-amd/transformers_daily_ci/**/runs/**/ci_results_run_models_gpu/model_results.json"
    files_amd = sorted(fs.glob(amd_src, refresh=True), reverse=True)
    df_amd, date_df_amd = read_one_dataframe(f"hf://{files_amd[0]}", "amd")
    # Retrieve NVIDIA dataframe, whose path pattern should be:
    # hf://datasets/hf-internal-testing/transformers_daily_ci/raw/main/YYYY-MM-DD/ci_results_run_models_gpu/model_results.json
    nvidia_src = "hf://datasets/hf-internal-testing/transformers_daily_ci/*/ci_results_run_models_gpu/model_results.json"
    files_nvidia = sorted(fs.glob(nvidia_src, refresh=True), reverse=True)
    # Use removeprefix (not lstrip, which strips a character set rather than a prefix)
    nvidia_path = files_nvidia[0].removeprefix('datasets/hf-internal-testing/transformers_daily_ci/')
    nvidia_path = "https://huggingface.co/datasets/hf-internal-testing/transformers_daily_ci/raw/main/" + nvidia_path
    df_nvidia, date_df_nvidia = read_one_dataframe(nvidia_path, "nvidia")
    # Infer and format the latest df date
    latest_update_msg = infer_latest_update_msg(date_df_amd, date_df_nvidia)
    # Join both dataframes
    joined = df_amd.join(df_nvidia, rsuffix="_nvidia", lsuffix="_amd", how="outer")
    joined = joined[KEYS_TO_KEEP]
    joined.index = joined.index.str.replace("^models_", "", regex=True)
    # Filter out all but important models
    important_models_lower = [model.lower() for model in IMPORTANT_MODELS]
    filtered_joined = joined[joined.index.str.lower().isin(important_models_lower)]
    # Warn for each missing important model
    for model in IMPORTANT_MODELS:
        if model not in filtered_joined.index:
            logger.warning(f"Model {model} was missing from index.")
    return filtered_joined, latest_update_msg


def get_sample_data() -> tuple[pd.DataFrame, str]:
    # Retrieve sample dataframes
    df_amd, _ = read_one_dataframe("sample_amd.json", "amd")
    df_nvidia, _ = read_one_dataframe("sample_nvidia.json", "nvidia")
    # Join both dataframes
    joined = df_amd.join(df_nvidia, rsuffix="_nvidia", lsuffix="_amd", how="outer")
    joined = joined[KEYS_TO_KEEP]
    joined.index = joined.index.str.replace("^models_", "", regex=True)
    # Filter out all but important models
    important_models_lower = [model.lower() for model in IMPORTANT_MODELS]
    filtered_joined = joined[joined.index.str.lower().isin(important_models_lower)]
    # Prefix all model names with "sample_"
    filtered_joined.index = "sample_" + filtered_joined.index
    return filtered_joined, "sample data was loaded"


def get_fake_historical_data(start_date: str, end_date: str) -> pd.DataFrame:
    """Generate fake historical data for a date range when real data loading fails."""
    try:
        start_dt = datetime.strptime(start_date, "%Y-%m-%d")
        end_dt = datetime.strptime(end_date, "%Y-%m-%d")
        sample_df, _ = get_sample_data()
        historical_data = []
        
        # Generate data for each date
        current_dt = start_dt
        while current_dt <= end_dt:
            date_df = sample_df.copy()
            date_df['date'] = current_dt.strftime("%Y-%m-%d")
            
            # Add random variations to make it realistic
            for idx in date_df.index:
                # Vary success/skipped counts (±20%)
                for col in ['success_amd', 'success_nvidia', 'skipped_amd', 'skipped_nvidia']:
                    if col in date_df.columns and pd.notna(date_df.loc[idx, col]):
                        val = date_df.loc[idx, col]
                        if val > 0:
                            date_df.loc[idx, col] = max(0, int(val * random.uniform(0.8, 1.2)))
                
                # Vary failure counts more dramatically (±50-100%)
                for col in ['failed_multi_no_amd', 'failed_multi_no_nvidia', 'failed_single_no_amd', 'failed_single_no_nvidia']:
                    if col in date_df.columns and pd.notna(date_df.loc[idx, col]):
                        val = date_df.loc[idx, col]
                        date_df.loc[idx, col] = max(0, int(val * random.uniform(0.5, 2.0)))
            
            historical_data.append(date_df)
            current_dt += timedelta(days=1)
        
        if not historical_data:
            return pd.DataFrame()
        
        combined_df = pd.concat(historical_data, ignore_index=False)
        logger.info(f"Generated fake historical data: {len(combined_df)} records from {start_date} to {end_date}")
        return combined_df
        
    except Exception as e:
        logger.error(f"Error generating fake historical data: {e}")
        return pd.DataFrame()

def find_failure_first_seen(historical_df: pd.DataFrame, model_name: str, test_name: str, device: str, gpu_type: str) -> Optional[str]:
    """Find the first date when a specific test failure appeared in historical data."""
    if historical_df.empty:
        return None
    
    try:
        model_data = historical_df[historical_df.index == model_name.lower()].copy()
        if model_data.empty:
            return None
        
        # Check each date (oldest first) for this failure
        for _, row in model_data.sort_values('date').iterrows():
            failures = parse_json_field(row.get(f'failures_{device}'))
            if gpu_type in failures:
                for test in failures[gpu_type]:
                    if test.get('line', '') == test_name:
                        return row.get('date')
        return None
        
    except Exception as e:
        logger.error(f"Error finding first seen date for {test_name}: {e}")
        return None


def _find_device_regressions(model_name: str, current_failures: dict, yesterday_failures: dict, device: str) -> list[dict]:
    """Helper to find regressions for a specific device."""
    regressions = []
    for gpu_type in ['single', 'multi']:
        current_tests = get_test_names(current_failures.get(gpu_type, []))
        yesterday_tests = get_test_names(yesterday_failures.get(gpu_type, []))
        
        # Find NEW failures: failing NOW but NOT yesterday
        new_tests = current_tests - yesterday_tests
        for test_name in new_tests:
            if test_name:  # Skip empty names
                regressions.append({
                    'model': model_name,
                    'test': test_name.split('::')[-1],  # Short name
                    'test_full': test_name,  # Full name
                    'device': device,
                    'gpu_type': gpu_type
                })
    return regressions

def find_new_regressions(current_df: pd.DataFrame, historical_df: pd.DataFrame) -> list[dict]:
    """Compare current failures against previous day's failures to find new regressions."""
    if current_df.empty or historical_df.empty:
        return []
    
    # Get yesterday's data
    available_dates = sorted(historical_df['date'].unique(), reverse=True)
    if not available_dates:
        return []
    
    yesterday_data = historical_df[historical_df['date'] == available_dates[0]]
    new_regressions = []
    
    # For each model, compare current vs yesterday
    for model_name in current_df.index:
        current_row = current_df.loc[model_name]
        yesterday_row = yesterday_data[yesterday_data.index == model_name.lower()]
        
        # Parse current failures
        current_amd = parse_json_field(current_row.get('failures_amd', {}))
        current_nvidia = parse_json_field(current_row.get('failures_nvidia', {}))
        
        # Parse yesterday failures
        yesterday_amd = {}
        yesterday_nvidia = {}
        if not yesterday_row.empty:
            yesterday_row = yesterday_row.iloc[0]
            yesterday_amd = parse_json_field(yesterday_row.get('failures_amd', {}))
            yesterday_nvidia = parse_json_field(yesterday_row.get('failures_nvidia', {}))
        
        # Find regressions for both devices
        new_regressions.extend(_find_device_regressions(model_name, current_amd, yesterday_amd, 'amd'))
        new_regressions.extend(_find_device_regressions(model_name, current_nvidia, yesterday_nvidia, 'nvidia'))
    
    return new_regressions


def extract_model_data(row: pd.Series) -> tuple[dict[str, int], dict[str, int], int, int, int, int]:
    """Extract and process model data from DataFrame row."""
    # Extract all counts
    counts = {key: safe_extract(row, key) for key in [
        'success_amd', 'success_nvidia', 'skipped_amd', 'skipped_nvidia',
        'failed_multi_no_amd', 'failed_multi_no_nvidia', 
        'failed_single_no_amd', 'failed_single_no_nvidia'
    ]}
    
    # Create stats dictionaries
    amd_stats = {
        'passed': counts['success_amd'],
        'failed': counts['failed_multi_no_amd'] + counts['failed_single_no_amd'],
        'skipped': counts['skipped_amd'],
        'error': 0
    }
    nvidia_stats = {
        'passed': counts['success_nvidia'],
        'failed': counts['failed_multi_no_nvidia'] + counts['failed_single_no_nvidia'],
        'skipped': counts['skipped_nvidia'],
        'error': 0
    }
    
    return (amd_stats, nvidia_stats, counts['failed_multi_no_amd'], 
            counts['failed_single_no_amd'], counts['failed_multi_no_nvidia'], 
            counts['failed_single_no_nvidia'])



class CIResults:

    def __init__(self):
        self.df = pd.DataFrame()
        self.available_models = []
        self.latest_update_msg = ""
        self.available_dates = []
        self.historical_df = pd.DataFrame()
        self.all_historical_data = pd.DataFrame()  # Store all historical data at startup
        self.sample_data = False

    def load_data(self) -> None:
        """Load data from the data source."""
        # Try loading the distant data, and fall back on sample data for local tinkering
        try:
            logger.info("Loading distant data...")
            new_df, latest_update_msg = get_distant_data()
            self.latest_update_msg = latest_update_msg
            self.available_dates = get_available_dates()
            logger.info(f"Available dates: {len(self.available_dates)} dates")
            if self.available_dates:
                logger.info(f"Date range: {self.available_dates[-1]} to {self.available_dates[0]}")
            else:
                logger.warning("No available dates found")
                self.available_dates = []
        except Exception as e:
            error_msg = [
                "Loading data failed:",
                "-" * 120,
                traceback.format_exc(),
                "-" * 120,
                "Falling back on sample data."
            ]
            logger.error("\n".join(error_msg))
            self.sample_data = True
            new_df, latest_update_msg = get_sample_data()
            self.latest_update_msg = latest_update_msg
            # Generate fake dates for sample data historical functionality
            self.available_dates = generate_fake_dates()
        
        # Update attributes
        self.df = new_df
        self.available_models = new_df.index.tolist()
        
        # Load all historical data at startup
        self.load_all_historical_data()
        
        # Log and return distant load status
        logger.info(f"Data loaded successfully: {len(self.available_models)} models")
        logger.info(f"Models: {self.available_models[:5]}{'...' if len(self.available_models) > 5 else ''}")
        logger.info(f"Latest update message: {self.latest_update_msg}")
        # Log a preview of the df
        msg = {}
        for model in self.available_models[:3]:
            msg[model] = {}
            for col in self.df.columns:
                value = self.df.loc[model, col]
                if not isinstance(value, int):
                    value = str(value)
                    if len(value) > 10:
                        value = value[:10] + "..."
                msg[model][col] = value
        logger.info(json.dumps(msg, indent=4))

    def load_all_historical_data(self) -> None:
        """Load all available historical data at startup."""
        try:
            if not self.available_dates:
                logger.warning("No available dates found, skipping historical data load")
                self.all_historical_data = pd.DataFrame()
                return
            
            logger.info(f"Loading all historical data for {len(self.available_dates)} dates...")
            start_date, end_date = self.available_dates[-1], self.available_dates[0]
            self.all_historical_data = get_historical_data(start_date, end_date, self.sample_data)
            logger.info(f"All historical data loaded: {len(self.all_historical_data)} records")
        except Exception as e:
            logger.error(f"Error loading all historical data: {e}")
            self.all_historical_data = pd.DataFrame()

    def load_historical_data(self, start_date: str, end_date: str) -> None:
        """Load historical data for a date range from pre-loaded data."""
        try:
            logger.info(f"Filtering historical data from {start_date} to {end_date}")
            
            if self.all_historical_data.empty:
                logger.warning("No pre-loaded historical data available")
                self.historical_df = pd.DataFrame()
                return
            
            # Filter by date range
            start_dt = datetime.strptime(start_date, "%Y-%m-%d")
            end_dt = datetime.strptime(end_date, "%Y-%m-%d")
            
            filtered_data = [
                self.all_historical_data[self.all_historical_data['date'] == date_str]
                for date_str in self.all_historical_data['date'].unique()
                if start_dt <= datetime.strptime(date_str, "%Y-%m-%d") <= end_dt
            ]
            
            if filtered_data:
                self.historical_df = pd.concat(filtered_data, ignore_index=False)
                logger.info(f"Historical data filtered: {len(self.historical_df)} records for {start_date} to {end_date}")
            else:
                self.historical_df = pd.DataFrame()
                logger.warning(f"No historical data found for date range {start_date} to {end_date}")
                
        except Exception as e:
            logger.error(f"Error filtering historical data: {e}")
            self.historical_df = pd.DataFrame()

    def schedule_data_reload(self):
        """Schedule the next data reload."""
        def reload_data():
            self.load_data()
            # Schedule the next reload in 15 minutes (900 seconds)
            timer = threading.Timer(900.0, reload_data)
            timer.daemon = True  # Dies when main thread dies
            timer.start()
            logger.info("Next data reload scheduled in 15 minutes")

        # Start the first reload timer
        timer = threading.Timer(900.0, reload_data)
        timer.daemon = True
        timer.start()
        logger.info("Data auto-reload scheduled every 15 minutes")