import sys
import json
from pathlib import Path
from collections import Counter
from datetime import datetime

# Duration buckets in display order (Chinese: ≤15s, ≤1min, ≤5min, >5min).
# These must match the label text that appears after '视频时长:' on the first
# line of each results/*.txt file, since counting keys off exact equality.
LABELS = ["≤15秒", "≤1分钟", "≤5分钟", "＞5分钟"]


def read_first_line(p: Path) -> str:
    """Return the first line of *p* (stripped), or '' if it cannot be read.

    Only I/O and decoding failures are treated as "unreadable" — a missing
    file, permission error, or non-UTF-8 content yields ''. Other exceptions
    (programming errors) propagate instead of being silently swallowed, which
    the original blanket ``except Exception`` would have hidden.
    """
    try:
        with p.open('r', encoding='utf-8') as f:
            return f.readline().strip()
    except (OSError, UnicodeError):
        return ''


def main():
    """Summarize duration labels across results/*.txt.

    For each file, the first line is expected to look like '视频时长: <label>';
    labels are tallied into the LABELS buckets. Files whose mtime is more than
    2 minutes behind the newest file are reported as "skipped" (presumably not
    touched by the most recent processing run — heuristic, not authoritative).
    Prints a human-readable summary and writes duration_counts.json.
    """
    files = sorted(Path('results').glob('*.txt'))
    if not files:
        print("No txt files under results/")
        return

    # Tally the duration label parsed from each file's first line.
    label_counts = Counter()
    for p in files:
        line = read_first_line(p)
        if line.startswith('视频时长:'):
            label_counts[line.split(':', 1)[1].strip()] += 1

    # infer skipped by recent modification time threshold (2 minutes).
    # stat() each file exactly once and reuse the result: the original
    # re-statted per file for both partitions, which was redundant and racy
    # (mtime could change between calls); it also built an unused 'updated'
    # list.
    mtimes = [p.stat().st_mtime for p in files]
    threshold = max(mtimes) - 120  # 2 minutes window
    skipped = [p for p, m in zip(files, mtimes) if m <= threshold]

    # print text summary
    print(f"Total files: {len(files)}")
    print("Distribution:")
    for label in LABELS:
        # Counter returns 0 for missing keys without inserting them.
        print(f"  {label} -> {label_counts[label]}")
    print(f"Skipped total: {len(skipped)}")
    print("Skipped files:")
    for p in skipped:
        print(f"  {p.name}")

    # also write a small json for reliable capture
    data = {
        "total": len(files),
        "distribution": {label: label_counts[label] for label in LABELS},
        "skipped_total": len(skipped),
        "skipped_files": [p.name for p in skipped],
    }
    Path('duration_counts.json').write_text(json.dumps(data, ensure_ascii=False, indent=2), encoding='utf-8')


if __name__ == '__main__':
    # Run the summary only when executed as a script, not on import.
    main()
