import os
import sys
import json
import re
import types
import csv
import numpy as np
import random
import math
import subprocess
from collections import defaultdict
import time

# Make the vendored LibEER checkout importable: prepend both the repo root
# and the inner package directory to sys.path (skipping paths already there).
PROJECT_ROOT = os.path.dirname(os.path.dirname(__file__))
LIBEER_REPO_ROOT = os.path.join(PROJECT_ROOT, 'LibEER')
LIBEER_PKG_ROOT = os.path.join(LIBEER_REPO_ROOT, 'LibEER')
for p in [LIBEER_PKG_ROOT, LIBEER_REPO_ROOT]:
    if p not in sys.path:
        sys.path.insert(0, p)

from data_utils.split import get_split_index

# Optional dependencies: bind to None when unavailable so the module still
# imports; availability is re-checked at the use sites (e.g. main() raises
# when cv2 is missing, mediapipe only when face_detector == 'mediapipe').
try:
    import cv2
except Exception as e:
    cv2 = None
try:
    import pickle
except Exception:
    pickle = None
try:
    import mediapipe as mp
except Exception as e:
    mp = None


def ensure_dir(p):
    """Create directory *p* (including parents); no-op if it already exists."""
    if not os.path.isdir(p):
        os.makedirs(p, exist_ok=True)


def list_videos(root, exts):
    """Yield full paths of files under *root* whose lowercase extension is in *exts*."""
    for dirpath, _dirnames, filenames in os.walk(root):
        for filename in filenames:
            if os.path.splitext(filename)[1].lower() in exts:
                yield os.path.join(dirpath, filename)


def parse_subject(path, pattern):
    """Extract a normalized subject id from *path* using *pattern* (group 1).

    Numeric tokens become "sNN" (zero-padded); other tokens become
    "s<token>". Returns None when the case-insensitive search fails.
    """
    match = re.search(pattern, path, re.IGNORECASE)
    if match is None:
        return None
    token = match.group(1)
    return f"s{int(token):02d}" if token.isdigit() else f"s{token}"


def parse_trial(basename, pattern):
    """Extract a normalized trial id from *basename* using *pattern* (group 1).

    Numeric tokens become "trialNN" (zero-padded); other tokens become
    "trial<token>". Returns None when the case-insensitive search fails.

    Note: re.Match.group(1) is always a string (or None), so the sequence
    handling the original carried here was dead code and has been removed.
    """
    m = re.search(pattern, basename, re.IGNORECASE)
    if not m:
        return None
    g = m.group(1)
    # str() keeps the original behavior for a matched-but-empty group (None).
    if str(g).isdigit():
        return f"trial{int(g):02d}"
    return f"trial{g}"


def unique_linspace_indices(start, end_exclusive, k):
    """Return min(k, range length) unique, sorted integer indices spread
    evenly over [start, end_exclusive).

    Evenly spaced points are truncated to ints; any truncation collisions
    are topped up with indices sampled *without replacement* from the unused
    part of the range, so the result is guaranteed to hold exactly
    min(k, length) values (the original's single batch of random extras
    could itself collide and return fewer). Returns [] for an empty range
    or non-positive k. The top-up path uses np.random, so seed numpy for
    reproducibility.
    """
    length = max(0, end_exclusive - start)
    if length <= 0 or k <= 0:
        return []
    k = min(k, length)
    arr = np.linspace(start, end_exclusive - 1, num=k)
    idx = np.unique(arr.astype(np.int64))
    if idx.size < k:
        # Sample the shortfall from indices not already chosen; pool size is
        # length - idx.size >= k - idx.size, so replace=False is safe.
        pool = np.setdiff1d(np.arange(start, end_exclusive, dtype=np.int64), idx)
        extra = np.random.choice(pool, size=k - idx.size, replace=False)
        idx = np.unique(np.concatenate([idx, extra]))
    return idx.tolist()


def clamp(v, lo, hi):
    """Constrain *v* to [lo, hi], computed as max(lo, min(hi, v))."""
    upper_bounded = min(hi, v)
    return max(lo, upper_bounded)


def expand_box(x, y, w, h, margin, W, H):
    """Grow box (x, y, w, h) by *margin* (fraction of w/h) on every side,
    clipped to a W x H image. Returns the new integer (x, y, w, h)."""
    center_x = x + w / 2.0
    center_y = y + h / 2.0
    half_w = w * (1.0 + margin * 2.0) / 2.0
    half_h = h * (1.0 + margin * 2.0) / 2.0
    left = max(0, min(W - 1, int(round(center_x - half_w))))
    top = max(0, min(H - 1, int(round(center_y - half_h))))
    right = max(1, min(W, int(round(center_x + half_w))))
    bottom = max(1, min(H, int(round(center_y + half_h))))
    return left, top, right - left, bottom - top


def safe_imwrite(path, img):
    """Write image *img* to *path*, tolerating non-ASCII paths.

    Encodes in memory and dumps the buffer via numpy's tofile; on any
    exception falls back to a plain cv2.imwrite. Returns True on success,
    False when in-memory encoding reports failure or the fallback fails.
    """
    suffix = os.path.splitext(path)[1]
    try:
        encoded_ok, encoded = cv2.imencode(suffix, img)
        if not encoded_ok:
            return False
        encoded.tofile(path)
    except Exception:
        return cv2.imwrite(path, img)
    return True


def init_mediapipe_detector(args):
    """Build a MediaPipe FaceDetection instance configured from *args*.

    Reads args.mediapipe_model (default 0) and
    args.mediapipe_min_confidence (default 0.6). Returns None when the
    mediapipe package is unavailable or construction raises.
    """
    if mp is None:
        return None
    try:
        return mp.solutions.face_detection.FaceDetection(
            model_selection=int(getattr(args, 'mediapipe_model', 0)),
            min_detection_confidence=float(getattr(args, 'mediapipe_min_confidence', 0.6)),
        )
    except Exception:
        return None


def detect_face_bbox_mediapipe(img_bgr, mp_detector, min_face):
    """Return the largest detected face box (x, y, w, h) in pixels, or None.

    Converts BGR->RGB for MediaPipe, scales each relative bounding box to
    pixel coordinates, discards faces smaller than *min_face* on either
    side, and keeps the one with the largest area. Any failure yields None.
    """
    try:
        rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
        result = mp_detector.process(rgb)
        detections = getattr(result, 'detections', None) if result else None
        if not detections:
            return None
        H, W = img_bgr.shape[:2]
        best_box, best_area = None, 0
        for detection in detections:
            loc = getattr(detection, 'location_data', None)
            rel = getattr(loc, 'relative_bounding_box', None) if loc is not None else None
            if rel is None:
                continue
            px = max(0, int(round(rel.xmin * W)))
            py = max(0, int(round(rel.ymin * H)))
            pw = int(round(rel.width * W))
            ph = int(round(rel.height * H))
            if pw < min_face or ph < min_face:
                continue
            area = pw * ph
            if area > best_area:
                best_area = area
                best_box = (px, py, pw, ph)
        return best_box
    except Exception:
        return None


def crop_and_resize(img_bgr, box, size, margin):
    """Crop *box* (expanded by *margin* via expand_box) out of *img_bgr* and
    resize it to size x size. Returns None when the clipped crop is empty."""
    H, W = img_bgr.shape[:2]
    bx, by, bw, bh = box
    bx, by, bw, bh = expand_box(bx, by, bw, bh, margin, W, H)
    # Re-clip the corners defensively before slicing.
    x1 = max(0, min(W - 1, bx))
    y1 = max(0, min(H - 1, by))
    x2 = max(1, min(W, bx + bw))
    y2 = max(1, min(H, by + bh))
    region = img_bgr[y1:y2, x1:x2]
    if region.size == 0:
        return None
    return cv2.resize(region, (size, size), interpolation=cv2.INTER_AREA)


def center_crop_resize(img_bgr, size):
    """Resize the largest centered square of *img_bgr* to size x size.
    Returns None if the crop comes out empty."""
    H, W = img_bgr.shape[:2]
    side = min(H, W)
    top = (H - side) // 2
    left = (W - side) // 2
    square = img_bgr[top:top + side, left:left + side]
    if square.size == 0:
        return None
    return cv2.resize(square, (size, size), interpolation=cv2.INTER_AREA)


def _std_au_name(name):
    m = re.search(r"(\d+)", str(name))
    if not m:
        return None
    return f"AU{int(m.group(1)):02d}"


def build_au_header(args):
    """Build the CSV header for per-window AU statistics.

    Columns: fixed metadata (subject/group/label/window_index/start_time/
    end_time/valid_ratio/frame_count), then mean/std/max/ratio columns for
    every AU in args.au_list, then one "cooc_<A>_<B>_ratio" column per valid
    2-element pair in args.cooc_pairs. Returns (columns, normalized_aus).
    """
    default_aus = ["AU1", "AU2", "AU4", "AU6", "AU7", "AU10", "AU12", "AU14",
                   "AU17", "AU23", "AU24", "AU27", "AU45"]
    au_list = getattr(args, 'au_list', default_aus) or []
    # Normalize each name once (the original called _std_au_name twice per entry).
    aus = [std for std in (_std_au_name(a) for a in au_list) if std is not None]
    cols = [
        'subject',
        'group',
        'label',
        'window_index',
        'start_time',
        'end_time',
        'valid_ratio',
        'frame_count',
    ]
    for au in aus:
        cols.extend([f"{au}_mean", f"{au}_std", f"{au}_max", f"{au}_ratio"])
    for pair in getattr(args, 'cooc_pairs', []) or []:
        if isinstance(pair, (list, tuple)) and len(pair) == 2:
            a = _std_au_name(pair[0])
            b = _std_au_name(pair[1])
            if a and b:
                cols.append(f"cooc_{a}_{b}_ratio")
    return cols, aus


def build_au_sequence_header(args):
    """Return the index-CSV header for per-window AU sequence files.

    *args* is accepted for symmetry with build_au_header but is unused.
    """
    return [
        'path',
        'subject',
        'group',
        'label',
        'window_index',
        'start_time',
        'end_time',
        'valid_ratio',
        'frame_count',
    ]


def sliding_time_windows(duration_s, win_s, stride_s):
    """List (start, end) windows of length *win_s* every *stride_s* seconds
    over [0, duration_s]; the final window is truncated at duration_s.
    Any non-positive argument yields an empty list."""
    if duration_s <= 0 or win_s <= 0 or stride_s <= 0:
        return []
    windows = []
    start = 0.0
    # Small epsilon tolerates float accumulation in `start`.
    while start + win_s <= duration_s + 1e-6:
        windows.append((start, min(duration_s, start + win_s)))
        start += stride_s
    return windows


def read_openface_csv(csv_path):
    """Parse an OpenFace AU output CSV into aligned numpy arrays.

    Returns a dict with 'timestamp' (float64), 'confidence' (float32),
    'success' (int32), and one float32 array per "AUNN_r" column found.
    Returns None when the file is missing, has no header, or lacks the
    required timestamp/confidence/success/AU columns, or when no data row
    parses. A row whose timestamp/confidence/success triple does not parse
    is skipped *whole* (the original appended the timestamp before the
    failing cell, leaving the arrays misaligned); an unparsable AU cell
    becomes 0.0.
    """
    if not os.path.isfile(csv_path):
        return None
    with open(csv_path, 'r', encoding='utf-8') as fh:
        reader = csv.reader(fh)
        header = next(reader, None)
        if header is None:
            return None
        # OpenFace headers carry leading spaces; strip for name lookup.
        col_of = {name.strip(): i for i, name in enumerate(header)}
        ts_col = col_of.get('timestamp')
        conf_col = col_of.get('confidence')
        succ_col = col_of.get('success')
        au_cols = {name: i for name, i in col_of.items()
                   if re.match(r'^AU\d{2}_r$', name)}
        if ts_col is None or conf_col is None or succ_col is None or not au_cols:
            return None
        ts = []
        conf = []
        succ = []
        au_vals = {name: [] for name in au_cols}
        for row in reader:
            # Parse the required triple atomically so the arrays stay aligned.
            try:
                t = float(row[ts_col])
                c = float(row[conf_col])
                s = int(float(row[succ_col]))
            except Exception:
                continue
            ts.append(t)
            conf.append(c)
            succ.append(s)
            for name, i in au_cols.items():
                try:
                    au_vals[name].append(float(row[i]))
                except Exception:
                    au_vals[name].append(0.0)
        if len(ts) == 0:
            return None
        data = {
            'timestamp': np.array(ts, dtype=np.float64),
            'confidence': np.array(conf, dtype=np.float32),
            'success': np.array(succ, dtype=np.int32),
        }
        for name, v in au_vals.items():
            data[name] = np.array(v, dtype=np.float32)
        return data


def _maybe_run_openface(video_path, args, out_dir):
    bin_path = getattr(args, 'openface_bin', None)
    auto = bool(getattr(args, 'auto_run_openface', False))
    if not auto or not bin_path or not os.path.isfile(bin_path):
        return False
    try:
        os.makedirs(out_dir, exist_ok=True)
        cmd = [
            bin_path,
            '-f', video_path,
            '-out_dir', out_dir,
            '-aus',
            '-q'
        ]
        subprocess.run(cmd, check=False)
        return True
    except Exception:
        return False


def process_video_au(video_path, subject, args, au_rows, group_map, label_cache, group_label_map, openface_out_root, header, aus):
    """Turn one video's OpenFace AU output into per-window statistic rows.

    For each sliding time window a row
    [subject, group, label, window_index, start, end, valid_ratio,
    frame_count, <per-AU mean/std/max/ratio ...>, <co-occurrence ratios ...>]
    (layout described by *header*) is appended to *au_rows* and to
    *group_map[group_key]* (both mutated in place). Returns the number of
    rows produced. Side effects: may launch OpenFace via _maybe_run_openface
    when the expected CSV is missing, and records the group's label in
    *group_label_map*.
    """
    # Probe that the video is readable at all before doing any work.
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return 0
    cap.release()
    base = os.path.splitext(os.path.basename(video_path))[0]
    # Grouping key: either one group per video file, or per parsed trial id.
    group_by = str(getattr(args, 'group_by', 'video')).lower()
    trial_rgx = getattr(args, 'trial_regex', r'trial(\d+)')
    if group_by == 'trial':
        trial_id = parse_trial(base, trial_rgx)
        group_id = trial_id if trial_id is not None else base
    else:
        group_id = base
    group_key = f"{subject}/{group_id}"
    # Emotion labels are only resolvable per trial (DEAP labels are per
    # trial); otherwise — or when lookup fails — the subject id is the label.
    label_type = str(getattr(args, 'label_type', 'subject')).lower()
    if label_type.startswith('emotion') and group_by == 'trial':
        label_name = compute_emotion_label_for_group(subject, group_id, args, label_cache)
        if label_name is None:
            label_name = subject
    else:
        label_name = subject
    if group_key not in group_label_map:
        group_label_map[group_key] = label_name
    out_dir = openface_out_root
    os.makedirs(out_dir, exist_ok=True)
    csv_path = os.path.join(out_dir, f"{base}.csv")
    # Run OpenFace on demand when its CSV has not been produced yet.
    if not os.path.isfile(csv_path):
        _maybe_run_openface(video_path, args, out_dir)
    data = read_openface_csv(csv_path)
    if data is None:
        return 0
    ts = data['timestamp']
    conf = data['confidence']
    succ = data['success']
    # A frame is "valid" when tracking succeeded with enough confidence.
    valid_mask = (succ == 1) & (conf >= float(getattr(args, 'min_frame_confidence', 0.8)))
    win_s = float(getattr(args, 'segment_seconds', getattr(args, 'window_seconds', 5)))
    stride_s = float(getattr(args, 'window_stride_seconds', win_s))
    if ts.size == 0:
        return 0
    ts_min = float(np.min(ts))
    ts_max = float(np.max(ts))
    wins = []
    # Prefer the container's duration (frame count / fps); fall back to the
    # span of OpenFace timestamps when the video metadata is unusable.
    vid_dur = None
    try:
        cap = cv2.VideoCapture(video_path)
        if cap.isOpened():
            fps = cap.get(cv2.CAP_PROP_FPS) or 0.0
            total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT) or 0.0
            if fps and total_frames:
                vid_dur = float(total_frames) / float(fps)
        cap.release()
    except Exception:
        pass
    if vid_dur is None or not np.isfinite(vid_dur) or vid_dur <= 0:
        vid_dur = ts_max - ts_min
    # Sliding windows over the duration, shifted to start at the first timestamp.
    s = 0.0
    while s + win_s <= vid_dur + 1e-6:
        e = min(vid_dur, s + win_s)
        wins.append((ts_min + s, ts_min + e))
        s += stride_s
    thr = float(getattr(args, 'au_threshold', 0.5))
    cooc = getattr(args, 'cooc_pairs', []) or []
    count = 0
    for wi, (st, ed) in enumerate(wins):
        # Frames falling in [st, ed); skip windows with no frames, no valid
        # frames, or a valid ratio below the configured minimum.
        in_win = (ts >= st) & (ts < ed)
        if not np.any(in_win):
            continue
        in_win_valid = in_win & valid_mask
        total_cnt = int(np.sum(in_win))
        valid_cnt = int(np.sum(in_win_valid))
        if total_cnt <= 0 or valid_cnt <= 0:
            continue
        vr = valid_cnt / float(total_cnt)
        if vr < float(getattr(args, 'min_window_valid_ratio', 0.8)):
            continue
        row = [subject, group_key, group_label_map[group_key], wi, round(float(st), 3), round(float(ed), 3), round(float(vr), 3), total_cnt]
        # Per-AU statistics over valid frames; AUs absent from the OpenFace
        # output contribute zero statistics.
        for au in aus:
            col = f"{au}_r"
            if col in data:
                vals = data[col][in_win_valid]
            else:
                vals = np.zeros((valid_cnt,), dtype=np.float32)
            if vals.size == 0:
                mean_v = 0.0
                std_v = 0.0
                max_v = 0.0
                ratio_v = 0.0
            else:
                mean_v = float(np.mean(vals))
                std_v = float(np.std(vals))
                max_v = float(np.max(vals))
                # Fraction of valid frames with AU intensity above threshold.
                ratio_v = float(np.mean(vals > thr))
            row.extend([mean_v, std_v, max_v, ratio_v])
        # Co-occurrence: fraction of valid frames where both AUs of a pair
        # exceed the threshold simultaneously.
        for pair in cooc:
            if isinstance(pair, (list, tuple)) and len(pair) == 2:
                a = _std_au_name(pair[0])
                b = _std_au_name(pair[1])
                if a and b:
                    ca = f"{a}_r"
                    cb = f"{b}_r"
                    if ca in data and cb in data:
                        va = data[ca][in_win_valid]
                        vb = data[cb][in_win_valid]
                        if va.size == 0 or vb.size == 0:
                            cooc_ratio = 0.0
                        else:
                            cooc_ratio = float(np.mean((va > thr) & (vb > thr)))
                    else:
                        cooc_ratio = 0.0
                    row.append(cooc_ratio)
        au_rows.append(row)
        group_map[group_key].append(row)
        count += 1
    return count


def process_video_au_sequence(video_path, subject, args, rows, group_map, label_cache, group_label_map, openface_out_root, save_root, header, aus):
    """Export one video's AU intensities as per-window sequence arrays.

    For every sliding time window, valid frames' AU values are stacked into
    a (frames, len(aus)) float32 array saved under save_root/<subject>/ as
    .npy or .npz (args.au_save_format). An index row matching *header*
    ([path, subject, group, label, window_index, start, end, valid_ratio,
    frame_count]) is appended to *rows* and *group_map[group_key]*.
    Returns the number of windows written. Side effects: may launch OpenFace
    when the cached CSV is missing, and records the group's label in
    *group_label_map*.
    """
    base = os.path.splitext(os.path.basename(video_path))[0]
    # Grouping key: either one group per video file, or per parsed trial id.
    group_by = str(getattr(args, 'group_by', 'video')).lower()
    trial_rgx = getattr(args, 'trial_regex', r'trial(\d+)')
    if group_by == 'trial':
        trial_id = parse_trial(base, trial_rgx)
        group_id = trial_id if trial_id is not None else base
    else:
        group_id = base
    group_key = f"{subject}/{group_id}"
    # Emotion labels are only resolvable per trial; otherwise the subject id
    # doubles as the label.
    label_type = str(getattr(args, 'label_type', 'subject')).lower()
    if label_type.startswith('emotion') and group_by == 'trial':
        label_name = compute_emotion_label_for_group(subject, group_id, args, label_cache)
        if label_name is None:
            label_name = subject
    else:
        label_name = subject
    if group_key not in group_label_map:
        group_label_map[group_key] = label_name
    out_dir = openface_out_root
    os.makedirs(out_dir, exist_ok=True)
    csv_path = os.path.join(out_dir, f"{base}.csv")
    # Run OpenFace on demand when its CSV has not been produced yet.
    if not os.path.isfile(csv_path):
        _maybe_run_openface(video_path, args, out_dir)
    data = read_openface_csv(csv_path)
    if data is None:
        return 0
    ts = data['timestamp']
    conf = data['confidence']
    succ = data['success']
    # A frame is "valid" when tracking succeeded with enough confidence.
    valid_mask = (succ == 1) & (conf >= float(getattr(args, 'min_frame_confidence', 0.8)))
    win_s = float(getattr(args, 'segment_seconds', getattr(args, 'window_seconds', 5)))
    stride_s = float(getattr(args, 'window_stride_seconds', win_s))
    if ts.size == 0:
        return 0
    ts_min = float(np.min(ts))
    ts_max = float(np.max(ts))
    wins = []
    # Prefer the container's duration (frame count / fps); fall back to the
    # span of OpenFace timestamps when the video metadata is unusable.
    vid_dur = None
    try:
        cap = cv2.VideoCapture(video_path)
        if cap.isOpened():
            fps = cap.get(cv2.CAP_PROP_FPS) or 0.0
            total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT) or 0.0
            if fps and total_frames:
                vid_dur = float(total_frames) / float(fps)
        cap.release()
    except Exception:
        pass
    if vid_dur is None or not np.isfinite(vid_dur) or vid_dur <= 0:
        vid_dur = ts_max - ts_min
    # Sliding windows over the duration, shifted to start at the first timestamp.
    s = 0.0
    while s + win_s <= vid_dur + 1e-6:
        e = min(vid_dur, s + win_s)
        wins.append((ts_min + s, ts_min + e))
        s += stride_s
    frame_step = int(getattr(args, 'frame_sample_stride', 1))
    max_frames = int(getattr(args, 'max_frames_per_segment', 0) or 0)
    save_fmt = str(getattr(args, 'au_save_format', 'npy')).lower()
    keep_ts = bool(getattr(args, 'keep_timestamps', True))
    count = 0
    subj_dir = os.path.join(save_root, subject)
    ensure_dir(subj_dir)
    for wi, (st, ed) in enumerate(wins):
        # Skip windows with no frames, no valid frames, or too low a valid ratio.
        in_win = (ts >= st) & (ts < ed)
        if not np.any(in_win):
            continue
        in_win_valid = in_win & valid_mask
        total_cnt = int(np.sum(in_win))
        valid_cnt = int(np.sum(in_win_valid))
        if total_cnt <= 0 or valid_cnt <= 0:
            continue
        vr = valid_cnt / float(total_cnt)
        if vr < float(getattr(args, 'min_window_valid_ratio', 0.8)):
            continue
        idx = np.where(in_win_valid)[0]
        # Optional temporal subsampling, then uniform capping at max_frames.
        if frame_step > 1:
            idx = idx[::frame_step]
        if idx.size == 0:
            continue
        if max_frames > 0 and idx.size > max_frames:
            sel = np.linspace(0, idx.size - 1, num=max_frames)
            sel = np.unique(sel.astype(np.int64))
            idx = idx[sel]
        # Stack per-AU columns into a (frames, n_aus) matrix; AUs missing
        # from the OpenFace output become zero columns.
        X = []
        for au in aus:
            col = f"{au}_r"
            if col in data:
                vals = data[col][idx]
            else:
                vals = np.zeros((idx.size,), dtype=np.float32)
            X.append(vals.astype(np.float32))
        if len(X) == 0:
            continue
        seq = np.stack(X, axis=1)
        # Timestamps relative to the window start (stored only in npz files).
        ts_rel = ts[idx] - st
        if save_fmt == 'npz':
            out_name = f"{subject}_{group_id}_seg{wi:03d}.npz"
            out_path = os.path.join(subj_dir, out_name)
            save_kwargs = {"au": seq, "aus": np.array(aus)}
            if keep_ts:
                save_kwargs["ts"] = ts_rel
            np.savez(out_path, **save_kwargs)
        else:
            out_name = f"{subject}_{group_id}_seg{wi:03d}.npy"
            out_path = os.path.join(subj_dir, out_name)
            np.save(out_path, seq)
        # Index rows store paths relative to the dataset output directory.
        rel = os.path.relpath(out_path, os.path.join(args.output_dir, args.dataset))
        row = [rel, subject, group_key, group_label_map[group_key], wi, round(float(st), 3), round(float(ed), 3), round(float(vr), 3), int(seq.shape[0])]
        rows.append(row)
        group_map[group_key].append(row)
        count += 1
    return count


def process_video(video_path, subject, args, mp_detector, img_root, writer, total_count, meta_rows, group_map, label_cache, group_label_map):
    """Sample face crops from one video and index them.

    The video is cut into fixed-length frame segments (args.segment_seconds;
    one whole-video segment when unset), args.frames_per_segment frames are
    sampled evenly per segment, and each frame is saved as a JPEG face crop
    (MediaPipe detection with margin when *mp_detector* is given, otherwise
    a center square crop). One [path, subject, group, label] row per image
    goes to the CSV *writer*, *meta_rows* and *group_map[group_key]*.
    *total_count* is a 1-element list used as a mutable counter across
    videos. Returns the number of images indexed for this video (existing
    files are counted without re-writing when args.skip_existing).
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return 0
    fps = cap.get(cv2.CAP_PROP_FPS)
    # Fall back to a common default when fps metadata is missing/invalid.
    if fps is None or fps <= 0:
        fps = 25.0
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT) or 0)
    if total_frames <= 0:
        cap.release()
        return 0
    # Segment length in frames; None means a single whole-video segment.
    seg_len_frames = None
    if getattr(args, 'segment_seconds', None):
        try:
            seg_len_frames = max(1, int(round(float(args.segment_seconds) * fps)))
        except Exception:
            seg_len_frames = None
    segments = []
    if seg_len_frames is None:
        segments = [(0, total_frames)]
    else:
        s = 0
        while s < total_frames:
            e = min(total_frames, s + seg_len_frames)
            if e > s:
                segments.append((s, e))
            s = e
    base = os.path.splitext(os.path.basename(video_path))[0]
    # Grouping key: either one group per video file, or per parsed trial id.
    group_by = str(getattr(args, 'group_by', 'video')).lower()
    trial_rgx = getattr(args, 'trial_regex', r'trial(\d+)')
    if group_by == 'trial':
        trial_id = parse_trial(base, trial_rgx)
        group_id = trial_id if trial_id is not None else base
    else:
        group_id = base
    group_key = f"{subject}/{group_id}"
    # Generate the emotion label for this group (when the config asks for it).
    label_type = str(getattr(args, 'label_type', 'subject')).lower()
    label_name = None
    if label_type.startswith('emotion') and group_by == 'trial':
        label_name = compute_emotion_label_for_group(subject, group_id, args, label_cache)
        if label_name is None:
            label_name = subject
    else:
        label_name = subject
    if group_key not in group_label_map:
        group_label_map[group_key] = label_name
    subj_dir = os.path.join(img_root, subject)
    ensure_dir(subj_dir)
    saved = 0
    for si, (st, ed) in enumerate(segments):
        # Evenly spaced frame indices inside the segment.
        k = max(1, int(getattr(args, 'frames_per_segment', 1)))
        idxs = unique_linspace_indices(st, ed, k)
        for fi, fidx in enumerate(idxs):
            cap.set(cv2.CAP_PROP_POS_FRAMES, int(fidx))
            ok, frame = cap.read()
            if not ok or frame is None:
                continue
            # Try face detection first; fall back to a center crop.
            face_img = None
            if mp_detector is not None:
                bbox = detect_face_bbox_mediapipe(frame, mp_detector, int(getattr(args, 'min_face_size', 40)))
                if bbox is not None:
                    face_img = crop_and_resize(frame, bbox, int(getattr(args, 'img_size', 224)), float(getattr(args, 'face_margin', 0.2)))
            if face_img is None:
                face_img = center_crop_resize(frame, int(getattr(args, 'img_size', 224)))
            if face_img is None:
                continue
            out_name = f"{subject}_{base}_seg{si:03d}_f{int(fidx):06d}.jpg"
            out_path = os.path.join(subj_dir, out_name)
            # Reuse an already-written image (still index and count it).
            if os.path.isfile(out_path) and getattr(args, 'skip_existing', True):
                rel = os.path.relpath(out_path, os.path.join(args.output_dir, args.dataset))
                writer.writerow([rel, subject, group_key, group_label_map[group_key]])
                meta_rows.append([rel, subject, group_key, group_label_map[group_key]])
                group_map[group_key].append([rel, subject, group_key, group_label_map[group_key]])
                saved += 1
                total_count[0] += 1
                continue
            ok = safe_imwrite(out_path, face_img)
            if not ok:
                continue
            rel = os.path.relpath(out_path, os.path.join(args.output_dir, args.dataset))
            writer.writerow([rel, subject, group_key, group_label_map[group_key]])
            meta_rows.append([rel, subject, group_key, group_label_map[group_key]])
            group_map[group_key].append([rel, subject, group_key, group_label_map[group_key]])
            saved += 1
            total_count[0] += 1
    cap.release()
    return saved


def _numeric_suffix(s):
    m = re.search(r"(\d+)$", s)
    return int(m.group(1)) if m else 0


def split_and_save(meta_rows, group_map, args, save_base, header=None):
    """Split the collected groups into train/val/test and write CSVs.

    Uses LibEER's get_split_index to choose group (or subject) indices for
    the configured round, then writes
    save_base/<experiment_mode>/<split_type>/round_<ri>/{train,val,test}.csv
    containing the rows of the selected groups.

    NOTE(review): *meta_rows* is unused here — all rows come from
    *group_map*; verify whether the parameter can be dropped at call sites.
    """
    # Normalize fold_shuffle to the strings 'True'/'False' — presumably the
    # form get_split_index expects; confirm against LibEER.
    fold_shuffle = getattr(args, 'fold_shuffle', True)
    if isinstance(fold_shuffle, bool):
        fold_shuffle = 'True' if fold_shuffle else 'False'
    setting = types.SimpleNamespace(
        split_type=getattr(args, 'split_type', 'train-val-test'),
        experiment_mode=getattr(args, 'experiment_mode', 'subject-dependent'),
        fold_num=getattr(args, 'fold_num', 5),
        fold_shuffle=fold_shuffle,
        seed=getattr(args, 'seed', 2024),
        test_size=getattr(args, 'test_size', 0.2),
        val_size=getattr(args, 'val_size', 0.2),
        front=getattr(args, 'front', 9),
        sr=None,
    )

    # Re-seed so the split is reproducible regardless of prior random use.
    try:
        random.seed(setting.seed)
    except Exception:
        pass

    # round_index is 1-based; it selects one round of the splitter's output.
    ri = int(getattr(args, 'round_index', 1))
    splits = {'train': [], 'val': [], 'test': []}

    # Group keys are "<subject>/<group_id>"; bucket them per subject.
    subj2groups = defaultdict(list)
    for gk in group_map.keys():
        subj = gk.split('/', 1)[0]
        subj2groups[subj].append(gk)

    def apply_split(groups_list, tts):
        # Collect the rows of every group index chosen for this round.
        # An index list of [-1] appears to mark an unused split — TODO
        # confirm against get_split_index.
        ridx = max(0, min(ri - 1, len(tts['train']) - 1))
        for split_name in ['train', 'val', 'test']:
            idx_list = tts.get(split_name, [[]])[ridx]
            if len(idx_list) == 1 and idx_list[0] == -1:
                continue
            for gi in idx_list:
                gk = groups_list[gi]
                splits[split_name].extend(group_map[gk])

    exp_mode = str(setting.experiment_mode).lower()
    label_type = str(getattr(args, 'label_type', 'subject')).lower()
    # Locate the label column so emotion labels can drive the split.
    if header is not None and isinstance(header, (list, tuple)) and 'label' in header:
        label_idx = header.index('label')
    else:
        label_idx = -1
    if exp_mode == 'subject-independent':
        # Split whole subjects: every group of a subject lands on one side.
        subjects = sorted(subj2groups.keys())
        data = [None] * len(subjects)
        labels = [[s] for s in subjects]
        tts = get_split_index(data, labels, setting)
        ridx = max(0, min(ri - 1, len(tts['train']) - 1))
        def idx_to_set(name):
            idxs = tts.get(name, [[]])[ridx]
            return set(subjects[i] for i in idxs)
        train_subj = idx_to_set('train')
        val_subj = idx_to_set('val')
        test_subj = idx_to_set('test')
        for subj, gks in subj2groups.items():
            # Default to train; test wins over val when a subject is in both.
            target = 'train'
            if subj in test_subj:
                target = 'test'
            elif subj in val_subj:
                target = 'val'
            for gk in gks:
                splits[target].extend(group_map[gk])
    else:
        # Subject-dependent: split each subject's groups independently.
        for subj, gks in subj2groups.items():
            groups_list = list(gks)
            if len(groups_list) == 0:
                continue
            data = [None] * len(groups_list)
            if label_type.startswith('emotion'):
                # Use each group's first row's label for the split labels.
                labels = [[group_map[gk][0][label_idx]] for gk in groups_list]
            else:
                labels = [[subj] for _ in groups_list]
            tts = get_split_index(data, labels, setting)
            apply_split(groups_list, tts)

    split_base = os.path.join(save_base, getattr(args, 'experiment_mode', 'subject-dependent'), getattr(args, 'split_type', 'train-val-test'), f"round_{ri}")
    ensure_dir(split_base)
    for name in ['train', 'val', 'test']:
        out_csv = os.path.join(split_base, f"{name}.csv")
        with open(out_csv, 'w', newline='', encoding='utf-8') as f:
            w = csv.writer(f)
            if header is None:
                header = ['path', 'subject', 'group', 'label']
            w.writerow(header)
            for row in splits[name]:
                w.writerow(row)
    print(f"splits written to: {split_base} | train={len(splits['train'])}, val={len(splits['val'])}, test={len(splits['test'])}")

def main():
    """Entry point: read configs/video_face_config.json and run the pipeline.

    Modes (cfg['mode']):
      * 'au*' / 'openface' / 'pyfeat' — extract AU features from OpenFace
        CSVs as windowed statistics or raw sequences
        (cfg['au_representation']), write an index CSV, then the splits.
      * anything else — sample face images from videos into an index CSV
        (with optional MediaPipe face detection and several stop limits),
        then write the splits.
    """
    cfg_path = os.path.join(os.path.dirname(__file__), 'configs', 'video_face_config.json')
    if not os.path.isfile(cfg_path):
        raise FileNotFoundError(cfg_path)
    with open(cfg_path, 'r', encoding='utf-8') as f:
        cfg = json.load(f)
    args = types.SimpleNamespace(**cfg)
    # Seed both RNGs for reproducible frame sampling and splitting.
    try:
        random.seed(getattr(args, 'seed', 2024))
        np.random.seed(getattr(args, 'seed', 2024))
    except Exception:
        pass
    if cv2 is None:
        raise RuntimeError('需要安装 opencv-python')
    exts = [e.lower() for e in getattr(args, 'video_exts', ['.mp4', '.avi', '.mkv', '.mov'])]
    save_base = os.path.join(args.output_dir, args.dataset)
    mode = str(getattr(args, 'mode', 'images')).lower()
    print(f"[start] cfg={cfg_path} dataset={args.dataset} mode={mode} video_root={args.video_root} save_base={save_base}", flush=True)
    if mode.startswith('au') or mode in ['openface', 'pyfeat']:
        rep = str(getattr(args, 'au_representation', 'stats')).lower()
        if rep in ['sequence', 'seq', 'ts']:
            # AU sequence mode: per-window arrays on disk + an index CSV.
            au_rows = []
            group_map = defaultdict(list)
            label_cache = {}
            group_label_map = {}
            openface_out_root = getattr(args, 'openface_output_root', None)
            if not openface_out_root:
                openface_out_root = os.path.join(save_base, 'openface_out')
            header = build_au_sequence_header(args)
            _, aus = build_au_header(args)
            au_save_root = os.path.join(save_base, getattr(args, 'au_output_subdir', 'au_seq'))
            ensure_dir(au_save_root)
            for vp in list_videos(args.video_root, exts):
                subj = parse_subject(vp, getattr(args, 'subject_regex', r's(\d{2})'))
                if subj is None:
                    subj = 'unknown'
                _ = process_video_au_sequence(vp, subj, args, au_rows, group_map, label_cache, group_label_map, openface_out_root, au_save_root, header, aus)
            out_csv = os.path.join(save_base, getattr(args, 'output_au_csv_name', 'au_seq_index.csv'))
            ensure_dir(os.path.dirname(out_csv))
            with open(out_csv, 'w', newline='', encoding='utf-8') as fcsv:
                w = csv.writer(fcsv)
                w.writerow(header)
                for row in au_rows:
                    w.writerow(row)
            split_and_save(au_rows, group_map, args, save_base, header=header)
        else:
            # AU statistics mode: one CSV row of stats per window.
            au_rows = []
            group_map = defaultdict(list)
            label_cache = {}
            group_label_map = {}
            openface_out_root = getattr(args, 'openface_output_root', None)
            if not openface_out_root:
                openface_out_root = os.path.join(save_base, 'openface_out')
            header, aus = build_au_header(args)
            for vp in list_videos(args.video_root, exts):
                subj = parse_subject(vp, getattr(args, 'subject_regex', r's(\d{2})'))
                if subj is None:
                    subj = 'unknown'
                _ = process_video_au(vp, subj, args, au_rows, group_map, label_cache, group_label_map, openface_out_root, header, aus)
            out_csv = os.path.join(save_base, getattr(args, 'output_au_csv_name', 'au_segments.csv'))
            ensure_dir(os.path.dirname(out_csv))
            with open(out_csv, 'w', newline='', encoding='utf-8') as fcsv:
                w = csv.writer(fcsv)
                w.writerow(header)
                for row in au_rows:
                    w.writerow(row)
            split_and_save(au_rows, group_map, args, save_base, header=header)
    else:
        # Image mode: sample face crops from every video.
        img_root = os.path.join(save_base, 'images')
        ensure_dir(img_root)
        index_csv = os.path.join(save_base, getattr(args, 'index_csv_name', 'index.csv'))
        fd = str(getattr(args, 'face_detector', 'none')).lower()
        mp_detector = None
        if fd == 'mediapipe':
            if mp is None:
                raise RuntimeError('需要安装 mediapipe')
            mp_detector = init_mediapipe_detector(args)
        print_progress = bool(getattr(args, 'print_progress', True))
        # Optional early-stop limits: video count, image count, wall clock,
        # or the presence of a stop-flag file.
        max_videos = int(getattr(args, 'max_videos', 0) or 0)
        stop_after_n_images = int(getattr(args, 'stop_after_n_images', 0) or 0)
        stop_after_seconds = float(getattr(args, 'stop_after_seconds', 0) or 0)
        stop_flag_path = str(getattr(args, 'stop_flag_path', '') or os.path.join(save_base, 'STOP'))
        if print_progress:
            print(f"[info] face_detector={fd} images_dir={img_root} index_csv={index_csv}", flush=True)
            print(f"[info] limits: max_videos={max_videos} stop_after_n_images={stop_after_n_images} stop_after_seconds={stop_after_seconds} stop_flag={stop_flag_path}", flush=True)
        total = [0]  # 1-element list: mutable image counter shared with process_video
        meta_rows = []
        group_map = defaultdict(list)
        label_cache = {}
        group_label_map = {}
        ensure_dir(os.path.dirname(index_csv))
        with open(index_csv, 'w', newline='', encoding='utf-8') as fcsv:
            writer = csv.writer(fcsv)
            writer.writerow(['path', 'subject', 'group', 'label'])
            start_t = time.time()
            processed_videos = 0
            for vp in list_videos(args.video_root, exts):
                if print_progress:
                    print(f"[video] start {vp}", flush=True)
                subj = parse_subject(vp, getattr(args, 'subject_regex', r's(\d{2})'))
                if subj is None:
                    subj = 'unknown'
                saved = process_video(vp, subj, args, mp_detector, img_root, writer, total, meta_rows, group_map, label_cache, group_label_map)
                processed_videos += 1
                if print_progress:
                    print(f"[video] done {vp} saved={saved} total={total[0]}", flush=True)
                if max_videos > 0 and processed_videos >= max_videos:
                    if print_progress:
                        print("[stop] reach max_videos", flush=True)
                    break
                if stop_after_n_images > 0 and total[0] >= stop_after_n_images:
                    if print_progress:
                        print("[stop] reach stop_after_n_images", flush=True)
                    break
                if stop_after_seconds > 0 and (time.time() - start_t) >= stop_after_seconds:
                    if print_progress:
                        print("[stop] reach stop_after_seconds", flush=True)
                    break
                if stop_flag_path and os.path.isfile(stop_flag_path):
                    if print_progress:
                        print("[stop] stop_flag detected", flush=True)
                    break
        # Release MediaPipe resources if the detector exposes close().
        if mp_detector is not None and hasattr(mp_detector, 'close'):
            try:
                mp_detector.close()
            except Exception:
                pass
        print(f"saved_images={total[0]}", flush=True)
    # AU modes already wrote their splits in the branches above; only the
    # image mode defers split_and_save to here.
    if mode.startswith('au') or mode in ['openface', 'pyfeat']:
        pass
    else:
        split_and_save(meta_rows, group_map, args, save_base)


def load_deap_subject_labels(deap_root, subject):
    """Load the DEAP label matrix for *subject* ("sNN") from <deap_root>/sNN.dat.

    The .dat file is a latin1-encoded pickle whose dict payload carries a
    'labels' key. Returns a numpy array, or None when pickle is unavailable,
    the subject id is not numeric, the file is missing, unpickling fails, or
    the payload is not a dict containing 'labels'.
    """
    if pickle is None:
        return None
    sid_text = subject.lower().replace('s', '')
    try:
        subject_num = int(sid_text)
    except Exception:
        return None
    dat_path = os.path.join(deap_root, f"s{subject_num:02d}.dat")
    if not os.path.isfile(dat_path):
        return None
    try:
        with open(dat_path, 'rb') as fh:
            payload = pickle.load(fh, encoding='latin1')
    except Exception:
        return None
    if isinstance(payload, dict) and 'labels' in payload:
        return np.array(payload['labels'])
    return None


def compute_emotion_label_for_group(subject, group_id, args, label_cache):
    """Map a DEAP trial to a discrete emotion label string.

    Loads the subject's DEAP label matrix (columns: valence, arousal,
    dominance, liking — see col_map below) via load_deap_subject_labels,
    caching it in *label_cache*. For label_type 'emotion_va4' returns a
    four-quadrant valence/arousal label "va_<v><a>"; otherwise thresholds
    one dimension and returns "<dimension>_<0|1>". Returns None when the
    label path is unset/missing, *group_id* carries no trial number, or the
    subject's labels cannot be loaded.
    """
    deap_root = getattr(args, 'deap_label_path', None)
    if not deap_root or not os.path.isdir(deap_root):
        return None
    m = re.search(r"trial(\d+)", group_id, re.IGNORECASE)
    if not m:
        return None
    tnum = int(m.group(1))
    # DEAP trial indices 1..40 -> array indices 0..39
    tidx = max(1, min(40, tnum)) - 1
    if subject not in label_cache:
        arr = load_deap_subject_labels(deap_root, subject)
        if arr is None or arr.shape[0] < tidx + 1:
            return None
        label_cache[subject] = arr
    arr = label_cache[subject]
    ltype = str(getattr(args, 'label_type', 'emotion')).lower()
    if ltype == 'emotion_va4':
        # Four-quadrant label from independent valence/arousal thresholds.
        v = float(arr[tidx, 0])
        a = float(arr[tidx, 1])
        tv = float(getattr(args, 'emotion_threshold_valence', getattr(args, 'emotion_threshold', 5.0)))
        ta = float(getattr(args, 'emotion_threshold_arousal', getattr(args, 'emotion_threshold', 5.0)))
        vc = 1 if v >= tv else 0
        ac = 1 if a >= ta else 0
        return f"va_{vc}{ac}"
    else:
        # Binary label on a single chosen dimension (default: valence).
        emo_name = str(getattr(args, 'emotion_name', 'valence')).lower()
        col_map = {'valence': 0, 'arousal': 1, 'dominance': 2, 'liking': 3}
        col = col_map.get(emo_name, 0)
        val = float(arr[tidx, col])
        thr = float(getattr(args, 'emotion_threshold', 5.0))
        cls = 1 if val >= thr else 0
        return f"{emo_name}_{cls}"


# Script entry point: run the extraction pipeline described by the JSON config.
if __name__ == '__main__':
    main()
