import os
import re
import csv
from typing import List, Dict, Tuple
from PIL import Image
import numpy as np
import torch
from torch.utils.data import Dataset
import torchvision.transforms as T


def _read_csv_rows(csv_path: str) -> List[Dict[str, str]]:
    rows = []
    with open(csv_path, 'r', encoding='utf-8') as f:
        reader = csv.reader(f)
        header = next(reader)
        for r in reader:
            obj = {header[i]: r[i] for i in range(len(header))}
            rows.append(obj)
    return rows


def _parse_seg_index_from_name(name: str) -> int:
    m = re.search(r"seg(\d+)", name, re.IGNORECASE)
    if not m:
        return -1
    try:
        return int(m.group(1))
    except Exception:
        return -1


class VideoMAEClipDataset(Dataset):
    """Frame-folder video clip dataset for VideoMAE-style training.

    Reads a CSV whose rows each describe one frame image ('path', 'label',
    and optionally 'subject' / 'group'), groups the frames into clips keyed
    by (subject, group, segment index parsed from the filename), and serves
    each clip as a fixed-length stack of normalized RGB frames.

    __getitem__ returns:
        x: float tensor of shape (num_frames, 3, image_size, image_size),
           normalized to [-1, 1].
        y: long tensor holding the class index of the clip's label.
    """

    def __init__(self, csv_path: str, root_dir: str, num_frames: int = 16, image_size: int = 224, is_train: bool = True, label2idx: Dict[str, int] = None, label_remap: Dict[str, int] = None, drop_unmapped: bool = False):
        """
        Args:
            csv_path: CSV listing one frame per row ('path', 'label', optional
                'subject' / 'group').
            root_dir: Directory that frame paths in the CSV are relative to.
            num_frames: Frames sampled per clip.
            image_size: Square side length frames are resized to.
            is_train: Training mode; affects frame sampling when a clip has
                fewer frames than num_frames.
            label2idx: Optional fixed label -> class-index mapping (e.g. to
                share between train/val splits). Built from the data if None.
            label_remap: Optional label -> label remapping applied before
                indexing (keys/values are normalized to str).
            drop_unmapped: When a remap is given, drop rows whose label is
                absent from it instead of keeping the raw label.
        """
        self.root_dir = root_dir
        self.num_frames = int(num_frames)
        self.image_size = int(image_size)
        self.is_train = bool(is_train)
        self.label_remap = {str(k): str(v) for k, v in label_remap.items()} if label_remap else None
        self.drop_unmapped = bool(drop_unmapped)

        # Group per-frame rows into clips keyed by (subject, group, seg_idx).
        groups: Dict[Tuple[str, str, int], Dict] = {}
        for row in _read_csv_rows(csv_path):
            rel = row['path']
            label = str(row['label']).strip()
            # Normalize float-looking labels ("2.0" -> "2") so CSV export
            # quirks don't split one class into several.
            try:
                as_float = float(label)
                if abs(as_float - int(as_float)) < 1e-6:
                    label = str(int(as_float))
            except Exception:
                pass
            if self.label_remap is not None:
                if label in self.label_remap:
                    label = self.label_remap[label]
                elif self.drop_unmapped:
                    continue
            subject = row.get('subject', '')
            group = row.get('group', '')
            seg_idx = _parse_seg_index_from_name(os.path.basename(rel))
            key = (subject, group, seg_idx)
            if key not in groups:
                groups[key] = {'frames': [], 'label': label, 'subject': subject, 'group': group, 'seg_idx': seg_idx}
            groups[key]['frames'].append(rel)

        # NOTE(review): lexicographic sort assumes zero-padded frame names
        # ("frame_010" not "frame_10") -- confirm against the dataset layout.
        for clip in groups.values():
            clip['frames'].sort()
        self.clips = list(groups.values())

        if label2idx is None:
            # Sorted for a deterministic label -> index assignment.
            labels = sorted({c['label'] for c in self.clips})
            self.label2idx = {l: i for i, l in enumerate(labels)}
        else:
            self.label2idx = dict(label2idx)
        self.idx2label = {i: l for l, i in self.label2idx.items()}

        self.transform = T.Compose([
            T.Resize((self.image_size, self.image_size)),
            T.ToTensor(),
            # Map [0, 1] -> [-1, 1] per channel.
            T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
        ])

    def __len__(self):
        """Number of clips (not frames)."""
        return len(self.clips)

    def _sample_indices(self, n: int, k: int) -> List[int]:
        """Choose k frame indices from n available frames, in temporal order.

        n >= k: evenly spaced subsampling over the whole clip.
        n <  k: training -> random sampling with replacement (sorted so the
                clip stays temporally ordered); eval -> deterministic padding
                by repeating the last frame.
        Returns [] when either n or k is non-positive.
        """
        if n <= 0 or k <= 0:
            return []
        if n >= k:
            # Spacing (n-1)/(k-1) >= 1, so the floored values are strictly
            # increasing; np.unique is kept for safety and returns sorted ints.
            arr = np.linspace(0, n - 1, num=k)
            return np.unique(arr.astype(np.int64)).tolist()
        idx = np.arange(n)
        if self.is_train:
            rep = np.random.choice(idx, size=k, replace=True)
            # BUGFIX: np.random.choice returns indices in random order, which
            # would temporally shuffle the frames; sort to preserve order.
            rep = np.sort(rep)
        else:
            rep = np.pad(idx, (0, k - n), mode='edge')
        return rep.astype(np.int64).tolist()

    def __getitem__(self, idx):
        """Load, transform, and stack the sampled frames of clip `idx`."""
        info = self.clips[idx]
        frames = info['frames']
        sel = self._sample_indices(len(frames), self.num_frames)
        imgs = []
        for i in sel:
            p = os.path.join(self.root_dir, frames[int(i)])
            # Context manager ensures the file handle is closed promptly.
            with Image.open(p) as im:
                im = im.convert('RGB')
                imgs.append(self.transform(im))
        x = torch.stack(imgs, dim=0)  # (num_frames, 3, H, W)
        y = torch.tensor(self.label2idx[info['label']], dtype=torch.long)
        return x, y
