"""
Non-invasive attention & point extraction script for VGGt

Features:
- Loads a VGGt model checkpoint (handles DDP prefix if needed).
- Registers forward hooks on selected attention modules to capture attention matrices.
- Runs inference over a dataset (uses training data instantiation via Hydra if available, or a provided dataset class).
- Writes per-sample attention matrices and point-cloud outputs to HDF5.

Usage examples (see README for details):
python tools/extract_attention.py \
    --ckpt /path/to/checkpoint.pt \
    --out_dir /path/to/attn_db \
    --layers 2 5 8 11 \
    --batch_size 1 \
    --head_avg True


python tools/extract_attention.py \
  --ckpt /data1/lqf/vggt/model.pt \
  --out_dir /data1/lqf/vggt/data/h5_file \
  --links_json /data1/lqf/co3d/co3d/links.json \
  --CO3D_ANNOT_DIR /data1/lqf/co3d_anno \
  --layers 2 5 8 11 \
  --max_samples 2 \
  --work_dir /data1/lqf/vggt/data/tmp_co3d


python tools/extract_attention.py \
  --ckpt /data1/lqf/vggt/model.pt \
  --out_dir /data1/lqf/vggt/data/h5_file \
  --links_json /data1/lqf/co3d/co3d/links.json \
  --CO3D_ANNOT_DIR /data1/lqf/co3d_anno \
  --work_dir /data1/lqf/vggt/data/tmp_co3d \
  --category parkingmeter \
  --max_samples 1 \
  --batch_size 1 \
  --num_workers 2 \
  --single_h5 \
  --head_avg \
  --layers 6 10 14 18

Notes:
- You may need to update `build_dataset()` to match your dataset factory or import the dataset class.
- The script is intentionally conservative: it detaches tensors to CPU ASAP and writes per-sample groups in HDF5.

"""

import os
import re
import argparse
import json
import h5py
import time
from pathlib import Path
from typing import List, Dict, Any, Optional
import tempfile
import shutil
import zipfile
import requests

import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np

import sys
sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
# Add training dir so imports like `from data.dataset_util import ...` succeed
sys.path.insert(0, str(Path(__file__).resolve().parent.parent / 'training'))

# Attempt to import model and dataset constructors from repo. Adjust if your paths differ.
try:
    from vggt.models import vggt as vggt_module
except Exception:
    vggt_module = None

# Helper: strip DDP prefix

def clean_state_dict(sd: Dict[str, Any]) -> Dict[str, Any]:
    """Strip the DistributedDataParallel ``module.`` prefix from state-dict keys.

    Keys without the prefix are passed through unchanged, so the function is
    safe to call on both DDP and plain checkpoints.
    """
    prefix = 'module.'
    return {
        (key[len(prefix):] if key.startswith(prefix) else key): value
        for key, value in sd.items()
    }


# Placeholder dataset builder. EDIT this to return the dataset used in training.
# The function should return an object supporting: len(), __getitem__(idx) returning a dict, and
# optionally a get_loader(batch_size, num_workers, shuffle=False) method.
# For many hydra-based projects you can instantiate datasets via config; adjust as needed.
from training.data.datasets.co3d import Co3dDataset


class SimpleCommonConf:
    """Minimal stand-in for the training `common_conf` object expected by Co3dDataset.

    Provides only the flags the dataset reads; values are chosen for offline
    inference (no debug, not training, deterministic sampling).
    """
    debug = False                # verbose dataset debugging off
    training = False             # inference mode: no training-time augmentation paths
    get_nearby = True            # presumably selects nearby frames per sequence — TODO confirm against Co3dDataset
    load_depth = True            # load depth maps so world/cam points are available in batches
    inside_random = False        # deterministic (non-random) frame selection
    allow_duplicate_img = True   # permit the same image to appear more than once in a sample


def build_dataset(split: str = 'train', CO3D_DIR: str = None, CO3D_ANNOTATION_DIR: str = None):
    """
    Build a Co3dDataset pointing at the provided CO3D_DIR and annotation dir.
    This allows running on a temporary, per-category extracted CO3D_DIR.

    Args:
        split: Dataset split name passed straight through to Co3dDataset.
        CO3D_DIR: Root directory of (possibly per-category) extracted CO3D images.
        CO3D_ANNOTATION_DIR: Directory holding the CO3D annotation files
            (e.g. ``<category>_train.jgz`` — see process_category in main).

    Returns:
        A Co3dDataset instance configured for inference via SimpleCommonConf.
    """
    common_conf = SimpleCommonConf()
    return Co3dDataset(
        common_conf=common_conf,
        split=split,
        CO3D_DIR=CO3D_DIR,
        CO3D_ANNOTATION_DIR=CO3D_ANNOTATION_DIR,
    )


def load_links_json(path: str) -> Dict[str, Any]:
    """Load and parse the links JSON file at *path*.

    Opens the file with an explicit UTF-8 encoding: JSON is defined as UTF-8,
    and relying on the platform's locale default breaks on non-UTF-8 systems.
    """
    with open(path, 'r', encoding='utf-8') as f:
        return json.load(f)


def download_file(url: str, target_path: Path, chunk_size: int = 1024 * 1024):
    """Stream *url* to *target_path* in chunks, showing a byte-level progress bar.

    Raises requests.HTTPError for non-2xx responses. The progress total comes
    from the Content-Length header (0 if the server does not send one).
    """
    response = requests.get(url, stream=True)
    response.raise_for_status()
    total_bytes = int(response.headers.get('content-length', 0))
    progress = tqdm(total=total_bytes, unit='B', unit_scale=True, desc=target_path.name)
    with open(target_path, 'wb') as out, progress:
        for piece in response.iter_content(chunk_size=chunk_size):
            # iter_content may yield empty keep-alive chunks; skip them.
            if piece:
                out.write(piece)
                progress.update(len(piece))


def download_and_extract_category(urls: List[str], download_dir: Path, extract_dir: Path, keep_zips: bool = False):
    """
    Download a list of zip urls into download_dir and extract all into extract_dir.

    Already-present files are reused as a download cache. A corrupt or
    truncated archive is deleted on BadZipFile so that the next run
    re-downloads it instead of reusing the broken cached file forever
    (previously the bad zip was only logged and left in place).

    Args:
        urls: Zip archive URLs; the filename is taken from the last URL segment.
        download_dir: Where zips are downloaded/cached (created if missing).
        extract_dir: Where archive contents are extracted (created if missing).
        keep_zips: If False (default), delete all downloaded zips afterwards
            to free disk space.

    Returns:
        List of downloaded zip Paths (the files may no longer exist on disk
        when keep_zips is False or an archive was corrupt).
    """
    download_dir.mkdir(parents=True, exist_ok=True)
    extract_dir.mkdir(parents=True, exist_ok=True)
    downloaded = []
    for url in urls:
        filename = url.split('/')[-1]
        target = download_dir / filename
        if not target.exists():
            print('Downloading', url)
            download_file(url, target)
        else:
            print('Using cached', target)
        downloaded.append(target)
        # extract
        try:
            with zipfile.ZipFile(str(target), 'r') as zf:
                print('Extracting', target, '->', extract_dir)
                zf.extractall(str(extract_dir))
        except zipfile.BadZipFile:
            print('Bad zip file:', target, '- deleting so it will be re-downloaded')
            try:
                target.unlink()
            except OSError:
                pass
    if not keep_zips:
        for p in downloaded:
            try:
                p.unlink()
            except OSError:
                # File may already be gone (e.g. deleted as corrupt above).
                pass
    return downloaded

class HookCollector:
    """Accumulates attention tensors captured by forward hooks, keyed by layer name."""

    def __init__(self):
        # Per-batch storage; the caller clears it between batches.
        self.storage = {}

    def make_hook(self, layer_key: str):
        """Build a forward-hook callable that stashes the layer's attention map.

        Capture order of preference:
          1. a ``last_attn`` attribute cached on the module itself;
          2. the second element of a ``(out, attn)``-style tuple output,
             if it is a tensor.
        The captured tensor is detached to CPU immediately.
        """

        def hook(module, input, output):
            captured = getattr(module, 'last_attn', None)
            if captured is None:
                # Fallback: many attention implementations return (out, attn).
                try:
                    if isinstance(output, tuple) and len(output) > 1 and torch.is_tensor(output[1]):
                        captured = output[1]
                except Exception:
                    captured = None

            if captured is not None:
                # Detach to CPU ASAP to limit GPU memory pressure.
                self.storage[layer_key] = captured.detach().cpu()

        return hook


def find_attention_modules(model: torch.nn.Module, pattern: str = 'attention'):
    """Find modules whose qualified name or class name contains *pattern*.

    The match is case-insensitive on both sides. (Previously only the module
    name/class was lowercased, so an upper-case pattern such as ``Attention``
    could never match anything.)

    Returns:
        List of ``(qualified_name, module)`` tuples in registration order.
    """
    needle = pattern.lower()
    return [
        (name, module)
        for name, module in model.named_modules()
        if needle in name.lower() or needle in type(module).__name__.lower()
    ]


def save_sample_to_h5(h5f: h5py.File, group_name: str, attn_dict: Dict[str, torch.Tensor], points: torch.Tensor, metadata: Dict[str, Any] = None, head_avg: bool = False):
    """Write one sample's attention maps and point cloud into an HDF5 group.

    Layout: ``<group_name>/P`` holds the points (if any), and
    ``<group_name>/attention/<sanitized_layer_name>`` holds each attention
    array. Metadata values are stored as JSON-encoded group attributes,
    falling back to ``str`` for non-serializable values.

    Args:
        h5f: Open h5py file (or group) to write into.
        group_name: Name of the new group created for this sample.
        attn_dict: Layer name -> attention tensor; expected shapes are
            (B, heads, N, N), (heads, N, N) or (N, N).
        points: Optional tensor of points, (num_points, 3) or (B, num_points, 3).
        metadata: Optional mapping stored as group attributes.
        head_avg: If True, average over the heads axis. This now also handles
            3-D (heads, N, N) tensors — the per-sample slices produced by the
            main loop are 3-D, so the old 4-D-only check silently skipped
            averaging in the common path.
    """
    grp = h5f.create_group(group_name)
    if metadata:
        for k, v in metadata.items():
            try:
                grp.attrs[k] = json.dumps(v)
            except Exception:
                # Fall back to str() for values JSON (or HDF5) cannot handle.
                try:
                    grp.attrs[k] = str(v)
                except Exception:
                    pass

    # points: (num_points, 3) or (B, num_points, 3)
    if points is not None:
        pts = points.detach().cpu().numpy()
        grp.create_dataset('P', data=pts, compression='gzip', compression_opts=4)

    atn_grp = grp.create_group('attention')
    for layer_name, tensor in attn_dict.items():
        arr = tensor.numpy()
        if head_avg:
            if arr.ndim == 4:
                # (B, heads, N, N) -> (B, N, N)
                arr = arr.mean(axis=1)
            elif arr.ndim == 3:
                # (heads, N, N) -> (N, N): per-sample slices from the main loop.
                arr = arr.mean(axis=0)
        # HDF5 dataset names: keep only alphanumerics and underscores.
        dset_name = re.sub(r'[^0-9a-zA-Z_]+', '_', layer_name)
        atn_grp.create_dataset(dset_name, data=arr, compression='gzip', compression_opts=4)


def main():
    """CLI entry point: per CO3D category, download data, run VGGt, and dump
    attention matrices plus point clouds to HDF5.

    Per-category workflow (see process_category below):
      1. Download and extract the category's zip archives into a work dir.
      2. Build a Co3dDataset over the extracted files, wrap in a DataLoader.
      3. Load the checkpoint, register forward hooks on the selected attention
         modules, and run inference batch by batch under torch.no_grad().
      4. Write per-sample attention/points into HDF5 (one file per sample, or
         a single per-category file with --single_h5), then clean up.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--ckpt', type=str, required=True, help='Path to model checkpoint (.pt)')
    parser.add_argument('--out_dir', type=str, required=True, help='Output directory (h5 files will be placed here)')
    parser.add_argument('--layers', type=int, nargs='+', default=None, help='Layer indices to select from discovered attention modules')
    parser.add_argument('--layer_pattern', type=str, default='attention', help='Pattern to find attention modules by name or class')
    parser.add_argument('--batch_size', type=int, default=1)
    parser.add_argument('--num_workers', type=int, default=4)
    parser.add_argument('--device', type=str, default='cuda')
    parser.add_argument('--head_avg', action='store_true', help='Save head-averaged attention instead of per-head')
    parser.add_argument('--single_h5', action='store_true', help='Write all samples into a single HDF5 (default: one file per sample)')
    parser.add_argument('--prefix', type=str, default='sample', help='Prefix for sample group names or files')
    parser.add_argument('--max_samples', type=int, default=None, help='Max number of samples to process (for testing)')
    # args required for per-category download/run
    parser.add_argument('--links_json', type=str, default=None, help='Path to links.json with per-category download URLs')
    parser.add_argument('--CO3D_ANNOT_DIR', type=str, default=None, help='Path to CO3D annotation directory (used to build dataset)')
    parser.add_argument('--category', type=str, default=None, help='If set, only process this single category from links_json')
    parser.add_argument('--work_dir', type=str, default=None, help='Temporary work dir for downloads/extraction (default: tmpdir)')
    args = parser.parse_args()

    out_dir = Path(args.out_dir)
    out_dir.mkdir(parents=True, exist_ok=True)

    # Fall back to CPU if CUDA is unavailable, regardless of --device.
    device = torch.device(args.device if torch.cuda.is_available() else 'cpu')

    # load links json if provided
    links = None
    if args.links_json:
        # Only the 'full' section of links.json is used (category -> url list).
        links = load_links_json(args.links_json).get('full', {})

    work_dir = Path(args.work_dir) if args.work_dir else Path(tempfile.mkdtemp(prefix='co3d_work_'))
    work_dir.mkdir(parents=True, exist_ok=True)

    # Helper: process a single category (download, extract, run)
    def process_category(cat_name: str, urls: List[str]):
        """Download/extract one category, run inference, write HDF5.

        Returns the number of samples written (0 on any setup failure).
        """
        print('\nProcessing category', cat_name)
        cat_download_dir = work_dir / f'dl_{cat_name}'
        cat_extract_dir = work_dir / f'ex_{cat_name}'
        # download & extract
        try:
            download_and_extract_category(urls, cat_download_dir, cat_extract_dir)
        except Exception as e:
            print('Download/extract failed for', cat_name, 'error:', e)
            return 0

        # find annotation file for this category in CO3D_ANNOT dir
        # Expect user to provide CO3D_ANNOT dir that contains <cat>_train.jgz etc.
        annot_dir = Path(args.CO3D_ANNOT_DIR)
        if not annot_dir.exists():
            print('Annotation dir not found:', annot_dir)
            return 0

        # Build dataset pointing to the extracted images and the annotations
        # CO3D dataset expects directory structure similar to official CO3D.
        co3d_dir = str(cat_extract_dir)
        dataset = build_dataset(split='train', CO3D_DIR=co3d_dir, CO3D_ANNOTATION_DIR=str(annot_dir))
        loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)

        # run existing model loading and hook registration for this category
        # Load model
        # NOTE(review): the model is re-created and the checkpoint re-loaded for
        # every category; this is wasteful but harmless for correctness.
        if vggt_module is None:
            print('Warning: automatic import of vggt model module failed. Please edit this script to import your model class explicitly.')
            # Try dynamic import: assume model class at vggt.models.vggt.VGGT
            try:
                from vggt.models.vggt import VGGT as ModelClz
            except Exception:
                ModelClz = None
        else:
            # Try to locate a likely model class
            ModelClz = None
            try:
                ModelClz = getattr(vggt_module, 'VGGT')
            except Exception:
                # fallback: search module for classes
                # NOTE(review): picks the first nn.Module subclass found in the
                # module namespace — order is alphabetical via dir(), so this
                # may not be the intended model class; verify for your repo.
                for attr in dir(vggt_module):
                    val = getattr(vggt_module, attr)
                    if isinstance(val, type) and issubclass(val, torch.nn.Module):
                        ModelClz = val
                        break

        if ModelClz is None:
            print('Could not find model class automatically. Skipping category', cat_name)
            return 0

        model = ModelClz()
        # NOTE(review): torch.load unpickles arbitrary objects — only load
        # checkpoints from trusted sources.
        ckpt = torch.load(args.ckpt, map_location='cpu')
        # Checkpoint may be a raw state dict or wrapped under a 'model' key.
        state = ckpt.get('model', ckpt)
        state = clean_state_dict(state)
        # strict=False tolerates missing/unexpected keys (e.g. aux heads).
        model.load_state_dict(state, strict=False)
        model.to(device)
        model.eval()

        # find attention modules
        attn_modules = find_attention_modules(model, pattern=args.layer_pattern)
        if len(attn_modules) == 0:
            print('No attention modules found with pattern', args.layer_pattern)
            return 0

        # choose layers
        # --layers indices refer to positions in the discovered-module list,
        # not to any architecture-defined layer numbering.
        if args.layers is not None:
            sel = []
            for i in args.layers:
                if i < 0 or i >= len(attn_modules):
                    raise ValueError(f'Layer index {i} out of range (0..{len(attn_modules)-1})')
                sel.append(attn_modules[i])
            attn_modules = sel

        print('Selected attention modules:')
        for name, m in attn_modules:
            print(' -', name, m.__class__.__name__)

        # register hooks
        collector = HookCollector()
        handles = []
        for name, m in attn_modules:
            h = m.register_forward_hook(collector.make_hook(name))
            handles.append(h)

        # Prepare HDF5 single file if needed
        single_h5f = None
        if args.single_h5:
            h5_path = out_dir / f'attention_db_{cat_name}.h5'
            single_h5f = h5py.File(h5_path, 'w')

        sample_idx = 0
        # NOTE(review): pbar total counts batches but max_samples counts
        # samples — the bar can be off when batch_size > 1.
        pbar = tqdm(total=len(loader) if args.max_samples is None else min(len(loader), args.max_samples))
        for batch in loader:
            if args.max_samples is not None and sample_idx >= args.max_samples:
                break

            # Expect batch to be a mapping; adjust for your dataset
            # Move images to device only
            if 'images' in batch:
                # batch['images'] is list of images per sample; Co3dDataset returns numpy arrays inside list
                # convert to tensor if needed
                try:
                    batch_images = torch.stack([torch.from_numpy(img) if isinstance(img, np.ndarray) else img for img in batch['images']], dim=0).to(device)
                except Exception:
                    # fallback: assume batch_images already tensor
                    batch_images = batch['images'].to(device)
            else:
                key_candidates = [k for k in batch.keys() if 'image' in k]
                if len(key_candidates) == 0:
                    raise RuntimeError('Dataset batch does not contain images key; please adapt build_dataset or script')
                batch_images = batch[key_candidates[0]].to(device)

            with torch.no_grad():
                outputs = model(images=batch_images)

            # Prefer the point cloud provided in the dataset batch (if present),
            # otherwise fall back to the model outputs.
            points = None
            for pkey in ['world_points', 'cam_points']:
                if pkey in batch and batch[pkey] is not None:
                    pts = batch[pkey]
                    try:
                        # Normalize list / ndarray / tensor forms to one tensor.
                        if isinstance(pts, list):
                            converted = []
                            for el in pts:
                                if torch.is_tensor(el):
                                    converted.append(el)
                                elif isinstance(el, np.ndarray):
                                    converted.append(torch.from_numpy(el))
                                else:
                                    converted.append(torch.tensor(el))
                            points = torch.stack(converted, dim=0)
                        elif isinstance(pts, np.ndarray):
                            points = torch.from_numpy(pts)
                        elif torch.is_tensor(pts):
                            points = pts
                    except Exception:
                        points = None
                    break

            if points is None:
                # Fallback: probe common output keys/attributes for points.
                if isinstance(outputs, dict):
                    for k in ['pred_points', 'points', 'cam_points', 'world_points']:
                        if k in outputs:
                            points = outputs[k]
                            break
                else:
                    try:
                        if hasattr(outputs, 'pred_points'):
                            points = outputs.pred_points
                    except Exception:
                        points = None

            B = batch_images.shape[0]
            for b in range(B):
                group_name = f"{args.prefix}_{cat_name}_{sample_idx:08d}"
                if args.single_h5:
                    target_h5 = single_h5f
                else:
                    file_path = out_dir / f"{group_name}.h5"
                    target_h5 = h5py.File(file_path, 'w')

                # Slice out this sample's attention: 4-D tensors are batched
                # (B, heads, N, N); anything else is stored as captured.
                per_sample_attn = {}
                for layer_name, tensor in collector.storage.items():
                    t = tensor
                    if t.ndim == 4:
                        s = t[b]
                    else:
                        s = t
                    per_sample_attn[layer_name] = s

                per_sample_points = None
                if points is not None and torch.is_tensor(points):
                    if points.ndim == 3:
                        per_sample_points = points[b]
                    else:
                        per_sample_points = points

                meta = {
                    'sample_index': int(sample_idx),
                    'category': cat_name,
                }
                save_sample_to_h5(target_h5, group_name, per_sample_attn, per_sample_points, metadata=meta, head_avg=args.head_avg)

                if not args.single_h5:
                    target_h5.close()

                sample_idx += 1

            # Hooks overwrite storage each forward pass; clear between batches
            # so stale layers from a previous batch are never written.
            collector.storage.clear()
            pbar.update(1)

        if single_h5f is not None:
            single_h5f.close()

        # Always detach hooks so the model is clean for the next category.
        for h in handles:
            h.remove()

        print('Done category', cat_name, 'wrote', sample_idx, 'samples to', out_dir)

        # cleanup extracted files to free disk
        try:
            shutil.rmtree(str(cat_extract_dir))
            shutil.rmtree(str(cat_download_dir))
        except Exception as e:
            print('Cleanup error:', e)

        return sample_idx

    # If links.json provided, iterate categories and process
    total_samples = 0
    if links is not None:
        cats = list(links.keys())
        if args.category is not None:
            cats = [args.category] if args.category in links else []
        for cat in cats:
            urls = links.get(cat, [])
            if len(urls) == 0:
                print('No urls for category', cat)
                continue
            cnt = process_category(cat, urls)
            total_samples += cnt
        print('All categories processed. Total samples:', total_samples)
    else:
        print('No links.json provided; please supply --links_json to run per-category downloads.')

if __name__ == '__main__':
    main()
