# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datasets
from datasets import Dataset
import zipfile
import uuid
from tqdm import tqdm
from pathlib import Path
from typing import Dict, List, Tuple, Any
from huggingface_hub import hf_hub_download


def download_hf_files(repo_id: str, out_dir: Path) -> Tuple[Path, Path]:
    """
    Download the ImageNet-1K Parquet split and images.zip from a Hugging Face
    dataset repo, skipping any file that already exists locally.

    :param repo_id: Hugging Face dataset repo id (e.g. "TIGER-Lab/MMEB-eval").
    :param out_dir: local directory to download into; created if missing.
    :returns: (parquet_path, zip_path) pointing at the local copies.
    """
    # Ensure output directory exists
    out_dir.mkdir(parents=True, exist_ok=True)

    # 1) Parquet split.
    # hf_hub_download mirrors the repo-relative filename under local_dir,
    # so the resulting path matches parquet_dest below.
    # NOTE: the deprecated local_dir_use_symlinks argument was removed; recent
    # huggingface_hub versions ignore it and always copy real files.
    parquet_dest = out_dir / "ImageNet-1K" / "test-00000-of-00001.parquet"
    if not parquet_dest.exists():
        print(f"Downloading Parquet to {parquet_dest} …")
        hf_hub_download(
            repo_id=repo_id,
            repo_type="dataset",
            filename="ImageNet-1K/test-00000-of-00001.parquet",
            local_dir=str(out_dir),
        )
    else:
        print(f"Parquet already exists at {parquet_dest}")

    # 2) images.zip
    zip_dest = out_dir / "images.zip"
    if not zip_dest.exists():
        print(f"Downloading images.zip to {zip_dest} …")
        hf_hub_download(
            repo_id=repo_id,
            repo_type="dataset",
            filename="images.zip",
            local_dir=str(out_dir),
        )
    else:
        print(f"images.zip already exists at {zip_dest}")

    return parquet_dest, zip_dest


class MMEBImageZip:
    """
    Utility class to read raw image bytes from a ZIP archive.

    The underlying ZipFile handle stays open for the lifetime of the object;
    call close() — or use the instance as a context manager — to release it.
    """

    def __init__(self, zip_path: Path):
        """
        :param zip_path: path to the ZIP archive to open read-only.
        """
        self.zip_file = zipfile.ZipFile(zip_path, "r")

    def extract(self, file_path: str) -> bytes:
        """
        Read one archive member fully into memory.

        :param file_path: the internal path inside the ZIP to extract
        :returns: raw bytes of the file
        :raises KeyError: if file_path is not present in the archive
        """
        with self.zip_file.open(file_path) as f:
            return f.read()

    def close(self) -> None:
        """Close the underlying ZIP file handle (fixes the original leak)."""
        self.zip_file.close()

    def __enter__(self) -> "MMEBImageZip":
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        self.close()


def load_parquet_dataset(parquet_path: Path, split: str = "train") -> Dataset:
    """
    Read a single Parquet file from disk and return the requested split
    as a Hugging Face Dataset.
    """
    data_files = str(parquet_path)
    return datasets.load_dataset("parquet", data_files=data_files, split=split)


def build_target_records(
    ds: Dataset,
    image_zip: MMEBImageZip,
) -> List[Dict[str, Any]]:
    """
    Build the deduplicated list of target records in one of two formats:
      - Text only: {"id", "text"}
      - Text + image: {"id", "text", "image_path", "frames": [bytes, ...]}

    :param ds: dataset whose rows carry parallel "tgt_text"/"tgt_img_path" lists.
    :param image_zip: archive reader used to pull raw image bytes.
    :returns: one record per unique (text, image_path) pair.
    """
    # Only membership matters for dedup, so a set is the right structure
    # (the original key→id dict stored values that were never read; the
    # caller rebuilds its own index from the returned records).
    seen: set = set()
    records: List[Dict[str, Any]] = []

    for ex in tqdm(ds, desc="Building targets"):
        for text, img_path in zip(ex["tgt_text"], ex["tgt_img_path"]):
            key = (text, img_path)
            if key in seen:
                continue
            seen.add(key)

            # 15 hex chars (~60 bits of randomness) keeps ids short;
            # collisions are negligible at dataset scale.
            record_id = uuid.uuid4().hex[:15]

            # Multi-modal check: empty path → treat as text-only
            if img_path:
                try:
                    img_bytes = image_zip.extract(img_path)
                    rec = {
                        "id": record_id,
                        "text": text,
                        "image_path": img_path,
                        "frames": [img_bytes],
                    }
                except KeyError:
                    # If the file is missing in the ZIP, fall back to text-only
                    rec = {"id": record_id, "text": text}
            else:
                rec = {"id": record_id, "text": text}

            records.append(rec)

    return records


def build_query_records(
    ds: Dataset,
    image_zip: MMEBImageZip,
) -> List[Dict[str, Any]]:
    """
    Build deduplicated query records in two possible formats:
      - Text only: {"id", "text"}
      - Text + image: {"id", "text", "image_path", "frames": [bytes]}
    Note: any KeyError from the ZIP, or an empty path, downgrades to text-only.

    :param ds: dataset whose rows carry "qry_text" and optionally "qry_img_path".
    :param image_zip: archive reader used to pull raw image bytes.
    :returns: one record per unique (text, image_path) pair.
    """
    # Dedup needs only membership, so track seen keys in a set (the original
    # dict's stored ids were never consulted).
    seen: set = set()
    records: List[Dict[str, Any]] = []

    for ex in tqdm(ds, desc="Building queries"):
        text = ex["qry_text"]
        img_path = ex.get("qry_img_path", "")  # Default to empty if field is missing

        key = (text, img_path)
        if key in seen:
            continue
        seen.add(key)

        query_id = uuid.uuid4().hex[:15]

        if img_path:
            try:
                img_bytes = image_zip.extract(img_path)
                rec = {
                    "id": query_id,
                    "text": text,
                    "image_path": img_path,
                    "frames": [img_bytes],
                }
            except KeyError:
                # Missing image in the ZIP → degrade to text-only
                rec = {"id": query_id, "text": text}
        else:
            rec = {"id": query_id, "text": text}

        records.append(rec)

    return records


def build_ground_truth_records(
    ds: Dataset,
    query_index: Dict[Tuple[str, str], str],
    target_index: Dict[Tuple[str, str], str],
) -> List[Dict[str, Any]]:
    """
    Build the ground-truth records mapping each query id to its target ids.

    Both indices are keyed by (text, img_path). Rows whose query id cannot be
    resolved, or whose targets all fail to resolve, are skipped entirely.
    """
    records: List[Dict[str, Any]] = []

    for ex in tqdm(ds, desc="Building ground truth"):
        # Resolve the query id; a row without a built query contributes nothing.
        qid = query_index.get((ex["qry_text"], ex.get("qry_img_path", "")))
        if not qid:
            continue

        # Look up every (text, path) target pair, keeping only resolved ids.
        candidates = (
            target_index.get((t, p))
            for t, p in zip(ex["tgt_text"], ex["tgt_img_path"])
        )
        tids = [tid for tid in candidates if tid]

        if not tids:
            continue

        records.append({"query_id": qid, "target_id": tids})

    return records


def save_parquet(data: List[Dict], path: Path) -> None:
    """
    Write a list of dict records to disk as a single Parquet file.
    """
    Dataset.from_list(data).to_parquet(str(path))


def main():
    """
    End-to-end conversion: download the MMEB-eval ImageNet-1K split and its
    image archive, build deduplicated query/target records plus the
    query→target ground-truth mapping, and save all three as Parquet files.
    """
    # ─── Hugging Face download setup ────────────────────────
    hf_repo = "TIGER-Lab/MMEB-eval"
    hf_cache_dir = Path(__file__).parent / "hf_cache"
    src_parquet, zip_path = download_hf_files(hf_repo, hf_cache_dir)

    # ─── Load dataset split ─────────────────────────────────
    # Reuse the module helper instead of duplicating load_dataset() inline.
    ds = load_parquet_dataset(src_parquet, split="train")
    print(f"Dataset loaded: {ds}")

    # ─── Prepare output directories ─────────────────────────
    out_dir = Path(__file__).parent / "ImageNet-1K"
    query_dir = out_dir / "query"
    target_dir = out_dir / "target"
    gt_dir = out_dir / "ground_truth"
    for d in (query_dir, target_dir, gt_dir):
        d.mkdir(parents=True, exist_ok=True)

    # ─── Initialize image ZIP reader ───────────────────────
    image_zip = MMEBImageZip(zip_path)

    # ─── Build records ─────────────────────────────────────
    target_records = build_target_records(ds, image_zip)
    query_records = build_query_records(ds, image_zip)

    # ─── Build lookup indices ──────────────────────────────
    # Text-only records carry no "image_path" key, hence the .get default.
    target_index = {
        (r["text"], r.get("image_path", "")): r["id"]
        for r in target_records
    }
    query_index = {
        (r["text"], r.get("image_path", "")): r["id"]
        for r in query_records
    }

    # ─── Build ground truth ─────────────────────────────────
    gt_records = build_ground_truth_records(ds, query_index, target_index)

    # ─── Save to Parquet files ─────────────────────────────
    save_parquet(query_records,  query_dir / "query.parquet")
    save_parquet(target_records, target_dir / "target.parquet")
    save_parquet(gt_records,     gt_dir / "ground_truth.parquet")

    print(f"Saved {len(query_records)} queries, "
          f"{len(target_records)} targets, "
          f"{len(gt_records)} ground-truth entries.")


if __name__ == "__main__":
    main()
