{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "b46bef3f",
   "metadata": {},
   "source": [
    "# Mini Imagenet Generation\n",
    "\n",
    "This is one of a pair of notebooks used for generating an ImageNet-like dataset of training\n",
    "data using stable diffusion models. The difficulty of such artificial datasets can be\n",
    "easily tuned, and they are useful for debugging and testing deep learning applications.\n",
    "\n",
    "The first notebook uses Mistral-7B for taking class labels and generating descriptive prompts\n",
    "for image generation. The prompts are written out as shards to disk and shuffled. The process\n",
    "is parallelized using Ray.\n",
    "\n",
    "The second notebook uses Stable Diffusion to take descriptive prompts/image captions\n",
    "and renders them as images. This is a straightforward shard-to-shard transformation.\n",
    "\n",
    "Note that we are using explicit parallelization over shard files in the initial generation\n",
    "and the image generation, while we are using ray.data for the actual shuffling. That is\n",
    "because using explicit parallelization over shards makes it easier to restart jobs that have\n",
    "failed halfway through for some reason."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f8c7158d",
   "metadata": {},
   "outputs": [],
   "source": [
    "import itertools\n",
    "import random\n",
    "import uuid\n",
    "import os\n",
    "\n",
    "import torch\n",
    "import webdataset as wds\n",
    "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
    "import ray\n",
    "\n",
    "from typing import List\n",
    "\n",
    "\n",
    "def take(n, iterable):\n",
    "    \"\"\"Return the first n items of the iterable as a list.\"\"\"\n",
    "    return list(itertools.islice(iterable, n))\n",
    "\n",
    "\n",
    "def get_gpu_memories():\n",
    "    \"\"\"Return the free memory in bytes of each visible CUDA device ([] if no CUDA).\"\"\"\n",
    "    memory = []\n",
    "    if torch.cuda.is_available():\n",
    "        for i in range(torch.cuda.device_count()):\n",
    "            # free = device total minus what this process has currently allocated\n",
    "            memory.append(torch.cuda.get_device_properties(i).total_memory - torch.cuda.memory_allocated(i))\n",
    "    return memory\n",
    "\n",
    "\n",
    "def get_num_gpus():\n",
    "    \"\"\"Return the total number of GPUs registered in the Ray cluster.\"\"\"\n",
    "    cluster_resources = ray.cluster_resources()\n",
    "    return cluster_resources[\"GPU\"]\n",
    "\n",
    "\n",
    "def ray_get(future, timeout=0.1):\n",
    "    \"\"\"Like ray.get, but raise TimeoutError if the result is not ready within timeout seconds.\"\"\"\n",
    "    ready, not_ready = ray.wait([future], timeout=timeout)\n",
    "    if not_ready:\n",
    "        raise TimeoutError()\n",
    "    return ray.get(future)\n",
    "\n",
    "\n",
    "def is_ready(actor, timeout=0.1):\n",
    "    \"\"\"Return True iff the given Ray future completes within timeout seconds.\n",
    "\n",
    "    NOTE(review): despite the parameter name, ray.wait expects an ObjectRef\n",
    "    (task/method future), not an actor handle -- confirm call sites.\n",
    "    \"\"\"\n",
    "    ready, not_ready = ray.wait([actor], timeout=timeout)\n",
    "    return not not_ready\n",
    "\n",
    "\n",
    "def select_or_delete(actors, predicate):\n",
    "    \"\"\"Return the subset of actors satisfying predicate.\n",
    "\n",
    "    The previous implementation did `del actor` on the rejected loop variable,\n",
    "    which is a no-op: it only removes the local name (rebound on the next\n",
    "    iteration anyway) while the caller's list still holds a reference. Rejected\n",
    "    actors are actually released only when the caller discards its own\n",
    "    references, e.g. by rebinding: actors = select_or_delete(actors, pred).\n",
    "    \"\"\"\n",
    "    return [actor for actor in actors if predicate(actor)]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "eea31df5",
   "metadata": {
    "tags": [
     "parameters"
    ]
   },
   "outputs": [],
   "source": [
    "# parameters\n",
    "\n",
    "# number of classes, must be 10, 100, or 1000\n",
    "nclasses = 10\n",
    "\n",
    "# number of images per shard\n",
    "nimages = 100\n",
    "\n",
    "# number of prompts generated at once per class\n",
    "ngenerated = 20\n",
    "\n",
    "# number of training shards\n",
    "nshards = 1281\n",
    "\n",
    "# number of validation shards\n",
    "nvalshards = 50\n",
    "\n",
    "# output directory\n",
    "odir = f\"./mini-imagenet-{nclasses}\"\n",
    "\n",
    "# output file prefix\n",
    "oprefix = f\"mi{nclasses}\"\n",
    "\n",
    "# number of actors to use, -1 for =number of GPUs\n",
    "nactors = -1\n",
    "\n",
    "# check that each actor has sufficient memory\n",
    "check_sufficient = True\n",
    "\n",
    "# seconds to wait for actors to start up\n",
    "# NOTE(review): not referenced anywhere else in this notebook -- confirm it is still needed\n",
    "actor_startup_wait = 10"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e436522f",
   "metadata": {},
   "outputs": [],
   "source": [
    "!echo \"odir=$odir\"\n",
    "!mkdir -p $odir"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d70e72fc",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the class-label vocabulary for the requested dataset size.\n",
    "if nclasses == 10:\n",
    "    imagenet_classes = \"dog cat car plane bird fish frog horse sheep truck\".split()\n",
    "elif nclasses == 100:\n",
    "    # dedupe and sort so the class -> index mapping is deterministic\n",
    "    imagenet_classes = sorted(\n",
    "        list(\n",
    "            set(\n",
    "                \"\"\"\n",
    "    3d_printer aircraft_carrier airplane apple backpack banana baseball_bat\n",
    "    baseball_glove bat bear bed bench bird book bottle bowl broccoli cake camel car\n",
    "    carrot cat cell_phone chair clock cloud couch cup dining_table dog donut\n",
    "    elephant fire fish fork fox frisbee frog giraffe hair_drier handbag horse\n",
    "    hot_dog hydrant kangaroo keyboard kite knife lamp laptop lion meteor microwave\n",
    "    monitor monkey mouse mushroom octopus orange oven palm_tree panda parking_meter\n",
    "    pear pizza plane potted_plant refrigerator remote rocket sandwich scissors sheep\n",
    "    sink skateboard skis snowboard spoon sports_ball stop_sign street_sign suitcase\n",
    "    surfboard sweet_pepper table teddy_bear telephone tennis_racket tie tiger\n",
    "    toaster toilet toothbrush tree truck tv umbrella vase wine_glass zebra\n",
    "    \"\"\".split()\n",
    "            )\n",
    "        )\n",
    "    )\n",
    "elif nclasses == 1000:\n",
    "    # one class name per whitespace-separated token in the file\n",
    "    imagenet_classes = open(\"imagenet1000.txt\").read().split()\n",
    "else:\n",
    "    raise ValueError(f\"invalid number of classes: {nclasses}, must be 10, 100, or 1000\")\n",
    "\n",
    "assert len(imagenet_classes) == nclasses"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "faeaa0a3",
   "metadata": {},
   "source": [
    "# Generation Classes\n",
    "\n",
    "We encapsulate the model and the generation in a low level and high level class. We can then instantiate those classes once per GPU and call them to generate the shards."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d7563fa9",
   "metadata": {
    "lines_to_next_cell": 2
   },
   "outputs": [],
   "source": [
    "class TextGenerationModel:\n",
    "    \"\"\"Wraps a causal LM and tokenizer for batched text generation on a single GPU.\"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self,\n",
    "        model_name: str = \"mistralai/Mistral-7B-Instruct-v0.1\",\n",
    "        temperature: float = 2.0,\n",
    "        top_p: float = 0.9,\n",
    "        top_k: int = 10,\n",
    "        max_length: int = 96,\n",
    "        num_return_sequences: int = 10,\n",
    "    ):\n",
    "        \"\"\"\n",
    "        Initialize the text generation model.\n",
    "\n",
    "        Args:\n",
    "            model_name: The name of the pretrained model.\n",
    "            temperature: The temperature for the generation process.\n",
    "            top_p: The cumulative probability threshold for nucleus sampling.\n",
    "            top_k: The number of highest probability vocabulary tokens to keep for top-k filtering.\n",
    "            max_length: The maximum total sequence length (prompt plus completion).\n",
    "            num_return_sequences: The number of independently sampled sequences per input.\n",
    "        \"\"\"\n",
    "        # Left padding so batched generation continues from the end of each prompt\n",
    "        self.tokenizer = AutoTokenizer.from_pretrained(\n",
    "            model_name,\n",
    "            padding_side=\"left\",\n",
    "        )\n",
    "        # The model has no dedicated pad token; reuse EOS for padding\n",
    "        self.tokenizer.pad_token = self.tokenizer.eos_token\n",
    "        # Load the weights directly in fp16 instead of loading fp32 and calling\n",
    "        # .half() afterwards; this halves the transient host-memory footprint\n",
    "        self.model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16)\n",
    "\n",
    "        # Ensure the model is on GPU\n",
    "        self.model.to(\"cuda\")\n",
    "\n",
    "        # Set generation parameters\n",
    "        self.temperature = temperature\n",
    "        self.top_p = top_p\n",
    "        self.top_k = top_k\n",
    "        self.max_length = max_length\n",
    "        self.num_return_sequences = num_return_sequences\n",
    "\n",
    "    def generate_responses(self, texts: List[str]) -> List[str]:\n",
    "        \"\"\"\n",
    "        Generate responses for the given texts.\n",
    "\n",
    "        Args:\n",
    "            texts: A list of texts to generate responses for.\n",
    "\n",
    "        Returns:\n",
    "            A flat list of len(texts) * num_return_sequences generated strings;\n",
    "            the num_return_sequences outputs for texts[i] are consecutive.\n",
    "        \"\"\"\n",
    "        # Tokenize with padding/truncation so the batch forms a rectangular tensor\n",
    "        inputs = self.tokenizer(\n",
    "            texts,\n",
    "            return_tensors=\"pt\",\n",
    "            padding=True,\n",
    "            truncation=True,\n",
    "            max_length=self.max_length,\n",
    "        ).to(\"cuda\")\n",
    "\n",
    "        # Generate responses\n",
    "        with torch.no_grad():\n",
    "            outputs = self.model.generate(\n",
    "                input_ids=inputs.input_ids,\n",
    "                attention_mask=inputs.attention_mask,\n",
    "                do_sample=True,\n",
    "                temperature=self.temperature,\n",
    "                top_p=self.top_p,\n",
    "                top_k=self.top_k,\n",
    "                # NOTE: max_length bounds prompt + completion together, so prompts\n",
    "                # near max_length leave little room for the generated caption\n",
    "                max_length=self.max_length,\n",
    "                num_return_sequences=self.num_return_sequences,\n",
    "            )\n",
    "\n",
    "        # Decode the responses\n",
    "        responses = [self.tokenizer.decode(output, skip_special_tokens=True) for output in outputs]\n",
    "\n",
    "        return responses"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e9e1bac7",
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import Dict, List, Iterator\n",
    "\n",
    "\n",
    "class CaptionGenerator:\n",
    "    \"\"\"Generates image captions for random ImageNet class labels and writes webdataset shards.\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        # Mistral-instruct prompt template; {object} is replaced by the class label\n",
    "        self.template = (\n",
    "            \"[INST] Generate a random, detailed visual caption/description of a photo showing: {object}. [/INST]\"\n",
    "        )\n",
    "\n",
    "    def load_model(self):\n",
    "        \"\"\"Load the text generation model (kept separate from __init__).\"\"\"\n",
    "        self.model = TextGenerationModel()\n",
    "\n",
    "    def gpu_is_sufficient(self):\n",
    "        \"\"\"Return True if the single GPU visible to this process has more than 32 GB free.\"\"\"\n",
    "        gpu_memories = get_gpu_memories()\n",
    "        # the old message only covered the >1 case; this also reports 0 GPUs correctly\n",
    "        assert len(gpu_memories) == 1, f\"expected exactly one GPU allocated to this actor, got {len(gpu_memories)}\"\n",
    "        return gpu_memories[0] / 1e9 > 32.0\n",
    "\n",
    "    def process_batch(self, batch: List[Dict], trim: bool = True) -> List[Dict]:\n",
    "        \"\"\"Generate responses for a batch of samples.\n",
    "\n",
    "        Each sample dict gets a \"responses\" key holding the num_return_sequences\n",
    "        generations for its \"text\" prompt. When trim is True, everything up to\n",
    "        and including the final [/INST] marker is stripped from each generation.\n",
    "        \"\"\"\n",
    "        texts = [sample[\"text\"] for sample in batch]\n",
    "        responses = self.model.generate_responses(texts)\n",
    "        if trim:\n",
    "            responses = [response.split(\"[/INST]\")[-1].strip() for response in responses]\n",
    "        # generate_responses returns num_return_sequences outputs per input,\n",
    "        # flattened; regroup so grouped[i] belongs to batch[i]\n",
    "        k = self.model.num_return_sequences\n",
    "        grouped = [responses[i : i + k] for i in range(0, len(responses), k)]\n",
    "        for sample, group in zip(batch, grouped):\n",
    "            sample[\"responses\"] = group\n",
    "        return batch\n",
    "\n",
    "    def process_list_by_batches(self, samples: List[Dict], batch_size: int = 1) -> Iterator[Dict]:\n",
    "        \"\"\"Yield the samples one by one after running process_batch on batch_size-sized chunks.\"\"\"\n",
    "        samples_iter = iter(samples)\n",
    "        while True:\n",
    "            batch = take(batch_size, samples_iter)\n",
    "            if not batch:\n",
    "                break\n",
    "            yield from self.process_batch(batch)\n",
    "\n",
    "    def make_samples(self, n: int) -> Iterator[Dict]:\n",
    "        \"\"\"Yield n samples, each with a random class index, label, and filled-in prompt.\"\"\"\n",
    "        for i in range(n):\n",
    "            cls = random.randrange(len(imagenet_classes))\n",
    "            # avoid shadowing the builtin `object`; the output key stays \"object\"\n",
    "            label = imagenet_classes[cls]\n",
    "            yield dict(cls=cls, object=label, text=self.template.format(object=label))\n",
    "\n",
    "    def make_captions(self, samples: List[Dict]) -> Iterator[Dict]:\n",
    "        \"\"\"Yield one flat caption record per generated response of each sample.\"\"\"\n",
    "        for sample in self.process_list_by_batches(samples):\n",
    "            for response in sample[\"responses\"]:\n",
    "                yield dict(\n",
    "                    cls=sample[\"cls\"],\n",
    "                    object=sample[\"object\"],\n",
    "                    text=sample[\"text\"],\n",
    "                    response=response,\n",
    "                )\n",
    "\n",
    "    def make_shard(self, output: str, n: int, k: int = 5):\n",
    "        \"\"\"\n",
    "        Generate a shard of samples with generated captions.\n",
    "\n",
    "        Args:\n",
    "            output: The output file to write the shard to.\n",
    "            n: The number of samples to generate in the shard.\n",
    "            k: The number of return sequences for each sample.\n",
    "        \"\"\"\n",
    "        if os.path.exists(output):\n",
    "            # shard already produced by a previous (possibly restarted) run\n",
    "            return\n",
    "        self.model.num_return_sequences = k\n",
    "        # write to a temp name and rename at the end so interrupted runs never\n",
    "        # leave a truncated shard under the final name\n",
    "        writer = wds.TarWriter(output + \".temp\")\n",
    "        # request a few extra samples so islice can always reach n captions\n",
    "        captions = self.make_captions(self.make_samples(n // k + k))\n",
    "        for caption in itertools.islice(captions, n):\n",
    "            sample = dict(\n",
    "                __key__=uuid.uuid4().hex,\n",
    "                json=caption,\n",
    "            )\n",
    "            writer.write(sample)\n",
    "        # BUGFIX: close the writer before renaming; otherwise buffered tar data\n",
    "        # may not be flushed and the shard can be left truncated\n",
    "        writer.close()\n",
    "        os.rename(output + \".temp\", output)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3e31f1a5",
   "metadata": {},
   "source": [
    "# Parallelization with Ray\n",
    "\n",
    "For parallel generation, we use a Ray cluster. This will also do the right thing with a single machine/single GPU setup. It automatically scales up."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7f58f93f",
   "metadata": {},
   "outputs": [],
   "source": [
    "import ray\n",
    "\n",
    "if not ray.is_initialized():\n",
    "    ray.init()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fa3ac5f7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Ray actor wrapper around CaptionGenerator; num_gpus=1 gives each actor\n",
    "# exclusive use of a single GPU.\n",
    "@ray.remote(num_gpus=1)\n",
    "class RayCaptionGenerator(CaptionGenerator):\n",
    "    def __init__(self):\n",
    "        super().__init__()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "85ba6bb5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Start up and create the actor pool.\n",
    "# This tries to adapt to the number of GPUs available.\n",
    "# It also checks that each actor has sufficient memory (when check_sufficient is set).\n",
    "# If not, set up your cluster differently by excluding GPUs that are too small.\n",
    "# (Ray's facilities for heterogeneous clusters are somewhat limited)\n",
    "\n",
    "ngpus = get_num_gpus() if nactors == -1 else nactors\n",
    "\n",
    "print(f\"using {ngpus} actors\")\n",
    "actors = [RayCaptionGenerator.remote() for i in range(int(ngpus))]\n",
    "\n",
    "# load models one actor at a time (ray.get blocks on each call)\n",
    "print(\"loading the models\")\n",
    "for actor in actors:\n",
    "    # honor the check_sufficient parameter, which was previously declared but ignored\n",
    "    if check_sufficient:\n",
    "        assert ray.get(actor.gpu_is_sufficient.remote()), \"GPU memory insufficient\"\n",
    "    ray.get(actor.load_model.remote())\n",
    "\n",
    "print(\"creating the pool\")\n",
    "pool = ray.util.ActorPool(actors)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e1e0f11f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# It would be nice if there were a .map_with_actors method in pool,\n",
    "# but there isn't, so we use this workaround.\n",
    "\n",
    "\n",
    "def apply_actor(actor, dest):\n",
    "    return actor.make_shard.remote(dest, nimages, ngenerated)\n",
    "\n",
    "\n",
    "!mkdir -p $odir/prompts"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d57a611b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Perform the actual shard generation.\n",
    "\n",
    "dests = [f\"{odir}/prompts/prompts-{i:06d}.tar\" for i in range(nshards + nvalshards)]\n",
    "result = list(pool.map(apply_actor, dests))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a76e1928",
   "metadata": {},
   "outputs": [],
   "source": [
    "del actors\n",
    "del pool"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e591c25c",
   "metadata": {},
   "source": [
    "# Shuffle\n",
    "\n",
    "For shuffling the dataset, we use the ray.data `read_webdataset` and `write_webdataset` functions."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fd3526f6",
   "metadata": {},
   "outputs": [],
   "source": [
    "import ray\n",
    "from ray.data import read_webdataset\n",
    "import glob"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b7c3c906",
   "metadata": {},
   "outputs": [],
   "source": [
    "!mkdir -p $odir/shuffled\n",
    "!rm -f $odir/shuffled/*\n",
    "shards = glob.glob(f\"{odir}/prompts/prompts-*.tar\")\n",
    "dataset = read_webdataset(shards)\n",
    "shuffled_dataset = dataset.random_shuffle()\n",
    "shuffled_dataset.repartition(len(shards)).write_webdataset(f\"{odir}/shuffled/\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f3b53c3a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# The output of write_webdataset is a directory of shards, but not following\n",
    "# the usual naming conventions. We rename the shards to follow typical\n",
    "# webdataset conventions.\n",
    "\n",
    "import glob\n",
    "\n",
    "shuffled = sorted(glob.glob(f\"{odir}/shuffled/*.tar\"))\n",
    "# Guard against a partial shuffle: indexing shuffled[nshards + i] below would\n",
    "# otherwise fail with an uninformative IndexError.\n",
    "assert len(shuffled) >= nshards + nvalshards, f\"expected {nshards + nvalshards} shuffled shards, found {len(shuffled)}\"\n",
    "for i in range(nshards):\n",
    "    os.rename(shuffled[i], f\"{odir}/shuffled/{oprefix}-{i:06d}.tar\")\n",
    "for i in range(nvalshards):\n",
    "    os.rename(shuffled[nshards + i], f\"{odir}/shuffled/{oprefix}-val-{i:06d}.tar\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "85efaf2f",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "jupytext": {
   "cell_metadata_filter": "-all",
   "encoding": "# coding: utf-8",
   "executable": "/usr/bin/env python",
   "main_language": "python",
   "notebook_metadata_filter": "-all"
  },
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
