{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# One-time setup: uncomment the line below to install SimpleTuner's dependencies.\n",
    "#!poetry install"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# --- User configuration ----------------------------------------------------\n",
    "# Update this for your data path.\n",
    "instance_data_dir = \"/Volumes/ml/datasets/test_datasets/single_image_dataset\"\n",
    "pretrained_model_name_or_path = \"black-forest-labs/FLUX.1-dev\"\n",
    "# Your public model name after it's pushed to the hub.\n",
    "hub_model_id = \"simpletuner-lora\"\n",
    "# Tracker (e.g. wandb) project that runs are logged under.\n",
    "tracker_project_name = \"flux-training\"\n",
    "\n",
    "# Validation prompt\n",
    "validation_prompt = \"A photo-realistic image of a cat\"\n",
    "\n",
    "# Per-device batch size; raise only if VRAM allows.\n",
    "train_batch_size = 1\n",
    "learning_rate = 1e-4\n",
    "\n",
    "# choices: int8-quanto, fp8-quanto, no_change (mac and a100/h100 users get int4 and int2 as well)\n",
    "base_model_precision = \"no_change\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# LyCORIS (LoKr) adapter configuration. `factor` controls the Kronecker\n",
    "# decomposition size; `module_algo_map` overrides it per module class so\n",
    "# attention and feed-forward layers can use different factors.\n",
    "lycoris_config = {\n",
    "    \"algo\": \"lokr\",\n",
    "    \"multiplier\": 1.0,\n",
    "    \"linear_dim\": 10000,\n",
    "    \"linear_alpha\": 1,\n",
    "    \"factor\": 12,\n",
    "    \"apply_preset\": {\n",
    "        \"target_module\": [\n",
    "            \"Attention\",\n",
    "            \"FeedForward\"\n",
    "        ],\n",
    "        \"module_algo_map\": {\n",
    "            \"Attention\": {\n",
    "                \"factor\": 12\n",
    "            },\n",
    "            \"FeedForward\": {\n",
    "                \"factor\": 6\n",
    "            }\n",
    "        }\n",
    "    }\n",
    "}\n",
    "# write to config/lycoris_config.json\n",
    "import json\n",
    "import os\n",
    "\n",
    "# Ensure the target directory exists before writing (first run on a fresh\n",
    "# checkout would otherwise fail with FileNotFoundError).\n",
    "os.makedirs(\"config\", exist_ok=True)\n",
    "with open(\"config/lycoris_config.json\", \"w\") as f:\n",
    "    json.dump(lycoris_config, f, indent=4)  # indent for readability, matching config.json"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Full SimpleTuner training configuration; values defined in the user\n",
    "# configuration cell above are interpolated here, then written to disk\n",
    "# where the Trainer reads them.\n",
    "training_config = {\n",
    "    \"mixed_precision\":\"bf16\",\n",
    "    \"model_type\":\"lora\",\n",
    "    \"pretrained_model_name_or_path\":pretrained_model_name_or_path,\n",
    "    \"gradient_checkpointing\":True,\n",
    "    \"cache_dir\": \"cache\",\n",
    "    \"set_grads_to_none\":True,\n",
    "    \"gradient_accumulation_steps\":1,\n",
    "    \"resume_from_checkpoint\":\"latest\",\n",
    "    \"snr_gamma\":5,\n",
    "    # num_train_epochs=0 means max_train_steps governs the run length.\n",
    "    \"num_train_epochs\":0,\n",
    "    \"max_train_steps\":10000,\n",
    "    \"metadata_update_interval\":65,\n",
    "    \"optimizer\":\"adamw_bf16\",\n",
    "    \"learning_rate\":learning_rate,\n",
    "    \"lr_scheduler\":\"polynomial\",\n",
    "    \"seed\":42,\n",
    "    \"lr_warmup_steps\":100,\n",
    "    \"output_dir\":\"output/models\",\n",
    "    \"non_ema_revision\": False,\n",
    "    \"aspect_bucket_rounding\":2,\n",
    "    \"inference_scheduler_timestep_spacing\":\"trailing\",\n",
    "    \"training_scheduler_timestep_spacing\":\"trailing\",\n",
    "    \"report_to\":\"wandb\",\n",
    "    \"lr_end\":1e-8,\n",
    "    \"compress_disk_cache\":True,\n",
    "    \"push_to_hub\":True,\n",
    "    \"hub_model_id\":hub_model_id,\n",
    "    \"push_checkpoints_to_hub\":True,\n",
    "    \"model_family\":\"flux\",\n",
    "    \"disable_benchmark\":False,\n",
    "    # Fixed: key was \"train_batch\", which SimpleTuner does not recognise;\n",
    "    # the option is \"train_batch_size\".\n",
    "    \"train_batch_size\":train_batch_size,\n",
    "    \"max_workers\":32,\n",
    "    \"read_batch_size\":25,\n",
    "    \"write_batch_size\":64,\n",
    "    \"caption_dropout_probability\":0.1,\n",
    "    \"torch_num_threads\":8,\n",
    "    \"image_processing_batch_size\":32,\n",
    "    \"vae_batch_size\":4,\n",
    "    \"validation_prompt\":validation_prompt,\n",
    "    \"num_validation_images\":1,\n",
    "    \"validation_num_inference_steps\":20,\n",
    "    \"validation_seed\":42,\n",
    "    \"minimum_image_size\":0,\n",
    "    \"resolution\":1024,\n",
    "    \"validation_resolution\":\"1024x1024\",\n",
    "    \"resolution_type\":\"pixel_area\",\n",
    "    \"lycoris_config\":\"config/lycoris_config.json\",\n",
    "    \"lora_type\":\"lycoris\",\n",
    "    \"base_model_precision\":base_model_precision,\n",
    "    \"checkpointing_steps\":500,\n",
    "    \"checkpoints_total_limit\":5,\n",
    "    \"validation_steps\":500,\n",
    "    \"tracker_run_name\":hub_model_id,\n",
    "    \"tracker_project_name\":tracker_project_name,\n",
    "    \"validation_guidance\":3.0,\n",
    "    \"validation_guidance_real\":1.0,\n",
    "    \"validation_guidance_rescale\":0.0,\n",
    "    \"validation_negative_prompt\":\"blurry, cropped, ugly\",\n",
    "}\n",
    "# write to config/config.json\n",
    "import json\n",
    "import os\n",
    "\n",
    "os.makedirs(\"config\", exist_ok=True)  # make this cell runnable on a fresh checkout\n",
    "with open(\"config/config.json\", \"w\") as f:\n",
    "    json.dump(training_config, f, indent=4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def image_backend(suffix, resolution, crop):\n",
    "    \"\"\"Build one local image-dataset backend entry.\n",
    "\n",
    "    `suffix` names both the backend id (my-dataset-<suffix>) and the VAE\n",
    "    cache dir (cache/vae-<suffix>); `resolution` is the pixel_area target\n",
    "    edge length; `crop` toggles random cropping.\n",
    "    \"\"\"\n",
    "    return {\n",
    "        \"id\": f\"my-dataset-{suffix}\",\n",
    "        \"type\": \"local\",\n",
    "        \"instance_data_dir\": instance_data_dir,\n",
    "        \"crop\": crop,\n",
    "        \"crop_style\": \"random\",\n",
    "        \"minimum_image_size\": 128,\n",
    "        \"resolution\": resolution,\n",
    "        \"resolution_type\": \"pixel_area\",\n",
    "        \"repeats\": 4,  # integer; was the string \"4\"\n",
    "        \"metadata_backend\": \"discovery\",\n",
    "        \"caption_strategy\": \"filename\",\n",
    "        \"cache_dir_vae\": f\"cache/vae-{suffix}\"\n",
    "    }\n",
    "\n",
    "# The *-crop variants now actually crop: the original copy-pasted entries\n",
    "# all had crop=False, contradicting their ids and VAE cache dirs.\n",
    "dataloader_config = [\n",
    "    image_backend(\"512\", 512, crop=False),\n",
    "    image_backend(\"1024\", 1024, crop=False),\n",
    "    image_backend(\"512-crop\", 512, crop=True),\n",
    "    image_backend(\"1024-crop\", 1024, crop=True),\n",
    "    {\n",
    "        \"id\": \"text-embed-cache\",\n",
    "        \"dataset_type\": \"text_embeds\",\n",
    "        \"default\": True,\n",
    "        \"type\": \"local\",\n",
    "        \"cache_dir\": \"cache/text\"\n",
    "    }\n",
    "]\n",
    "# write to config/multidatabackend.json\n",
    "import json\n",
    "import os\n",
    "\n",
    "os.makedirs(\"config\", exist_ok=True)  # make this cell runnable on a fresh checkout\n",
    "with open(\"config/multidatabackend.json\", \"w\") as f:\n",
    "    json.dump(dataloader_config, f, indent=4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/bghira/src/SimpleTuner/.venv/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
   "source": [
    "# stdlib first, then project imports; log_format configures SimpleTuner's\n",
    "# log formatting as an import side effect.\n",
    "import logging\n",
    "from os import environ\n",
    "\n",
    "from helpers import log_format\n",
    "from helpers.training.state_tracker import StateTracker\n",
    "from helpers.training.trainer import Trainer\n",
    "\n",
    "# Honour SIMPLETUNER_LOG_LEVEL when set; default to INFO.\n",
    "logger = logging.getLogger(\"SimpleTuner\")\n",
    "log_level = environ.get(\"SIMPLETUNER_LOG_LEVEL\", \"INFO\")\n",
    "logger.setLevel(log_level)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-08-31 20:56:05,034 [WARNING] (SimpleTuner) Skipping false argument: non_ema_revision\n"
     ]
    }
   ],
   "source": [
    "# Convert the dict-style training_config into the argparse-style namespace\n",
    "# the Trainer expects. CONFIG_BACKEND/ENV tell SimpleTuner which config\n",
    "# backend and environment to use — presumably 'cmd' bypasses file-based\n",
    "# config loading; confirm against helpers.configuration.\n",
    "from helpers.configuration.json_file import normalize_args\n",
    "import os\n",
    "os.environ['CONFIG_BACKEND'] = 'cmd'\n",
    "os.environ['ENV'] = 'default'\n",
    "StateTracker.set_config_path('config/')\n",
    "loaded_config = normalize_args(training_config)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "optimizer: {'precision': 'bf16', 'default_settings': {'betas': (0.9, 0.999), 'weight_decay': 0.01, 'eps': 1e-06}, 'class': <class 'helpers.training.optimizers.adamw_bfloat16.AdamWBF16'>}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-08-31 20:56:05,043 [WARNING] (ArgsParser) The VAE model madebyollin/sdxl-vae-fp16-fix is not compatible. Please use a compatible VAE to eliminate this warning. The baked-in VAE will be used, instead.\n",
      "2024-08-31 20:56:05,043 [INFO] (ArgsParser) VAE Model: black-forest-labs/FLUX.1-dev\n",
      "2024-08-31 20:56:05,044 [INFO] (ArgsParser) Default VAE Cache location: \n",
      "2024-08-31 20:56:05,044 [INFO] (ArgsParser) Text Cache location: cache\n",
      "2024-08-31 20:56:05,045 [WARNING] (ArgsParser) Updating T5 XXL tokeniser max length to 512 for Flux.\n",
      "2024-08-31 20:56:05,046 [WARNING] (ArgsParser) No data backend config provided. Using default config at config/multidatabackend.json.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model family: flux\n"
     ]
    }
   ],
   "source": [
    "# Construct the Trainer from the normalized config, logging the full\n",
    "# traceback on failure before propagating it.\n",
    "try:\n",
    "    trainer = Trainer(loaded_config)\n",
    "except Exception as e:\n",
    "    import traceback\n",
    "    logger.error(f\"Failed to create Trainer: {e}, {traceback.format_exc()}\")\n",
    "    # Bare raise preserves the original traceback; `raise e` would rewrite it.\n",
    "    raise"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-08-31 20:56:05,554 [INFO] (helpers.training.trainer) Logged into Hugging Face Hub as 'bghira'\n"
     ]
    }
   ],
   "source": [
    "# Configure run plumbing: webhook, noise schedule, RNG seed, and the\n",
    "# Hugging Face Hub connection used for push_to_hub.\n",
    "try:\n",
    "    trainer.configure_webhook()\n",
    "    trainer.init_noise_schedule()\n",
    "    trainer.init_seed()\n",
    "\n",
    "    trainer.init_huggingface_hub()\n",
    "except Exception as e:\n",
    "    logger.error(f\"Failed to configure Trainer: {e}\")\n",
    "    # Bare raise preserves the original traceback; `raise e` would rewrite it.\n",
    "    raise"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-08-31 20:56:05,559 [INFO] (helpers.training.trainer) Load VAE: black-forest-labs/FLUX.1-dev\n",
      "2024-08-31 20:56:05,843 [INFO] (helpers.training.trainer) Loading VAE onto accelerator, converting from torch.float32 to torch.bfloat16\n",
      "2024-08-31 20:56:05,952 [INFO] (helpers.training.trainer) Load tokenizers\n",
      "You set `add_prefix_space`. The tokenizer needs to be converted from the slow tokenizers\n",
      "2024-08-31 20:56:06,440 [INFO] (helpers.training.text_encoding) Loading OpenAI CLIP-L text encoder from black-forest-labs/FLUX.1-dev/text_encoder..\n",
      "2024-08-31 20:56:06,683 [INFO] (helpers.training.text_encoding) Loading T5 XXL v1.1 text encoder from black-forest-labs/FLUX.1-dev/text_encoder_2..\n",
      "Downloading shards: 100%|██████████| 2/2 [00:00<00:00, 2430.07it/s]\n",
      "Loading checkpoint shards: 100%|██████████| 2/2 [00:00<00:00,  9.30it/s]\n",
      "2024-08-31 20:56:08,494 [INFO] (helpers.training.trainer) Moving text encoder to GPU.\n",
      "2024-08-31 20:56:08,736 [INFO] (helpers.training.trainer) Moving text encoder 2 to GPU.\n"
     ]
    }
   ],
   "source": [
    "# Load the preprocessing models (VAE, tokenizers, text encoders) used to\n",
    "# pre-compute latents and text embeds.\n",
    "try:\n",
    "    trainer.init_preprocessing_models()\n",
    "except Exception as e:\n",
    "    logger.error(f\"Failed to initialize preprocessing models: {e}\")\n",
    "    # Bare raise preserves the original traceback; `raise e` would rewrite it.\n",
    "    raise"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-08-31 20:56:14,161 [INFO] (DataBackendFactory) Loading data backend config from config/multidatabackend.json\n",
      "2024-08-31 20:56:14,162 [INFO] (DataBackendFactory) Configuring text embed backend: text-embed-cache\n",
      "Loading pipeline components...: 100%|██████████| 5/5 [00:00<00:00, 1132.49it/s]\n",
      "2024-08-31 20:56:14,337 [INFO] (TextEmbeddingCache) (Rank: 0) (id=text-embed-cache) Listing all text embed cache entries\n",
      "2024-08-31 20:56:14,339 [INFO] (DataBackendFactory) Pre-computing null embedding\n",
      "2024-08-31 20:56:19,345 [INFO] (DataBackendFactory) Completed loading text embed services.\n",
      "2024-08-31 20:56:19,347 [INFO] (DataBackendFactory) Configuring data backend: my-dataset-512\n",
      "2024-08-31 20:56:19,351 [INFO] (DataBackendFactory) (id=my-dataset-512) Loading bucket manager.\n",
      "2024-08-31 20:56:19,352 [INFO] (DiscoveryMetadataBackend) Checking for cache file: /Volumes/ml/datasets/test_datasets/single_image_dataset/aspect_ratio_bucket_indices_my-dataset-512.json\n",
      "2024-08-31 20:56:19,353 [INFO] (DiscoveryMetadataBackend) Pulling cache file from storage\n",
      "2024-08-31 20:56:19,354 [INFO] (DataBackendFactory) (id=my-dataset-512) Refreshing aspect buckets on main process.\n",
      "2024-08-31 20:56:19,355 [INFO] (BaseMetadataBackend) Discovering new files...\n",
      "2024-08-31 20:56:19,358 [INFO] (BaseMetadataBackend) Compressed 11 existing files from 2.\n",
      "2024-08-31 20:56:19,359 [INFO] (BaseMetadataBackend) No new files discovered. Doing nothing.\n",
      "2024-08-31 20:56:19,360 [INFO] (BaseMetadataBackend) Statistics: {'total_processed': 0, 'skipped': {'already_exists': 11, 'metadata_missing': 0, 'not_found': 0, 'too_small': 0, 'other': 0}}\n",
      "2024-08-31 20:56:19,360 [WARNING] (DataBackendFactory) Key crop_aspect not found in the current backend config, using the existing value 'square'.\n",
      "2024-08-31 20:56:19,361 [WARNING] (DataBackendFactory) Key disable_validation not found in the current backend config, using the existing value 'False'.\n",
      "2024-08-31 20:56:19,362 [WARNING] (DataBackendFactory) Key config_version not found in the current backend config, using the existing value '1'.\n",
      "2024-08-31 20:56:19,362 [WARNING] (DataBackendFactory) Key hash_filenames not found in the current backend config, using the existing value 'True'.\n",
      "2024-08-31 20:56:19,363 [INFO] (DataBackendFactory) Configured backend: {'id': 'my-dataset-512', 'config': {'repeats': '4', 'crop': False, 'crop_aspect': 'square', 'crop_style': 'random', 'disable_validation': False, 'resolution': 0.262144, 'resolution_type': 'area', 'caption_strategy': 'filename', 'instance_data_dir': '/Volumes/ml/datasets/test_datasets/single_image_dataset', 'maximum_image_size': None, 'target_downsample_size': None, 'config_version': 1, 'hash_filenames': True}, 'dataset_type': 'image', 'data_backend': <helpers.data_backend.local.LocalDataBackend object at 0x368b5b1f0>, 'instance_data_dir': '/Volumes/ml/datasets/test_datasets/single_image_dataset', 'metadata_backend': <helpers.metadata.backends.discovery.DiscoveryMetadataBackend object at 0x368b5a470>}\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(Rank: 0)  | Bucket     | Image Count (per-GPU)\n",
      "------------------------------\n",
      "(Rank: 0)  | 1.0        | 10          \n",
      "(Rank: 0)  | 0.7        | 1           \n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-08-31 20:56:19,365 [INFO] (DataBackendFactory) (id=my-dataset-512) Collecting captions.\n",
      "2024-08-31 20:56:19,367 [INFO] (DataBackendFactory) (id=my-dataset-512) Initialise text embed pre-computation using the filename caption strategy. We have 11 captions to process.\n",
      "2024-08-31 20:56:19,368 [INFO] (DataBackendFactory) (id=my-dataset-512) Completed processing 11 captions.\n",
      "2024-08-31 20:56:19,369 [INFO] (DataBackendFactory) (id=my-dataset-512) Creating VAE latent cache.\n",
      "2024-08-31 20:56:19,370 [INFO] (DataBackendFactory) (id=my-dataset-512) Discovering cache objects..\n",
      "2024-08-31 20:56:19,371 [INFO] (DataBackendFactory) Configured backend: {'id': 'my-dataset-512', 'config': {'repeats': '4', 'crop': False, 'crop_aspect': 'square', 'crop_style': 'random', 'disable_validation': False, 'resolution': 0.262144, 'resolution_type': 'area', 'caption_strategy': 'filename', 'instance_data_dir': '/Volumes/ml/datasets/test_datasets/single_image_dataset', 'maximum_image_size': None, 'target_downsample_size': None, 'config_version': 1, 'hash_filenames': True}, 'dataset_type': 'image', 'data_backend': <helpers.data_backend.local.LocalDataBackend object at 0x368b5b1f0>, 'instance_data_dir': '/Volumes/ml/datasets/test_datasets/single_image_dataset', 'metadata_backend': <helpers.metadata.backends.discovery.DiscoveryMetadataBackend object at 0x368b5a470>, 'train_dataset': <helpers.multiaspect.dataset.MultiAspectDataset object at 0x39a2abb20>, 'sampler': <helpers.multiaspect.sampler.MultiAspectSampler object at 0x39a2ab910>, 'train_dataloader': <torch.utils.data.dataloader.DataLoader object at 0x39a2abac0>, 'text_embed_cache': <helpers.caching.text_embeds.TextEmbeddingCache object at 0x368b5a110>, 'vaecache': <helpers.caching.vae.VAECache object at 0x39a1eec20>}\n",
      "2024-08-31 20:56:19,372 [INFO] (DataBackendFactory) Configuring data backend: my-dataset-1024\n",
      "2024-08-31 20:56:19,374 [INFO] (DataBackendFactory) (id=my-dataset-1024) Loading bucket manager.\n",
      "2024-08-31 20:56:19,374 [INFO] (DiscoveryMetadataBackend) Checking for cache file: /Volumes/ml/datasets/test_datasets/single_image_dataset/aspect_ratio_bucket_indices_my-dataset-1024.json\n",
      "2024-08-31 20:56:19,374 [INFO] (DiscoveryMetadataBackend) Pulling cache file from storage\n",
      "2024-08-31 20:56:19,375 [INFO] (DataBackendFactory) (id=my-dataset-1024) Refreshing aspect buckets on main process.\n",
      "2024-08-31 20:56:19,376 [INFO] (BaseMetadataBackend) Discovering new files...\n",
      "2024-08-31 20:56:19,378 [INFO] (BaseMetadataBackend) Compressed 11 existing files from 2.\n",
      "2024-08-31 20:56:19,379 [INFO] (BaseMetadataBackend) No new files discovered. Doing nothing.\n",
      "2024-08-31 20:56:19,379 [INFO] (BaseMetadataBackend) Statistics: {'total_processed': 0, 'skipped': {'already_exists': 11, 'metadata_missing': 0, 'not_found': 0, 'too_small': 0, 'other': 0}}\n",
      "2024-08-31 20:56:19,380 [WARNING] (DataBackendFactory) Key crop_aspect not found in the current backend config, using the existing value 'square'.\n",
      "2024-08-31 20:56:19,380 [WARNING] (DataBackendFactory) Key disable_validation not found in the current backend config, using the existing value 'False'.\n",
      "2024-08-31 20:56:19,381 [WARNING] (DataBackendFactory) Key config_version not found in the current backend config, using the existing value '2'.\n",
      "2024-08-31 20:56:19,381 [WARNING] (DataBackendFactory) Key hash_filenames not found in the current backend config, using the existing value 'True'.\n",
      "2024-08-31 20:56:19,382 [INFO] (DataBackendFactory) Configured backend: {'id': 'my-dataset-1024', 'config': {'repeats': '4', 'crop': False, 'crop_aspect': 'square', 'crop_style': 'random', 'disable_validation': False, 'resolution': 1.048576, 'resolution_type': 'area', 'caption_strategy': 'filename', 'instance_data_dir': '/Volumes/ml/datasets/test_datasets/single_image_dataset', 'maximum_image_size': None, 'target_downsample_size': None, 'config_version': 2, 'hash_filenames': True}, 'dataset_type': 'image', 'data_backend': <helpers.data_backend.local.LocalDataBackend object at 0x39a2b8c70>, 'instance_data_dir': '/Volumes/ml/datasets/test_datasets/single_image_dataset', 'metadata_backend': <helpers.metadata.backends.discovery.DiscoveryMetadataBackend object at 0x39a2b8eb0>}\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(Rank: 0)  | Bucket     | Image Count (per-GPU)\n",
      "------------------------------\n",
      "(Rank: 0)  | 1.0        | 10          \n",
      "(Rank: 0)  | 0.65       | 1           \n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-08-31 20:56:19,384 [INFO] (DataBackendFactory) (id=my-dataset-1024) Collecting captions.\n",
      "2024-08-31 20:56:19,385 [INFO] (DataBackendFactory) (id=my-dataset-1024) Initialise text embed pre-computation using the filename caption strategy. We have 11 captions to process.\n",
      "2024-08-31 20:56:19,386 [INFO] (DataBackendFactory) (id=my-dataset-1024) Completed processing 11 captions.\n",
      "2024-08-31 20:56:19,386 [INFO] (DataBackendFactory) (id=my-dataset-1024) Creating VAE latent cache.\n",
      "2024-08-31 20:56:19,387 [INFO] (DataBackendFactory) (id=my-dataset-1024) Discovering cache objects..\n",
      "2024-08-31 20:56:19,389 [INFO] (DataBackendFactory) Configured backend: {'id': 'my-dataset-1024', 'config': {'repeats': '4', 'crop': False, 'crop_aspect': 'square', 'crop_style': 'random', 'disable_validation': False, 'resolution': 1.048576, 'resolution_type': 'area', 'caption_strategy': 'filename', 'instance_data_dir': '/Volumes/ml/datasets/test_datasets/single_image_dataset', 'maximum_image_size': None, 'target_downsample_size': None, 'config_version': 2, 'hash_filenames': True}, 'dataset_type': 'image', 'data_backend': <helpers.data_backend.local.LocalDataBackend object at 0x39a2b8c70>, 'instance_data_dir': '/Volumes/ml/datasets/test_datasets/single_image_dataset', 'metadata_backend': <helpers.metadata.backends.discovery.DiscoveryMetadataBackend object at 0x39a2b8eb0>, 'train_dataset': <helpers.multiaspect.dataset.MultiAspectDataset object at 0x39a2aba00>, 'sampler': <helpers.multiaspect.sampler.MultiAspectSampler object at 0x39a2b8850>, 'train_dataloader': <torch.utils.data.dataloader.DataLoader object at 0x39a2b89d0>, 'text_embed_cache': <helpers.caching.text_embeds.TextEmbeddingCache object at 0x368b5a110>, 'vaecache': <helpers.caching.vae.VAECache object at 0x39a2b8d30>}\n",
      "2024-08-31 20:56:19,389 [INFO] (DataBackendFactory) Configuring data backend: my-dataset-512-crop\n",
      "2024-08-31 20:56:19,390 [INFO] (DataBackendFactory) (id=my-dataset-512-crop) Loading bucket manager.\n",
      "2024-08-31 20:56:19,391 [INFO] (DiscoveryMetadataBackend) Checking for cache file: /Volumes/ml/datasets/test_datasets/single_image_dataset/aspect_ratio_bucket_indices_my-dataset-512-crop.json\n",
      "2024-08-31 20:56:19,391 [INFO] (DiscoveryMetadataBackend) Pulling cache file from storage\n",
      "2024-08-31 20:56:19,392 [INFO] (DataBackendFactory) (id=my-dataset-512-crop) Refreshing aspect buckets on main process.\n",
      "2024-08-31 20:56:19,392 [INFO] (BaseMetadataBackend) Discovering new files...\n",
      "2024-08-31 20:56:19,395 [INFO] (BaseMetadataBackend) Compressed 11 existing files from 2.\n",
      "2024-08-31 20:56:19,396 [INFO] (BaseMetadataBackend) No new files discovered. Doing nothing.\n",
      "2024-08-31 20:56:19,396 [INFO] (BaseMetadataBackend) Statistics: {'total_processed': 0, 'skipped': {'already_exists': 11, 'metadata_missing': 0, 'not_found': 0, 'too_small': 0, 'other': 0}}\n",
      "2024-08-31 20:56:19,397 [WARNING] (DataBackendFactory) Key crop_aspect not found in the current backend config, using the existing value 'square'.\n",
      "2024-08-31 20:56:19,397 [WARNING] (DataBackendFactory) Key disable_validation not found in the current backend config, using the existing value 'False'.\n",
      "2024-08-31 20:56:19,398 [WARNING] (DataBackendFactory) Key config_version not found in the current backend config, using the existing value '2'.\n",
      "2024-08-31 20:56:19,398 [WARNING] (DataBackendFactory) Key hash_filenames not found in the current backend config, using the existing value 'True'.\n",
      "2024-08-31 20:56:19,399 [INFO] (DataBackendFactory) Configured backend: {'id': 'my-dataset-512-crop', 'config': {'repeats': '4', 'crop': False, 'crop_aspect': 'square', 'crop_style': 'random', 'disable_validation': False, 'resolution': 0.262144, 'resolution_type': 'area', 'caption_strategy': 'filename', 'instance_data_dir': '/Volumes/ml/datasets/test_datasets/single_image_dataset', 'maximum_image_size': None, 'target_downsample_size': None, 'config_version': 2, 'hash_filenames': True}, 'dataset_type': 'image', 'data_backend': <helpers.data_backend.local.LocalDataBackend object at 0x39a2b9900>, 'instance_data_dir': '/Volumes/ml/datasets/test_datasets/single_image_dataset', 'metadata_backend': <helpers.metadata.backends.discovery.DiscoveryMetadataBackend object at 0x39a2b9390>}\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(Rank: 0)  | Bucket     | Image Count (per-GPU)\n",
      "------------------------------\n",
      "(Rank: 0)  | 1.0        | 10          \n",
      "(Rank: 0)  | 0.7        | 1           \n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-08-31 20:56:19,401 [INFO] (DataBackendFactory) (id=my-dataset-512-crop) Collecting captions.\n",
      "2024-08-31 20:56:19,402 [INFO] (DataBackendFactory) (id=my-dataset-512-crop) Initialise text embed pre-computation using the filename caption strategy. We have 11 captions to process.\n",
      "2024-08-31 20:56:19,403 [INFO] (DataBackendFactory) (id=my-dataset-512-crop) Completed processing 11 captions.\n",
      "2024-08-31 20:56:19,404 [INFO] (DataBackendFactory) (id=my-dataset-512-crop) Creating VAE latent cache.\n",
      "2024-08-31 20:56:19,404 [INFO] (DataBackendFactory) (id=my-dataset-512-crop) Discovering cache objects..\n",
      "2024-08-31 20:56:19,406 [INFO] (DataBackendFactory) Configured backend: {'id': 'my-dataset-512-crop', 'config': {'repeats': '4', 'crop': False, 'crop_aspect': 'square', 'crop_style': 'random', 'disable_validation': False, 'resolution': 0.262144, 'resolution_type': 'area', 'caption_strategy': 'filename', 'instance_data_dir': '/Volumes/ml/datasets/test_datasets/single_image_dataset', 'maximum_image_size': None, 'target_downsample_size': None, 'config_version': 2, 'hash_filenames': True}, 'dataset_type': 'image', 'data_backend': <helpers.data_backend.local.LocalDataBackend object at 0x39a2b9900>, 'instance_data_dir': '/Volumes/ml/datasets/test_datasets/single_image_dataset', 'metadata_backend': <helpers.metadata.backends.discovery.DiscoveryMetadataBackend object at 0x39a2b9390>, 'train_dataset': <helpers.multiaspect.dataset.MultiAspectDataset object at 0x39a2b8100>, 'sampler': <helpers.multiaspect.sampler.MultiAspectSampler object at 0x39a2b9a50>, 'train_dataloader': <torch.utils.data.dataloader.DataLoader object at 0x39a2b9ea0>, 'text_embed_cache': <helpers.caching.text_embeds.TextEmbeddingCache object at 0x368b5a110>, 'vaecache': <helpers.caching.vae.VAECache object at 0x39a2b8b50>}\n",
      "2024-08-31 20:56:19,407 [INFO] (DataBackendFactory) Configuring data backend: my-dataset-1024-crop\n",
      "2024-08-31 20:56:19,408 [INFO] (DataBackendFactory) (id=my-dataset-1024-crop) Loading bucket manager.\n",
      "2024-08-31 20:56:19,408 [INFO] (DiscoveryMetadataBackend) Checking for cache file: /Volumes/ml/datasets/test_datasets/single_image_dataset/aspect_ratio_bucket_indices_my-dataset-1024-crop.json\n",
      "2024-08-31 20:56:19,409 [INFO] (DiscoveryMetadataBackend) Pulling cache file from storage\n",
      "2024-08-31 20:56:19,409 [INFO] (DataBackendFactory) (id=my-dataset-1024-crop) Refreshing aspect buckets on main process.\n",
      "2024-08-31 20:56:19,409 [INFO] (BaseMetadataBackend) Discovering new files...\n",
      "2024-08-31 20:56:19,412 [INFO] (BaseMetadataBackend) Compressed 11 existing files from 2.\n",
      "2024-08-31 20:56:19,413 [INFO] (BaseMetadataBackend) No new files discovered. Doing nothing.\n",
      "2024-08-31 20:56:19,414 [INFO] (BaseMetadataBackend) Statistics: {'total_processed': 0, 'skipped': {'already_exists': 11, 'metadata_missing': 0, 'not_found': 0, 'too_small': 0, 'other': 0}}\n",
      "2024-08-31 20:56:19,414 [WARNING] (DataBackendFactory) Key crop_aspect not found in the current backend config, using the existing value 'square'.\n",
      "2024-08-31 20:56:19,415 [WARNING] (DataBackendFactory) Key disable_validation not found in the current backend config, using the existing value 'False'.\n",
      "2024-08-31 20:56:19,415 [WARNING] (DataBackendFactory) Key config_version not found in the current backend config, using the existing value '2'.\n",
      "2024-08-31 20:56:19,415 [WARNING] (DataBackendFactory) Key hash_filenames not found in the current backend config, using the existing value 'True'.\n",
      "2024-08-31 20:56:19,416 [INFO] (DataBackendFactory) Configured backend: {'id': 'my-dataset-1024-crop', 'config': {'repeats': '4', 'crop': False, 'crop_aspect': 'square', 'crop_style': 'random', 'disable_validation': False, 'resolution': 1.048576, 'resolution_type': 'area', 'caption_strategy': 'filename', 'instance_data_dir': '/Volumes/ml/datasets/test_datasets/single_image_dataset', 'maximum_image_size': None, 'target_downsample_size': None, 'config_version': 2, 'hash_filenames': True}, 'dataset_type': 'image', 'data_backend': <helpers.data_backend.local.LocalDataBackend object at 0x39a2ba140>, 'instance_data_dir': '/Volumes/ml/datasets/test_datasets/single_image_dataset', 'metadata_backend': <helpers.metadata.backends.discovery.DiscoveryMetadataBackend object at 0x39a2b9750>}\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(Rank: 0)  | Bucket     | Image Count (per-GPU)\n",
      "------------------------------\n",
      "(Rank: 0)  | 1.0        | 10          \n",
      "(Rank: 0)  | 0.65       | 1           \n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-08-31 20:56:19,418 [INFO] (DataBackendFactory) (id=my-dataset-1024-crop) Collecting captions.\n",
      "2024-08-31 20:56:19,419 [INFO] (DataBackendFactory) (id=my-dataset-1024-crop) Initialise text embed pre-computation using the filename caption strategy. We have 11 captions to process.\n",
      "2024-08-31 20:56:19,420 [INFO] (DataBackendFactory) (id=my-dataset-1024-crop) Completed processing 11 captions.\n",
      "2024-08-31 20:56:19,421 [INFO] (DataBackendFactory) (id=my-dataset-1024-crop) Creating VAE latent cache.\n",
      "2024-08-31 20:56:19,421 [INFO] (DataBackendFactory) (id=my-dataset-1024-crop) Discovering cache objects..\n",
      "2024-08-31 20:56:19,423 [INFO] (DataBackendFactory) Configured backend: {'id': 'my-dataset-1024-crop', 'config': {'repeats': '4', 'crop': False, 'crop_aspect': 'square', 'crop_style': 'random', 'disable_validation': False, 'resolution': 1.048576, 'resolution_type': 'area', 'caption_strategy': 'filename', 'instance_data_dir': '/Volumes/ml/datasets/test_datasets/single_image_dataset', 'maximum_image_size': None, 'target_downsample_size': None, 'config_version': 2, 'hash_filenames': True}, 'dataset_type': 'image', 'data_backend': <helpers.data_backend.local.LocalDataBackend object at 0x39a2ba140>, 'instance_data_dir': '/Volumes/ml/datasets/test_datasets/single_image_dataset', 'metadata_backend': <helpers.metadata.backends.discovery.DiscoveryMetadataBackend object at 0x39a2b9750>, 'train_dataset': <helpers.multiaspect.dataset.MultiAspectDataset object at 0x39a2b9b40>, 'sampler': <helpers.multiaspect.sampler.MultiAspectSampler object at 0x39a2b9cc0>, 'train_dataloader': <torch.utils.data.dataloader.DataLoader object at 0x39a2b9c30>, 'text_embed_cache': <helpers.caching.text_embeds.TextEmbeddingCache object at 0x368b5a110>, 'vaecache': <helpers.caching.vae.VAECache object at 0x39a2ba590>}\n",
      "2024-08-31 20:56:20,265 [INFO] (validation) Precomputing the negative prompt embed for validations.\n",
      "2024-08-31 20:56:20,810 [INFO] (helpers.training.trainer) Calculated our maximum training steps at 10000 because we have 46 epochs and 220 steps per epoch.\n",
      "2024-08-31 20:56:20,811 [INFO] (helpers.training.trainer) Collected the following data backends: ['text-embed-cache', 'my-dataset-512', 'my-dataset-1024', 'my-dataset-512-crop', 'my-dataset-1024-crop']\n"
     ]
    }
   ],
   "source": [
    "try:\n",
    "    trainer.init_data_backend()\n",
    "except Exception as e:\n",
    "    logger.error(f\"Failed to initialize data backend: {e}\")\n",
    "    # Bare `raise` re-raises the active exception with its original traceback\n",
    "    # intact (`raise e` would add this frame to the chain for no benefit).\n",
    "    raise"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-08-31 20:56:21,375 [INFO] (validation) Precomputing the negative prompt embed for validations.\n"
     ]
    }
   ],
   "source": [
    "try:\n",
    "    trainer.init_validation_prompts()\n",
    "except Exception as e:\n",
    "    logger.error(f\"Failed to initialize validation prompts: {e}\")\n",
    "    # Bare `raise` re-raises the active exception with its original traceback\n",
    "    # intact (`raise e` would add this frame to the chain for no benefit).\n",
    "    raise"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-08-31 20:56:21,936 [INFO] (helpers.training.trainer) Unloading text encoders, as they are not being trained.\n",
      "2024-08-31 20:56:22,832 [INFO] (helpers.training.trainer) After nuking text encoders from orbit, we freed 9.1 GB of VRAM. The real memories were the friends we trained a model on along the way.\n"
     ]
    }
   ],
   "source": [
    "# Unload the text encoders — per the log output, they are not being trained\n",
    "# and the prompt embeds are already cached, so dropping them frees VRAM.\n",
    "trainer.init_unload_text_encoder()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-08-31 20:56:22,994 [INFO] (helpers.training.trainer) After nuking the VAE from orbit, we freed 163.84 MB of VRAM.\n"
     ]
    }
   ],
   "source": [
    "# Unload the VAE — the image latents were cached during data-backend setup,\n",
    "# so the VAE is no longer needed and unloading it frees additional VRAM.\n",
    "trainer.init_unload_vae()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Fetching 3 files: 100%|██████████| 3/3 [00:00<00:00, 64198.53it/s]\n"
     ]
    }
   ],
   "source": [
    "# Load the base model weights (fetches the model files from the hub/cache).\n",
    "trainer.init_load_base_model()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Apply the configured `base_model_precision` setting (quantization choices\n",
    "# such as int8-quanto/fp8-quanto, or \"no_change\" as set above).\n",
    "trainer.init_precision()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Initialize the ControlNet model if ControlNet training is configured\n",
    "# (presumably a no-op otherwise — no output was produced here).\n",
    "trainer.init_controlnet_model()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Freeze model components — presumably those not being trained; confirm\n",
    "# against the SimpleTuner trainer documentation.\n",
    "trainer.init_freeze_models()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-08-31 20:56:24,559 [INFO] (helpers.training.trainer) Using lycoris training mode\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2024-08-31 20:56:24|[LyCORIS]-INFO: Using rank adaptation algo: lokr\n",
      "2024-08-31 20:56:24|[LyCORIS]-INFO: Use Dropout value: 0.0\n",
      "2024-08-31 20:56:24|[LyCORIS]-INFO: Create LyCORIS Module\n",
      "2024-08-31 20:56:24|[LyCORIS]-WARNING: lora_dim 10000 is too large for dim=3072 and factor=12, using full matrix mode.\n",
      "2024-08-31 20:56:24|[LyCORIS]-WARNING: lora_dim 10000 is too large for dim=12288 and factor=6, using full matrix mode.\n",
      "2024-08-31 20:56:24|[LyCORIS]-INFO: create LyCORIS: 342 modules.\n",
      "2024-08-31 20:56:24|[LyCORIS]-INFO: module type table: {'LokrModule': 342}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-08-31 20:56:24,657 [INFO] (helpers.training.trainer) LyCORIS network has been initialized with 97,165,392 parameters\n"
     ]
    }
   ],
   "source": [
    "# Build the trainable PEFT adapter — per the log, a LyCORIS LoKr network\n",
    "# created from config/lycoris_config.json (342 LokrModules, ~97M params).\n",
    "# Note the LyCORIS warnings: linear_dim=10000 exceeds the model dims, so it\n",
    "# falls back to full-matrix mode for those modules.\n",
    "trainer.init_trainable_peft_adapter()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Initialize the EMA (exponential moving average) model, if configured\n",
    "# (no output here, so presumably disabled for this run).\n",
    "trainer.init_ema_model()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Move the models onto the accelerator device.\n",
    "# NOTE(review): this cell has no recorded execution count — make sure it is\n",
    "# actually run before training on a fresh kernel.\n",
    "trainer.move_models(destination=\"accelerator\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Set up the validation machinery (uses the validation prompts initialized\n",
    "# earlier).\n",
    "trainer.init_validations()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Benchmark the base model — presumably generates baseline validation images\n",
    "# for comparison against training checkpoints; TODO confirm.\n",
    "trainer.init_benchmark_base_model()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-08-31 20:56:24,674 [INFO] (helpers.training.trainer) Learning rate: 0.0001\n",
      "2024-08-31 20:56:24,676 [INFO] (helpers.training.optimizer_param) cls: <class 'helpers.training.optimizers.adamw_bfloat16.AdamWBF16'>, settings: {'betas': (0.9, 0.999), 'weight_decay': 0.01, 'eps': 1e-06}\n",
      "2024-08-31 20:56:24,679 [INFO] (helpers.training.trainer) Optimizer arguments, weight_decay=0.01 eps=1e-08, extra_arguments={'lr': 0.0001, 'betas': (0.9, 0.999), 'weight_decay': 0.01, 'eps': 1e-06}\n"
     ]
    }
   ],
   "source": [
    "# Resume from any existing checkpoint and prepare for training — per the log,\n",
    "# this sets up the optimizer (AdamWBF16) with the configured learning rate.\n",
    "trainer.resume_and_prepare()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-08-31 20:56:32,220 [ERROR] (wandb.jupyter) Failed to detect the name of this notebook, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable to enable code saving.\n",
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n",
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    }
   ],
   "source": [
    "# Initialize experiment trackers (wandb, per the log output) under\n",
    "# `tracker_project_name`. Set WANDB_NOTEBOOK_NAME to silence the wandb\n",
    "# notebook-detection warning shown below.\n",
    "trainer.init_trackers()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Run the training loop (10,000 max steps per the earlier log output).\n",
    "trainer.train()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
