muellerzr HF staff committed on
Commit 5887b84
1 Parent(s): 60de117
README.md CHANGED
@@ -37,4 +37,12 @@ Using `regex` in VSCODE, use the following replacement:
 <!--Copyright(.*\n)+-->
 ```
 
-Then remove all import statements (as we only care about the content).
+In the source:
+```regex
+"""
+
+
+```
+Then remove all import statements (as we only care about the content).
+
+**WARNING**: It is known that this will separate out the `_inner()` in the source code and treat it as a separate function, losing its context. Proceeding with this known issue for now.
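
The same cleanup can also be scripted instead of done interactively in VS Code. Below is a minimal sketch (the `strip_for_content` helper and its exact regexes are assumptions for illustration, not part of this repo):

```python
import re
from pathlib import Path

# Hypothetical regexes mirroring the manual replacements described above.
COPYRIGHT_RE = re.compile(r"<!--Copyright(?:.*\n)+?-->\n?")                # license banner
IMPORT_RE = re.compile(r"^(?:from\s+\S+\s+)?import\s+.*\n", re.MULTILINE)  # import statements

def strip_for_content(path: str) -> str:
    """Return a file's text with the copyright banner and imports removed."""
    text = Path(path).read_text()
    text = COPYRIGHT_RE.sub("", text)
    return IMPORT_RE.sub("", text)
```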
src/accelerator.py CHANGED
@@ -1,3 +1,4 @@
+
 logger = get_logger(__name__)
 
 
@@ -95,7 +96,6 @@ class Accelerator:
         - **sync_gradients** (`bool`) -- Whether the gradients are currently being synced across all processes.
         - **use_distributed** (`bool`) -- Whether the current configuration is for distributed training.
     """
-
     def __init__(
         self,
         device_placement: bool = True,
@@ -2010,7 +2010,6 @@ class Accelerator:
         9
         ```
         """
-
         try:
             recursively_apply(lambda x: x, input_data, error_on_other_type=True)
             all_tensors = True
@@ -2373,7 +2372,6 @@ class Accelerator:
         >>> accelerator.save_model(model, save_directory)
         ```
         """
-
         if os.path.isfile(save_directory):
             logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
             return
@@ -2865,7 +2863,6 @@ class Accelerator:
         >>> state_dict = accelerator.get_state_dict(net)
         ```
         """
-
         if self.distributed_type == DistributedType.DEEPSPEED:
             if self.deepspeed_config["zero_optimization"]["stage"] == 3:
                 if model.zero_gather_16bit_weights_on_model_save():
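
Pulling the docstring examples from these hunks together, a minimal save/load sketch (directory name is a placeholder):

```python
import torch
from accelerate import Accelerator

accelerator = Accelerator()
net = accelerator.prepare(torch.nn.Linear(8, 2))

# From the `save_model` docstring example above.
accelerator.save_model(net, "save_directory")

# From the `get_state_dict` docstring example above; gathers weights
# across processes (e.g. under DeepSpeed ZeRO-3) when needed.
state_dict = accelerator.get_state_dict(net)
```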
src/commands/config/config_utils.py CHANGED
@@ -67,7 +67,6 @@ class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
     """
     A custom formatter that will remove the usage line from the help message for subcommands.
     """
-
     def _format_usage(self, usage, actions, groups, prefix):
         usage = super()._format_usage(usage, actions, groups, prefix)
         usage = usage.replace("<command> [<args>] ", "")
src/commands/launch.py CHANGED
@@ -22,7 +22,6 @@ class _CustomHelpAction(argparse._HelpAction):
     called. This is useful for the case where the user is using a specific platform and only wants to see the arguments
     for that platform.
     """
-
     def __call__(self, parser, namespace, values, option_string=None):
         if "accelerate" in sys.argv[0] and "launch" in sys.argv[1:]:
             args = sys.argv[2:]
src/data_loader.py CHANGED
@@ -36,7 +36,6 @@ class SeedableRandomSampler(RandomSampler):
     If a custom `generator` is passed, it will rely on its initial seed as well as the current iteration it is on
     (stored in `self.epoch`).
     """
-
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self.epoch = 0
@@ -246,7 +245,6 @@ class IterableDatasetShard(IterableDataset):
           - the shard on process 0 to yield `[0, 1, 4, 5]` and the sampler on process 1 to yield `[2, 3, 6, 7]` if
             this argument is set to `True`.
     """
-
     def __init__(
         self,
         dataset: IterableDataset,
@@ -326,7 +324,6 @@ class DataLoaderStateMixin:
       batch size
 
     """
-
     def __init_subclass__(cls, **kwargs):
         cls.end_of_dataloader = False
         cls.remainder = -1
@@ -381,7 +378,6 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
 
         - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
     """
-
     def __init__(
         self,
         dataset,
@@ -481,7 +477,6 @@ if is_tpu_available(check_device=False):
 
             - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
         """
-
         def __init__(self, dataloader: DataLoaderShard, device: torch.device):
             super().__init__(dataloader, device)
             self._rng_types = self._loader.rng_types
@@ -530,7 +525,6 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
 
         - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
     """
-
     def __init__(
         self, dataset, split_batches: bool = False, skip_batches=0, _drop_last: bool = False, slice_fn=None, **kwargs
     ):
@@ -907,7 +901,6 @@ class SkipBatchSampler(BatchSampler):
     """
     A `torch.utils.data.BatchSampler` that skips the first `n` batches of another `torch.utils.data.BatchSampler`.
     """
-
     def __init__(self, batch_sampler, skip_batches=0):
         self.batch_sampler = batch_sampler
         self.skip_batches = skip_batches
@@ -937,7 +930,6 @@ class SkipDataLoader(DataLoader):
         kwargs:
             All other keyword arguments to pass to the regular `DataLoader` initialization.
     """
-
     def __init__(self, dataset, skip_batches=0, **kwargs):
         super().__init__(dataset, **kwargs)
         self.skip_batches = skip_batches
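
As a quick illustration of the skip classes touched above, a minimal single-process sketch of `SkipDataLoader` (dataset contents are placeholders):

```python
from accelerate.data_loader import SkipDataLoader

dataset = list(range(16))
dataloader = SkipDataLoader(dataset, skip_batches=2, batch_size=4)
# Skips [0..3] and [4..7]; yields tensor batches for [8..11] and [12..15].
for batch in dataloader:
    print(batch)
```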
src/hooks.py CHANGED
@@ -7,7 +7,6 @@ class ModelHook:
     - **no_grad** (`bool`, *optional*, defaults to `False`) -- Whether or not to execute the actual forward pass under
       the `torch.no_grad()` context manager.
     """
-
     no_grad = False
 
     def init_hook(self, module):
@@ -60,7 +59,6 @@ class SequentialHook(ModelHook):
     """
     A hook that can contain several hooks and iterates through them at each event.
     """
-
     def __init__(self, *hooks):
         self.hooks = hooks
 
@@ -109,7 +107,6 @@ def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False)
         `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can
         be discarded).
     """
-
     if append and (getattr(module, "_hf_hook", None) is not None):
         old_hook = module._hf_hook
         remove_hook_from_module(module)
@@ -151,7 +148,6 @@ def remove_hook_from_module(module: nn.Module, recurse=False):
         `torch.nn.Module`: The same module, with the hook detached (the module is modified in place, so the result can
         be discarded).
     """
-
     if hasattr(module, "_hf_hook"):
         module._hf_hook.detach_hook(module)
         delattr(module, "_hf_hook")
@@ -186,7 +182,6 @@ class AlignDevicesHook(ModelHook):
         place_submodules (`bool`, *optional*, defaults to `False`):
             Whether to place the submodules on `execution_device` during the `init_hook` event.
     """
-
     def __init__(
         self,
         execution_device: Optional[Union[int, str, torch.device]] = None,
@@ -539,7 +534,6 @@ class CpuOffload(ModelHook):
             passed, its offload method will be called just before the forward of the model to which this hook is
             attached.
     """
-
     def __init__(
         self,
         execution_device: Optional[Union[str, int, torch.device]] = None,
@@ -564,7 +558,6 @@ class UserCpuOffloadHook:
     A simple hook grouping a model and a `ModelHook`, which provides easy APIs for to call the init method of the hook
     or remove it entirely.
     """
-
     def __init__(self, model, hook):
         self.model = model
         self.hook = hook
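
A minimal sketch of the hook API touched above, using a toy `ModelHook` subclass (the hook class itself is an illustration):

```python
import torch
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module

class HalveInputsHook(ModelHook):
    """Toy hook: halves all positional inputs before each forward."""
    def pre_forward(self, module, *args, **kwargs):
        return tuple(a / 2 for a in args), kwargs

module = torch.nn.Linear(4, 4)
module = add_hook_to_module(module, HalveInputsHook())
out = module(torch.ones(1, 4))   # the forward sees 0.5s instead of 1s
remove_hook_from_module(module)  # detaches the hook and restores the forward
```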
src/local_sgd.py CHANGED
@@ -19,7 +19,6 @@ class LocalSGD:
     Learning Representations. No. CONF. 2019.](https://arxiv.org/abs/1805.09767)
 
     """
-
     def __enter__(self):
         if self.enabled:
             self.model_sync_obj = self.model.no_sync()
@@ -75,7 +74,6 @@ class LocalSGD:
         """
        Synchronize + Average model parameters across all GPUs
        """
-
        self.accelerator.wait_for_everyone()
        with self.accelerator.autocast():
            for param in self.model.parameters():
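
`LocalSGD` is meant to wrap the training loop as a context manager; a condensed, single-device sketch (model, data, and step counts are placeholders):

```python
import torch
from accelerate import Accelerator
from accelerate.local_sgd import LocalSGD

accelerator = Accelerator()
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataloader = torch.utils.data.DataLoader([(torch.ones(4), torch.ones(1))] * 8, batch_size=2)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=4, enabled=True) as local_sgd:
    for x, y in dataloader:
        loss = torch.nn.functional.mse_loss(model(x), y)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
        local_sgd.step()  # averages parameters every `local_sgd_steps` steps
```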
src/logging.py CHANGED
@@ -7,7 +7,6 @@ class MultiProcessAdapter(logging.LoggerAdapter):
 
     Does not require an `Accelerator` object to be created first.
     """
-
     @staticmethod
     def _should_log(main_process_only):
         "Check if log should be performed"
src/optimizer.py CHANGED
@@ -24,7 +24,6 @@ class AcceleratedOptimizer(torch.optim.Optimizer):
         scaler (`torch.cuda.amp.grad_scaler.GradScaler`, *optional*):
             The scaler to use in the step function if training with mixed precision.
     """
-
     def __init__(self, optimizer, device_placement=True, scaler=None):
         self.optimizer = optimizer
         self.scaler = scaler
src/scheduler.py CHANGED
@@ -23,7 +23,6 @@ class AcceleratedScheduler:
         regardless of the number of processes) or create batches on each process (so batch size is the original
         batch size multiplied by the number of processes).
     """
-
     def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
         self.scheduler = scheduler
         self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
src/state.py CHANGED
@@ -30,7 +30,6 @@ class ThreadLocalSharedDict(threading.local):
 
     See https://github.com/pytorch/xla/blob/r2.0/docs/pjrt.md#multithreading-on-tpu-v2v3
     """
-
     def __init__(self, thread_local: bool = False):
         self._storage = {}
 
@@ -67,7 +66,6 @@ class PartialState:
         - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node.
         - **debug** (`bool`) -- Whether or not the current script is being run in debug mode.
     """
-
     _shared_state = SharedDict()
 
     def __init__(self, cpu: bool = False, **kwargs):
@@ -684,7 +682,6 @@ class AcceleratorState:
         - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node.
         - **debug** (`bool`) -- Whether or not the current script is being run in debug mode.
     """
-
     _shared_state = SharedDict()
 
     def __init__(
@@ -946,7 +943,6 @@ class GradientState:
         - **sync_with_dataloader** (`bool`) -- Whether the gradients should be synced at the end of the dataloader
           iteration and the number of total steps reset
     """
-
     _shared_state = SharedDict()
 
     def __init__(self, gradient_accumulation_plugin: Optional[GradientAccumulationPlugin] = None):
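
`PartialState` is a lightweight singleton holding only the process-control information listed above; a short sketch:

```python
from accelerate import PartialState

state = PartialState()
print(state.device, state.process_index, state.num_processes)
if state.is_main_process:
    print("only printed once, from the main process")
```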
src/tracking.py CHANGED
@@ -37,7 +37,6 @@ def on_main_process(function):
     Checks at function execution rather than initialization time, not triggering the initialization of the
     `PartialState`.
     """
-
     @wraps(function)
     def execute_on_main_process(self, *args, **kwargs):
         if getattr(self, "main_process_only", False):
@@ -69,7 +68,6 @@ class GeneralTracker:
     Implementations can also include a `main_process_only` (`bool`) attribute to toggle if relevent logging, init, and
     other functions should occur on the main process or across all processes (by default will use `True`)
     """
-
     main_process_only = True
 
     def __init__(self, _blank=False):
@@ -139,7 +137,6 @@ class TensorBoardTracker(GeneralTracker):
         kwargs:
             Additional key word arguments passed along to the `tensorboard.SummaryWriter.__init__` method.
     """
-
     name = "tensorboard"
     requires_logging_directory = True
 
@@ -248,7 +245,6 @@ class WandBTracker(GeneralTracker):
         kwargs:
             Additional key word arguments passed along to the `wandb.init` method.
     """
-
     name = "wandb"
     requires_logging_directory = False
     main_process_only = False
@@ -373,7 +369,6 @@ class CometMLTracker(GeneralTracker):
         kwargs:
             Additional key word arguments passed along to the `Experiment.__init__` method.
     """
-
     name = "comet_ml"
     requires_logging_directory = False
 
@@ -452,7 +447,6 @@ class AimTracker(GeneralTracker):
         kwargs:
             Additional key word arguments passed along to the `Run.__init__` method.
     """
-
     name = "aim"
     requires_logging_directory = True
 
@@ -568,7 +562,6 @@ class MLflowTracker(GeneralTracker):
             description is set on the resumed run. If a new run is being created, the description is set on the new
             run.
     """
-
     name = "mlflow"
     requires_logging_directory = False
 
@@ -697,7 +690,6 @@ class ClearMLTracker(GeneralTracker):
         kwargs:
             Kwargs passed along to the `Task.__init__` method.
     """
-
     name = "clearml"
     requires_logging_directory = False
 
@@ -857,7 +849,6 @@ class DVCLiveTracker(GeneralTracker):
     accelerator.init_trackers(project_name="my_project", init_kwargs={"dvclive": {"dir": "my_directory"}})
     ```
     """
-
     name = "dvclive"
     requires_logging_directory = False
 
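
All of the trackers above share the `GeneralTracker` interface and are usually driven through the `Accelerator`; a minimal sketch with the TensorBoard tracker:

```python
from accelerate import Accelerator

accelerator = Accelerator(log_with="tensorboard", project_dir="runs")
accelerator.init_trackers(project_name="my_project")
accelerator.log({"train_loss": 0.5}, step=1)
accelerator.end_training()  # flushes and closes the underlying trackers
```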
src/utils/bnb.py CHANGED
@@ -44,7 +44,6 @@ def load_and_quantize_model(
     Returns:
         `torch.nn.Module`: The quantized model
     """
-
     load_in_4bit = bnb_quantization_config.load_in_4bit
     load_in_8bit = bnb_quantization_config.load_in_8bit
 
@@ -246,7 +245,6 @@ def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_conve
         An array to track the current key of the recursion. This is used to check whether the current key (part of
         it) is not in the list of modules to not convert.
     """
-
     if modules_to_not_convert is None:
         modules_to_not_convert = []
 
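
A short sketch of the quantization entry point above (requires `bitsandbytes` and a CUDA device; the toy model is a placeholder):

```python
import torch
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model

bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
model = torch.nn.Sequential(torch.nn.Linear(64, 64), torch.nn.Linear(64, 64))
# Replaces eligible nn.Linear layers with bitsandbytes 8-bit layers.
quantized_model = load_and_quantize_model(model, bnb_quantization_config=bnb_config)
```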
src/utils/dataclasses.py CHANGED
@@ -5,7 +5,6 @@ class KwargsHandler:
     """
     Internal mixin that implements a `to_kwargs()` method for a dataclass.
     """
-
     def to_dict(self):
         return copy.deepcopy(self.__dict__)
 
@@ -39,7 +38,6 @@ class AutocastKwargs(KwargsHandler):
     accelerator = Accelerator(kwargs_handlers=[kwargs])
     ```
     """
-
     enabled: bool = True
     cache_enabled: bool = None
 
@@ -70,7 +68,6 @@ class DistributedDataParallelKwargs(KwargsHandler):
     accelerator = Accelerator(kwargs_handlers=[kwargs])
     ```
     """
-
     dim: int = 0
     broadcast_buffers: bool = True
     bucket_cap_mb: int = 25
@@ -103,7 +100,6 @@ class GradScalerKwargs(KwargsHandler):
     accelerator = Accelerator(kwargs_handlers=[kwargs])
     ```
     """
-
     init_scale: float = 65536.0
     growth_factor: float = 2.0
     backoff_factor: float = 0.5
@@ -128,7 +124,6 @@ class InitProcessGroupKwargs(KwargsHandler):
     accelerator = Accelerator(kwargs_handlers=[kwargs])
     ```
     """
-
     backend: Optional[str] = "nccl"
     init_method: Optional[str] = None
     timeout: timedelta = timedelta(seconds=1800)
@@ -197,7 +192,6 @@ class FP8RecipeKwargs(KwargsHandler):
             are stored in FP8. If `fp8` is selected and deepspeed is enabled, will be used by default. (Not
             available currently).
     """
-
     backend: Backend = "msamp"
     opt_level: OptLevel = "O2"
     margin: int = 0
@@ -260,7 +254,6 @@ class DistributedType(str, enum.Enum):
         - **DEEPSPEED** -- Using DeepSpeed.
         - **TPU** -- Distributed on TPUs.
     """
-
     # Subclassing str as well as Enum allows the `DistributedType` to be JSON-serializable out of the box.
     NO = "NO"
     MULTI_CPU = "MULTI_CPU"
@@ -283,7 +276,6 @@ class SageMakerDistributedType(str, enum.Enum):
         - **DATA_PARALLEL** -- using sagemaker distributed data parallelism.
         - **MODEL_PARALLEL** -- using sagemaker distributed model parallelism.
     """
-
     # Subclassing str as well as Enum allows the `SageMakerDistributedType` to be JSON-serializable out of the box.
     NO = "NO"
     DATA_PARALLEL = "DATA_PARALLEL"
@@ -299,7 +291,6 @@ class ComputeEnvironment(str, enum.Enum):
         - **LOCAL_MACHINE** -- private/custom cluster hardware.
         - **AMAZON_SAGEMAKER** -- Amazon SageMaker as compute environment.
     """
-
     # Subclassing str as well as Enum allows the `ComputeEnvironment` to be JSON-serializable out of the box.
     LOCAL_MACHINE = "LOCAL_MACHINE"
     AMAZON_SAGEMAKER = "AMAZON_SAGEMAKER"
@@ -336,7 +327,6 @@ class DynamoBackend(str, BaseEnum):
         - **TVM** -- Uses Apach TVM for inference optimizations. [Read more](https://tvm.apache.org/)
 
     """
-
     # Subclassing str as well as Enum allows the `SageMakerDistributedType` to be JSON-serializable out of the box.
     NO = "NO"
     EAGER = "EAGER"
@@ -364,7 +354,6 @@ class LoggerType(BaseEnum):
         - **COMETML** -- comet_ml as an experiment tracker
         - **DVCLIVE** -- dvclive as an experiment tracker
     """
-
     ALL = "all"
     AIM = "aim"
     TENSORBOARD = "tensorboard"
@@ -384,7 +373,6 @@ class PrecisionType(BaseEnum):
         - **FP16** -- using half precision
         - **BF16** -- using brain floating point precision
     """
-
     NO = "no"
     FP8 = "fp8"
     FP16 = "fp16"
@@ -404,7 +392,6 @@ class CustomDtype(enum.Enum):
     r"""
     An enum that contains multiple custom dtypes that can be used for `infer_auto_device_map`.
     """
-
     FP8 = "fp8"
     INT4 = "int4"
 
@@ -423,7 +410,6 @@ class ProjectConfiguration:
     """
     Configuration for the Accelerator object based on inner-project needs.
     """
-
     project_dir: str = field(default=None, metadata={"help": "A path to a directory for storing data."})
     logging_dir: str = field(
         default=None,
@@ -471,7 +457,6 @@ class GradientAccumulationPlugin(KwargsHandler):
     """
     A plugin to configure gradient accumulation behavior.
     """
-
     num_steps: int = field(default=None, metadata={"help": "The number of steps to accumulate gradients for."})
     adjust_scheduler: bool = field(
         default=True,
@@ -492,7 +477,6 @@ class TorchDynamoPlugin(KwargsHandler):
     """
     This plugin is used to compile a model with PyTorch 2.0
     """
-
     backend: DynamoBackend = field(
         default=None,
         metadata={"help": f"Possible options are {[b.value.lower() for b in DynamoBackend]}"},
@@ -528,7 +512,6 @@ class DeepSpeedPlugin:
     """
     This plugin is used to integrate DeepSpeed.
     """
-
     hf_ds_config: Any = field(
         default=None,
         metadata={
@@ -828,7 +811,6 @@ class FullyShardedDataParallelPlugin:
     """
     This plugin is used to enable fully sharded data parallelism.
     """
-
     sharding_strategy: "typing.Any" = field(
         default=None,
         metadata={
@@ -1062,7 +1044,6 @@ class MegatronLMPlugin:
     Plugin for Megatron-LM to enable tensor, pipeline, sequence and data parallelism. Also to enable selective
     activation recomputation and optimized fused kernels.
     """
-
     tp_degree: int = field(default=None, metadata={"help": "tensor parallelism degree."})
     pp_degree: int = field(default=None, metadata={"help": "pipeline parallelism degree."})
     num_micro_batches: int = field(default=None, metadata={"help": "number of micro-batches."})
@@ -1436,7 +1417,6 @@ class BnbQuantizationConfig:
     """
     A plugin to enable BitsAndBytes 4bit and 8bit quantization
     """
-
     load_in_8bit: bool = field(default=False, metadata={"help": "enable 8bit quantization."})
 
     llm_int8_threshold: float = field(
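
These kwargs handlers and plugins are all consumed by `Accelerator` at construction time; a compact sketch combining two of the dataclasses above:

```python
from accelerate import Accelerator
from accelerate.utils import DistributedDataParallelKwargs, GradientAccumulationPlugin

ddp_kwargs = DistributedDataParallelKwargs(broadcast_buffers=True, bucket_cap_mb=25)
ga_plugin = GradientAccumulationPlugin(num_steps=2, adjust_scheduler=True)
accelerator = Accelerator(
    kwargs_handlers=[ddp_kwargs],
    gradient_accumulation_plugin=ga_plugin,
)
```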
src/utils/deepspeed.py CHANGED
@@ -14,7 +14,6 @@ class HfDeepSpeedConfig:
         config_file_or_dict (`Union[str, Dict]`): path to DeepSpeed config file or dict.
 
     """
-
     def __init__(self, config_file_or_dict):
         if isinstance(config_file_or_dict, dict):
             # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
@@ -134,7 +133,6 @@ class DeepSpeedEngineWrapper:
     Args:
         engine (deepspeed.runtime.engine.DeepSpeedEngine): deepspeed engine to wrap
     """
-
     def __init__(self, engine):
         self.engine = engine
 
@@ -163,7 +161,6 @@ class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
         optimizer (`torch.optim.optimizer.Optimizer`):
             The optimizer to wrap.
     """
-
     def __init__(self, optimizer):
         super().__init__(optimizer, device_placement=False, scaler=None)
         self.__has_overflow__ = hasattr(self.optimizer, "overflow")
@@ -191,7 +188,6 @@ class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
             The scheduler to wrap.
         optimizers (one or a list of `torch.optim.Optimizer`):
     """
-
     def __init__(self, scheduler, optimizers):
         super().__init__(scheduler, optimizers)
 
@@ -214,7 +210,6 @@ class DummyOptim:
         **kwargs:
             Other arguments.
     """
-
     def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
         self.params = params
         self.lr = lr
@@ -239,7 +234,6 @@ class DummyScheduler:
         **kwargs:
             Other arguments.
     """
-
     def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, lr_scheduler_callable=None, **kwargs):
         self.optimizer = optimizer
         self.total_num_steps = total_num_steps
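
When the optimizer and scheduler are defined in the DeepSpeed config file, the dummy classes above act as stand-ins until `prepare` swaps in the real objects; a minimal sketch:

```python
import torch
from accelerate.utils import DummyOptim, DummyScheduler

model = torch.nn.Linear(8, 8)
# Placeholders: the real optimizer/scheduler come from the DeepSpeed config
# once accelerator.prepare(...) runs.
optimizer = DummyOptim(model.parameters(), lr=1e-3)
scheduler = DummyScheduler(optimizer, total_num_steps=1000, warmup_num_steps=100)
```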
src/utils/launch.py CHANGED
@@ -502,7 +502,6 @@ class PrepareForLaunch:
         debug (`bool`, *optional*, defaults to `False`):
             Whether or not this is a debug launch.
     """
-
     def __init__(self, launcher, distributed_type="NO", debug=False):
         self.launcher = launcher
         self.distributed_type = DistributedType(distributed_type)
src/utils/megatron_lm.py CHANGED
@@ -68,7 +68,6 @@ class MegatronLMDummyDataLoader:
     Args:
         **dataset_kwargs: Megatron data arguments.
     """
-
     def __init__(self, **dataset_kwargs):
         parser = argparse.ArgumentParser()
         parser = _add_data_args(parser)
@@ -346,7 +345,6 @@ class MegatronLMDummyScheduler:
         **kwargs:
             Other arguments.
     """
-
     def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
         self.optimizer = optimizer
         self.total_num_steps = total_num_steps
@@ -392,7 +390,6 @@ class BertTrainStep(AbstractTrainStep):
     Args:
         args (`argparse.Namespace`): Megatron-LM arguments.
     """
-
     def __init__(self, args):
         super().__init__("BertTrainStep")
         self.get_batch = self.get_batch_func(args.megatron_dataset_flag)
@@ -521,7 +518,6 @@ class GPTTrainStep(AbstractTrainStep):
     Args:
         args (`argparse.Namespace`): Megatron-LM arguments.
     """
-
     def __init__(self, args):
         super().__init__("GPTTrainStep")
         self.get_batch = self.get_batch_func(args.megatron_dataset_flag)
@@ -627,7 +623,6 @@ class T5TrainStep(AbstractTrainStep):
     Args:
         args (`argparse.Namespace`): Megatron-LM arguments.
     """
-
     def __init__(self, args):
         super().__init__("T5TrainStep")
         self.get_batch = self.get_batch_func(args.megatron_dataset_flag)
@@ -846,7 +841,6 @@ class MegatronEngine(torch.nn.Module):
         optimizer: Megatron-LM optimizer
         lr_scheduler: Megatron-LM lr scheduler
     """
-
     def __init__(self, accelerator, model, optimizer, scheduler):
         super(MegatronEngine, self).__init__()
         self.module = model
@@ -892,7 +886,6 @@ class MegatronEngine(torch.nn.Module):
         Args:
             batch_data (:obj:`dict`): The batch data to train on.
         """
-
         args = get_args()
         timers = get_timers()
 
@@ -993,7 +986,6 @@ class MegatronEngine(torch.nn.Module):
         Args:
             batch_data (:obj:`dict`): The batch data to evaluate on.
         """
-
         args = get_args()
         data_chunks = []
         if args.num_micro_batches > 1:
@@ -1176,7 +1168,6 @@ class MegatronEngine(torch.nn.Module):
         length_penalty (float, optional): length penalty for beam search. Defaults to None.
         kwargs: additional key-value arguments
         """
-
         # checking if required arguments are passed
         args = get_args()
         if args.model_type_name != "gpt":
@@ -1332,7 +1323,6 @@ def avg_losses_across_data_parallel_group(losses):
     Args:
         losses (List[Tensor]): List of losses to average across data parallel group.
     """
-
     return average_losses_across_data_parallel_group(losses)
 
 
@@ -1345,7 +1335,6 @@ def gather_across_data_parallel_groups(tensor):
         The data to gather across data parallel ranks.
 
     """
-
     def _gpu_gather_one(tensor):
         if tensor.ndim == 0:
             tensor = tensor.clone()[None]
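
As with the DeepSpeed dummies, the Megatron-LM dummy objects defer the real setup to `prepare`; a minimal sketch based on the signature above:

```python
import torch
from accelerate.utils import MegatronLMDummyScheduler

optimizer = torch.optim.AdamW(torch.nn.Linear(8, 8).parameters(), lr=1e-4)
# Placeholder; the actual Megatron-LM scheduler is created during prepare().
scheduler = MegatronLMDummyScheduler(optimizer, total_num_steps=1000, warmup_num_steps=100)
```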
src/utils/modeling.py CHANGED
@@ -395,7 +395,6 @@ def get_non_persistent_buffers(module: nn.Module, recurse: bool = False):
         recurse (`bool`, *optional*, defaults to `False`):
             Whether or not to go look in every submodule or just return the direct non persistent buffers.
     """
-
     non_persistent_buffers_set = module._non_persistent_buffers_set
     if recurse:
         for _, m in module.named_modules():
@@ -409,7 +408,6 @@ class FindTiedParametersResult(list):
     This is a subclass of a list to handle backward compatibility for Transformers. Do not rely on the fact this is not
     a list or on the `values` method as in the future this will be removed.
     """
-
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
 
@@ -428,7 +426,6 @@ def check_tied_parameters_in_config(model: nn.Module):
     Returns:
         bool: True if the model needs to have tied weights
     """
-
     # based on model.tie_weights() method
     has_tied_word_embedding = False
     has_tied_encoder_decoder = False
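
A quick sketch of the buffer helper above (the toy module is an illustration; non-persistent buffers are those registered with `persistent=False`):

```python
import torch
from torch import nn
from accelerate.utils.modeling import get_non_persistent_buffers

class WithCache(nn.Module):
    def __init__(self):
        super().__init__()
        self.register_buffer("cache", torch.zeros(4), persistent=False)

# Returns the names of buffers that are excluded from the state dict.
print(get_non_persistent_buffers(WithCache(), recurse=True))
```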
src/utils/offload.py CHANGED
@@ -85,7 +85,6 @@ class PrefixedDataset(Mapping):
         dataset (`Mapping`): Any map with string keys.
         prefix (`str`): A prefix to add when trying to access any element in the underlying dataset.
     """
-
     def __init__(self, dataset: Mapping, prefix: str):
         self.dataset = dataset
         self.prefix = prefix
@@ -113,7 +112,6 @@ class OffloadedWeightsLoader(Mapping):
             A dictionary from weight name to their information (`dtype`/ `shape` or safetensors filename). Will default
             to the index saved in `save_folder`.
     """
-
     def __init__(
         self,
         state_dict: Dict[str, torch.Tensor] = None,
src/utils/operations.py CHANGED
@@ -150,7 +150,6 @@ def get_data_structure(data):
     Returns:
         The same data structure as `data` with [`~utils.TensorInformation`] instead of tensors.
     """
-
     def _get_data_structure(tensor):
         return TensorInformation(shape=tensor.shape, dtype=tensor.dtype)
 
@@ -168,7 +167,6 @@ def get_shape(data):
     Returns:
         The same data structure as `data` with lists of tensor shapes instead of tensors.
     """
-
     def _get_shape(tensor):
         return list(tensor.shape)
 
@@ -182,7 +180,6 @@ def initialize_tensors(data_structure):
     Returns:
         The same data structure as `data` with tensors instead of [`~utils.TensorInformation`].
     """
-
     def _initialize_tensor(tensor_info):
         return torch.empty(*tensor_info.shape, dtype=tensor_info.dtype)
 
@@ -222,7 +219,6 @@ def listify(data):
     Returns:
         The same data structure as `data` with lists of numbers instead of `torch.Tensor`.
     """
-
     def _convert_to_list(tensor):
         tensor = tensor.detach().cpu()
         if tensor.dtype == torch.bfloat16:
@@ -293,7 +289,6 @@ class DistributedOperationException(Exception):
     An exception class for distributed operations. Raised if the operation cannot be performed due to the shape of the
     tensors.
     """
-
     pass
 
 
@@ -301,7 +296,6 @@ def verify_operation(function):
     """
     Verifies that `tensor` is the same shape across all processes. Only ran if `PartialState().debug` is `True`.
     """
-
     @wraps(function)
     def wrapper(*args, **kwargs):
         if PartialState().distributed_type == DistributedType.NO or not PartialState().debug:
@@ -337,7 +331,6 @@ def chained_operation(function):
     Checks that `verify_operation` failed and if so reports a more helpful error chaining the existing
     `DistributedOperationException`.
     """
-
     @wraps(function)
     def wrapper(*args, **kwargs):
         try:
@@ -469,7 +462,6 @@ def slice_tensors(data, tensor_slice, process_index=None, num_processes=None):
     Returns:
         The same data structure as `data` with all the tensors slices.
     """
-
     def _slice_tensor(tensor, tensor_slice):
         return tensor[tensor_slice]
 
@@ -518,7 +510,6 @@ def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
         pad_first (`bool`, *optional*, defaults to `False`):
             Whether to pad at the beginning or the end.
     """
-
     def _pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
         if getattr(tensor, "is_nested", False):
             warnings.warn(
@@ -572,7 +563,6 @@ def reduce(tensor, reduction="mean", scale=1.0):
     Returns:
         The same data structure as `data` with all the tensors reduced.
     """
-
     def _reduce_across_processes(tensor, reduction="mean", scale=1.0):
         state = PartialState()
         cloned_tensor = tensor.clone()
@@ -602,7 +592,6 @@ def convert_to_fp32(tensor):
     Returns:
         The same data structure as `tensor` with all tensors that were in FP16/BF16 precision converted to FP32.
     """
-
     def _convert_to_fp32(tensor):
         return tensor.float()
 
@@ -624,7 +613,6 @@ class ConvertOutputsToFp32:
     Returns:
         The same function as `model_forward` but with converted outputs.
     """
-
     def __init__(self, model_forward):
         self.model_forward = model_forward
         update_wrapper(self, model_forward)
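
Most of these helpers operate on arbitrarily nested structures of tensors; a single-process sketch of a few of them:

```python
import torch
from accelerate.utils import convert_to_fp32, listify, pad_across_processes

data = {"logits": torch.randn(2, 3, dtype=torch.float16)}
fp32_data = convert_to_fp32(data)  # nested fp16/bf16 tensors -> fp32
as_lists = listify(fp32_data)      # nested tensors -> plain Python lists
# With one process this is a no-op; across processes it pads dim 0 to the max size.
padded = pad_across_processes(torch.ones(2, 3), dim=0, pad_index=0)
```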