diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..ed8ebf583f771da9150c35db3955987b7d757904
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+__pycache__
\ No newline at end of file
diff --git a/config_mistral.py b/config_mistral.py
new file mode 100644
index 0000000000000000000000000000000000000000..7680cfdcf0ce2691486f8c34d0dbab77bd0f4ae2
--- /dev/null
+++ b/config_mistral.py
@@ -0,0 +1,70 @@
+""" Example python script to generate a YAML config file which can be used to run a training with nanotron. Refer to the "examples" section in `/README.md` for more information.
+
+Usage:
+```
+python config_mistral.py
+```
+"""
+import os
+from dataclasses import dataclass
+from typing import Optional
+
+from nanotron.config import (
+    CheckpointsArgs,
+    Config,
+    DataArgs,
+    GeneralArgs,
+    LoggingArgs,
+    LRSchedulerArgs,
+    ModelArgs,
+    OptimizerArgs,
+    ParallelismArgs,
+    PretrainDatasetsArgs,
+    RandomInit,
+    TokenizerArgs,
+    TokensArgs,
+)
+from nanotron.logging import human_format
+
+
+@dataclass
+class MistralConfig:
+    """Configuration for a MISTRAL model
+
+    Be careful to keep the typing coherent, as we use it to reconstruct the model from yaml
+    """
+
+    attn_pdrop: float = 0.0
+    bos_token_id: int = 1
+    eos_token_id: int = 2
+    hidden_act: str = "silu"
+    hidden_size: int = 4096
+    initializer_range: float = 0.02
+    intermediate_size: int = 14336
+    is_mistral_config: bool = True  # We use this to help differentiate models in yaml/python conversion
+    max_position_embeddings: int = 32768
+    num_attention_heads: int = 32
+    num_hidden_layers: int = 32
+    num_key_value_heads: Optional[int] = 8
+    pad_token_id: Optional[int] = None
+    pretraining_tp: int = 1
+    rms_norm_eps: float = 1e-05
+    rope_theta: float = 10000.0
+    sliding_window_size: int = 4096
+    tie_word_embeddings: bool = False
+    use_cache: bool = True
+    vocab_size: int = 32000
+
+    def __post_init__(self):
+        # for backward compatibility
+        if self.num_key_value_heads is None:
+            self.num_key_value_heads = self.num_attention_heads
+
+
+def get_num_params(model_config: MistralConfig) -> int:
+    num_params = model_config.vocab_size * model_config.hidden_size * 2 + \
+        model_config.num_hidden_layers * (
+            3 * model_config.hidden_size * model_config.intermediate_size
+            + 2 * model_config.hidden_size * model_config.hidden_size
+            + 2 * model_config.hidden_size * (model_config.hidden_size // (model_config.num_attention_heads // model_config.num_key_value_heads))
+        )
+    return num_params
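For context, a hand-computed check (editorial, not part of the patch) that `get_num_params` gives the expected ~7.24B parameters for the Mistral-7B settings used in `config_mistral_7b.py` below. The formula counts embeddings, attention and MLP weights and ignores the comparatively tiny RMSNorm parameters:

```python
# Sanity check of get_num_params with the Mistral-7B hyper-parameters.
vocab, hidden, layers = 32000, 4096, 32
heads, kv_heads, ffn = 32, 8, 14336

embeddings = vocab * hidden * 2                      # embed_tokens + lm_head (untied)
per_layer = (
    3 * hidden * ffn                                 # gate_proj, up_proj, down_proj
    + 2 * hidden * hidden                            # q_proj + o_proj
    + 2 * hidden * (hidden // (heads // kv_heads))   # k_proj + v_proj (GQA: 8 kv heads)
)
total = embeddings + layers * per_layer
print(total)  # 7_241_465_856 -> ~7.24B, matching Mistral-7B
```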
diff --git a/config_mistral_7b.py b/config_mistral_7b.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ca6cbc0e2e4d4c50c7df9107bd175f47082be1c
--- /dev/null
+++ b/config_mistral_7b.py
@@ -0,0 +1,88 @@
+""" Example python script to generate a YAML config file which can be used to run a training with nanotron. Refer to the "examples" section in `/README.md` for more information.
+
+Usage:
+```
+python config_mistral_7b.py
+```
+"""
+import os
+from dataclasses import dataclass
+from typing import Optional
+
+from nanotron.config import (
+    CheckpointsArgs,
+    Config,
+    DataArgs,
+    GeneralArgs,
+    LoggingArgs,
+    LRSchedulerArgs,
+    ModelArgs,
+    OptimizerArgs,
+    ParallelismArgs,
+    PretrainDatasetsArgs,
+    RandomInit,
+    TokenizerArgs,
+    TokensArgs,
+)
+from nanotron.logging import human_format
+
+from config_mistral import MistralConfig, get_num_params
+
+
+MODEL_CONFIG = MistralConfig(
+    # Config for Mistral 7B
+    attn_pdrop=0.0,
+    bos_token_id=1,
+    eos_token_id=2,
+    hidden_act="silu",
+    hidden_size=4096,
+    initializer_range=0.02,
+    intermediate_size=14336,
+    max_position_embeddings=32768,
+    num_attention_heads=32,
+    num_hidden_layers=32,
+    num_key_value_heads=8,
+    pretraining_tp=1,
+    rms_norm_eps=1e-05,
+    rope_theta=10000.0,
+    sliding_window_size=4096,
+    tie_word_embeddings=False,
+    use_cache=True,
+    vocab_size=32000,
+)
+
+num_params = human_format(get_num_params(MODEL_CONFIG)).replace(".", "p")
+
+print(f"Model has {num_params} parameters")
+
+PARALLELISM = ParallelismArgs(
+    dp=2,
+    pp=2,
+    tp=2,
+    pp_engine="1f1b",
+    tp_mode="REDUCE_SCATTER",
+    tp_linear_async_communication=True,
+    recompute_granularity="selective",
+)
+
+CONFIG = Config(
+    general=GeneralArgs(project="mistralai", run="Mistral-7B-v0.1", seed=42),
+    checkpoints=None,
+    parallelism=PARALLELISM,
+    model=ModelArgs(init_method=RandomInit(std=0.025), model_config=MODEL_CONFIG),
+    tokenizer=TokenizerArgs("mistralai/Mistral-7B-v0.1"),
+    optimizer=None,
+    logging=None,
+    tokens=None,
+    data=None,
+    profiler=None,
+)
+
+if __name__ == "__main__":
+    file_path = os.path.abspath(__file__)
+
+    file_path = file_path.replace(".py", ".yaml")
+    # Save config as YAML file
+    CONFIG.save_as_yaml(file_path)
+
+    # You can now train a model with this config using `/run_train.py`
diff --git a/config_mistral_7b.yaml b/config_mistral_7b.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2a609591b98bbab1f79b01369a4256b34b310d43
--- /dev/null
+++ b/config_mistral_7b.yaml
@@ -0,0 +1,53 @@
+checkpoints: null
+data: null
+general:
+  benchmark_csv_path: null
+  consumed_train_samples: null
+  ignore_sanity_checks: false
+  project: mistralai
+  run: Mistral-7B-v0.1
+  seed: 42
+  step: null
+logging: null
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.025
+  make_vocab_size_divisible_by: 1
+  model_config:
+    attn_pdrop: 0.0
+    bos_token_id: 1
+    eos_token_id: 2
+    hidden_act: silu
+    hidden_size: 4096
+    initializer_range: 0.02
+    intermediate_size: 14336
+    is_mistral_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 32
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_theta: 10000.0
+    sliding_window_size: 4096
+    tie_word_embeddings: false
+    use_cache: true
+    vocab_size: 32000
+optimizer: null
+parallelism:
+  dp: 2
+  pp: 2
+  pp_engine: 1f1b
+  recompute_granularity: SELECTIVE
+  tp: 2
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: mistralai/Mistral-7B-v0.1
+  tokenizer_revision: null
+tokens: null
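A minimal round-trip check of the generated YAML (editorial sketch, not part of the patch; assumes PyYAML is installed and `config_mistral_7b.yaml` has been generated by the script above):

```python
import yaml

# Load the generated config and spot-check a few Mistral-7B fields.
with open("config_mistral_7b.yaml") as f:
    cfg = yaml.safe_load(f)

mc = cfg["model"]["model_config"]
assert mc["is_mistral_config"] is True
assert mc["sliding_window_size"] == 4096
assert mc["num_key_value_heads"] == 8  # GQA: 32 query heads share 8 kv heads
print(cfg["parallelism"])  # {'dp': 2, 'pp': 2, ...}
```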
diff --git a/config_tiny_mistral.py b/config_tiny_mistral.py
index 6ae36172be0e5da1b5e61852c03ab0f3304f1cb2..ad5d8c21329d27a6baef3deecf55e79a3e87ee1b 100644
--- a/config_tiny_mistral.py
+++ b/config_tiny_mistral.py
@@ -26,41 +26,12 @@ from nanotron.config import (
 )
 from nanotron.logging import human_format
 
-
-@dataclass
-class MistralConfig:
-    """Configuration for a MISTRAL model
-
-    Be careful on having a coherent typing as we use it to reconstruct the model from yaml
-    """
-
-    bos_token_id: int = 1
-    eos_token_id: int = 2
-    hidden_act: str = "silu"
-    hidden_size: int = 4096
-    initializer_range: float = 0.02
-    intermediate_size: int = 11008
-    is_mistral_config: bool = True  # We use this help differentiate models in yaml/python conversion
-    max_position_embeddings: int = 2048
-    num_attention_heads: int = 32
-    num_hidden_layers: int = 32
-    num_key_value_heads: Optional[int] = None
-    pad_token_id: Optional[int] = None
-    pretraining_tp: int = 1
-    rms_norm_eps: float = 1e-6
-    rope_scaling: Optional[dict] = None
-    tie_word_embeddings: bool = False
-    use_cache: bool = True
-    vocab_size: int = 32000
-
-    def __post_init__(self):
-        # for backward compatibility
-        if self.num_key_value_heads is None:
-            self.num_key_value_heads = self.num_attention_heads
+from config_mistral import MistralConfig, get_num_params
 
 
 model_config = MistralConfig(
     # Config for a tiny model with 1.62M parameters
+    attn_pdrop=0.0,
     bos_token_id=1,
     eos_token_id=2,
     hidden_act="silu",
@@ -73,20 +44,13 @@ model_config = MistralConfig(
     num_key_value_heads=4,
     pretraining_tp=1,
     rms_norm_eps=1e-05,
-    rope_scaling=None,
+    rope_theta=10000.0,
     tie_word_embeddings=True,
     use_cache=True,
     vocab_size=256,
 )
 
-num_params = human_format(
-    model_config.vocab_size * model_config.hidden_size * 2
-    + model_config.num_hidden_layers
-    * (
-        3 * model_config.hidden_size * model_config.intermediate_size
-        + 4 * model_config.hidden_size * model_config.hidden_size
-    )
-).replace(".", "p")
+num_params = human_format(get_num_params(model_config)).replace(".", "p")
 
 print(f"Model has {num_params} parameters")
 
@@ -141,9 +105,10 @@ config = Config(
 )
 
 if __name__ == "__main__":
-    dir = os.path.dirname(__file__)
+    file_path = os.path.abspath(__file__)
+
+    file_path = file_path.replace(".py", ".yaml")
     # Save config as YAML file
-    config.save_as_yaml(f"{dir}/config_tiny_mistral.yaml")
+    config.save_as_yaml(file_path)
 
     # You can now train a model with this config using `/run_train.py`
diff --git a/config_tiny_mistral.yaml b/config_tiny_mistral.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cecc9a4dd5b8f986cfe7b0dd390ca287f1f4cbc6
--- /dev/null
+++ b/config_tiny_mistral.yaml
@@ -0,0 +1,92 @@
+checkpoints:
+  checkpoint_interval: 10
+  checkpoints_path: /fsx/thomwolf/github/textbooks-proj/brrr/models/checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data:
+  dataset:
+    dataset_overwrite_cache: false
+    dataset_processing_num_proc_per_process: 1
+    hf_dataset_config_name: null
+    hf_dataset_or_datasets: HuggingFaceH4/testing_alpaca_small
+    hf_dataset_splits: train
+    text_column_name: completion
+  num_loading_workers: 1
+  seed: 42
+general:
+  benchmark_csv_path: null
+  consumed_train_samples: null
+  ignore_sanity_checks: false
+  project: debug
+  run: tiny_mistral
+  seed: 42
+  step: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.025
+  make_vocab_size_divisible_by: 1
+  model_config:
+    attn_pdrop: 0.0
+    bos_token_id: 1
+    eos_token_id: 2
+    hidden_act: silu
+    hidden_size: 16
+    initializer_range: 0.02
+    intermediate_size: 64
+    is_mistral_config: true
+    max_position_embeddings: 256
+    num_attention_heads: 4
+    num_hidden_layers: 2
+    num_key_value_heads: 4
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_theta: 10000.0
+    sliding_window_size: 4096
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 256
+optimizer:
+  accumulate_grad_in_fp32: true
+  adam_beta1: 0.9
+  adam_beta2: 0.95
+  adam_eps: 1.0e-08
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_steps: 8
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 0
+parallelism:
+  dp: 2
+  pp: 2
+  pp_engine: 1f1b
+  recompute_granularity: SELECTIVE
+  tp: 2
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: gpt2
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 2
+  sequence_length: 32
+  train_steps: 10
+  val_check_interval: -1
diff --git a/convert_trfrs_to_brrr.py b/convert_trfrs_to_brrr.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f85587d00416f271f93104e0f1a6aedb8b9a4a4
--- /dev/null
+++ b/convert_trfrs_to_brrr.py
@@ -0,0 +1,262 @@
+# ruff: noqa: E402
+"""
+This module converts a transformers MistralForCausalLM to a brrr model
+
+Command:
+torchrun --nproc_per_node=1 convert_trfrs_to_brrr.py \
+    --model_name mistralai/Mistral-7B-v0.1 \
+    --save_path ./pretrained/Mistral-7B-v0.1
+"""
+import argparse
+import sys
+from dataclasses import asdict
+from pathlib import Path
+from typing import Dict, List
+
+import torch
+
+from brrr.trainer import DistributedTrainer
+
+sys.path.append(Path(__file__).parent.parent.as_posix())
+import os
+
+from nanotron.parallel.parameters import NanotronParameter, sanity_check
+from nanotron.parallel.pipeline_parallel.engine import (
+    AllForwardAllBackwardPipelineEngine,
+)
+from nanotron.parallel.tensor_parallel.nn import TensorParallelLinearMode
+from transformers import MistralConfig as MistralConfig_trfs, MistralForCausalLM
+
+import nanotron.distributed as dist
+from nanotron.config import ParallelismArgs, RecomputeGranularity
+from nanotron.parallel.context import ParallelContext
+from nanotron.models import build_model
+from nanotron.trainer import mark_tied_parameters
+from nanotron.serialize import save_meta, save_weights, save
+
+from modeling_mistral import MistralForTraining
+from config_mistral_7b import PARALLELISM as PARALLELISM_BRRR, CONFIG as CONFIG_BRRR
+
+
+def get_args():
+    parser = argparse.ArgumentParser(description="Convert transformers weights to brrr weights")
+    parser.add_argument("--model_name", type=str, default="mistralai/Mistral-7B-v0.1")
+    parser.add_argument("--save_path", type=str, default="pretrained/Mistral-7B-v0.1")
+    parser.add_argument("--dp", type=int, default=1)
+    parser.add_argument("--pp", type=int, default=1)
+    parser.add_argument("--tp", type=int, default=1)
+    return parser.parse_args()
+
+
+def permute_for_rotary(tensor, num_heads, per_head_hidden_size, hidden_size):
+    return (
+        tensor.view(num_heads, 2, per_head_hidden_size // 2, hidden_size)
+        .transpose(1, 2)
+        .contiguous()
+        .view(num_heads * per_head_hidden_size, hidden_size)
+    )
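A toy illustration (editorial, not part of the patch) of what `permute_for_rotary` does: it regroups the rows of a q/k projection from HF's rotate-half rotary layout into the interleaved layout expected by `FlashRotaryEmbedding(interleaved=True)`. With one hypothetical head of dimension 4:

```python
import torch

# One head, head_dim=4, hidden=4; rows 0..3 are the head's output dims.
w = torch.arange(16.0).reshape(4, 4)  # row order: [0, 1, 2, 3]

out = (
    w.view(1, 2, 2, 4)  # (num_heads, 2, per_head_hidden_size // 2, hidden_size)
    .transpose(1, 2)
    .contiguous()
    .view(4, 4)
)
# Rows from the two halves are now interleaved: [0, 2, 1, 3]
print(out[:, 0])  # tensor([ 0.,  8.,  4., 12.])
```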
+def get_transformers_weight(
+    name: str, ref_module_state_dict: Dict[str, torch.Tensor], ref_module: MistralForCausalLM, get_grad: bool = False
+) -> torch.Tensor:
+    """Given a parameter name from our brrr implementation, return the equivalent tensor from the transformers implementation"""
+    config = ref_module.config
+    brrr_prefix = "model."
+    assert name.startswith(brrr_prefix)
+    name = name[len(brrr_prefix) :]
+
+    path = name.split(".")
+    path.remove("pp_block")
+    name = ".".join(path)
+
+    if get_grad is False:
+
+        def get_tensor(path: str):
+            return ref_module_state_dict[path]
+
+        def get_tensors(path: List[str]):
+            return [get_tensor(p) for p in path]
+
+    else:
+
+        def get_tensor(path: str):
+            weight = ref_module.get_parameter(path)
+            return weight.grad
+
+        def get_tensors(path: List[str]):
+            return [get_tensor(p) for p in path]
+
+    if name == "token_position_embeddings.token_embedding.weight":
+        return get_tensor("model.embed_tokens.weight")
+
+    elif name == "lm_head.weight":
+        # This is only used when weights are not shared
+        return get_tensor("lm_head.weight")
+
+    elif name == "final_layer_norm.weight":
+        return get_tensor("model.norm.weight")
+
+    if path[0] == "decoder":
+        transformer_path = ["model"] + ["layers"] + [path[1]]
+
+        if path[2] == "attn":
+            path[2] = "self_attn"
+
+        if path[2] == "ff":
+            path[2] = "mlp"
+
+        if path[3] == "qkv_proj":
+            proj_names = ["q_proj", "k_proj", "v_proj"]
+            tensor_list = get_tensors(
+                [".".join(transformer_path + path[2:3] + [proj_name] + path[4:]) for proj_name in proj_names]
+            )
+            # Permute q/k
+            per_head_hidden_size = config.hidden_size // config.num_attention_heads
+            # Permute q
+            print(f"Permuting q {tensor_list[0].shape}")
+            tensor_list[0] = permute_for_rotary(
+                tensor=tensor_list[0],
+                num_heads=config.num_attention_heads,
+                per_head_hidden_size=per_head_hidden_size,
+                hidden_size=config.hidden_size,
+            )
+            # Permute k
+            print(f"Permuting k {tensor_list[1].shape}")
+            tensor_list[1] = permute_for_rotary(
+                tensor=tensor_list[1],
+                num_heads=config.num_key_value_heads,
+                per_head_hidden_size=per_head_hidden_size,
+                hidden_size=config.hidden_size,
+            )
+            return torch.cat(tensor_list, dim=0)
+
+        if path[3] == "gate_up_proj":
+            tensor_list = get_tensors(
+                [
+                    ".".join(transformer_path + path[2:3] + [proj_name] + path[4:])
+                    for proj_name in ["gate_proj", "up_proj"]
+                ]
+            )
+            return torch.cat(tensor_list, dim=0)
+
+        return get_tensor(".".join(transformer_path + path[2:]))
+
+    else:
+        raise ValueError(f"Couldn't find transformer equivalent of {name}")
+
+
+def convert_trfrs_to_brrr(dp, pp, tp, model_name="huggyllama/llama-7b", save_path="pretrained/llama-7b"):
+    # check that save_path doesn't exist or is empty
+    save_path = Path(save_path)
+    # assert not save_path.exists() or len(list(save_path.iterdir())) == 0, f"save_path {save_path} is not empty"
+
+    parallel_config = PARALLELISM_BRRR
+
+    parallel_config.dp = dp
+    parallel_config.pp = pp
+    parallel_config.tp = tp
+
+    # Initialise all process groups
+    parallel_context = ParallelContext(
+        data_parallel_size=parallel_config.dp,
+        pipeline_parallel_size=parallel_config.pp,
+        tensor_parallel_size=parallel_config.tp,
+    )
+    # params
+    dtype = torch.bfloat16  # Flash attention doesn't support fp32
+
+    # Initialise brrr model
+    model_config_brrr = CONFIG_BRRR.model.model_config
+
+    model = build_model(
+        model_builder=lambda: MistralForTraining(
+            config=model_config_brrr,
+            parallel_context=parallel_context,
+            parallel_config=parallel_config,
+            random_states=None,
+        ),
+        dtype=dtype,
+        parallel_context=parallel_context,
+        device=torch.device("cpu"),
+    )
+
+    # Initialise transformers model
+    device_map = {}
+    current_pp_rank = dist.get_rank(group=parallel_context.pp_pg)
+    device_map["model.embed_tokens"] = (
+        model.model.token_position_embeddings.rank
+        if current_pp_rank == model.model.token_position_embeddings.rank
+        else "meta"
+    )
+    for i in range(model_config_brrr.num_hidden_layers):
+        device_map[f"model.layers.{i}"] = (
+            model.model.decoder[i].rank if current_pp_rank == model.model.decoder[i].rank else "meta"
+        )
+    device_map["model.norm"] = (
+        model.model.final_layer_norm.rank if current_pp_rank == model.model.final_layer_norm.rank else "meta"
+    )
+    device_map["lm_head"] = model.model.lm_head.rank if current_pp_rank == model.model.lm_head.rank else "meta"
+    model_ref = MistralForCausalLM.from_pretrained(model_name, torch_dtype=dtype, device_map=device_map)
+
+    # Copy weights from trfrs to brrr
+    ref_state_dict = model_ref.state_dict()
+    for name, param in model.named_parameters():
+        print(f"Syncing {name}")
+        ref_param = get_transformers_weight(name=name, ref_module_state_dict=ref_state_dict, ref_module=model_ref)
+
+        param_is_tp_sharded = (
+            isinstance(param, NanotronParameter)
+            and param.is_sharded
+            and parallel_context.world_ranks_to_pg[param.get_sharded_info().global_ranks] == parallel_context.tp_pg
+        )
+
+        if param_is_tp_sharded:
+            sharded_info = param.get_sharded_info()
+            # copy param data (not just the reference)
+            with torch.no_grad():
+                for local_global_slices_pair in sharded_info.local_global_slices_pairs:
+                    local_slices = local_global_slices_pair.local_slices
+                    global_slices = local_global_slices_pair.global_slices
+                    param[local_slices].copy_(ref_param[global_slices])
+        else:
+            assert (
+                ref_param.shape == param.shape
+            ), f"Parameter shapes don't match for {name}\n{ref_param.shape} != {param.shape}"
+            # copy param data (not just the reference)
+            with torch.no_grad():
+                param.copy_(ref_param)
+        ref_param = None
+        # torch.cuda.empty_cache()
+
+    # TODO @nouamanetazi: assert weights are the same
+    # Mark tied parameters
+    mark_tied_parameters(model=model, parallel_context=parallel_context, parallel_config=parallel_config)
+
+    sanity_check(root_module=model)
+
+    checkpoint_metadata = {
+        "last_train_step": 0,
+        "consumed_train_samples": 0,
+    }
+    save(config=CONFIG_BRRR, model=model, optimizer=None, lr_scheduler=None, parallel_context=parallel_context, root_folder=save_path,
+         should_save_optimizer=False, should_save_lr_scheduler=False, checkpoint_metadata=checkpoint_metadata,
+         sanity_checks=False)
+    # save_weights(model=model, parallel_context=parallel_context, root_folder=save_path)
+    # save_meta(root_folder=save_path, parallel_context=parallel_context, checkpoint_metadata=checkpoint_metadata)
+
+    if dist.get_rank(parallel_context.world_pg) == 0:
+        print(save_path)
+        import json
+
+        with open(save_path / "model_config.json", mode="w") as fo:
+            fo.write(json.dumps(asdict(CONFIG_BRRR.model.model_config), indent=4))
+
+
+def main():
+    args = get_args()
+    convert_trfrs_to_brrr(**vars(args))
+
+
+if __name__ == "__main__":
+    main()
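A rough shape check (hand-derived, editorial) for the fused projections handled above: at tp=1, `qkv_proj` concatenates q (4096×4096) with the shared k and v (1024×4096 each), and `gate_up_proj` stacks gate and up; in bf16 these appear consistent with the safetensors pointer sizes recorded further below:

```python
# Expected fused-projection shapes for Mistral-7B at tp=1 (hand-derived).
hidden, heads, kv_heads, ffn = 4096, 32, 8, 14336
head_dim = hidden // heads            # 128

q_rows = heads * head_dim             # 4096
kv_rows = kv_heads * head_dim         # 1024 each for k and v
qkv_rows = q_rows + 2 * kv_rows       # 6144 -> qkv_proj is (6144, 4096)

gate_up_rows = 2 * ffn                # 28672 -> gate_up_proj is (28672, 4096)

# bf16 = 2 bytes/param; close to the safetensors file sizes listed below
print(qkv_rows * hidden * 2)          # 50331648 (file size 50332000 incl. header)
print(gate_up_rows * hidden * 2)      # 234881024 (file size 234881328 incl. header)
```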
""" from typing import Dict, Optional, Union +import inspect import torch from flash_attn import bert_padding @@ -46,12 +47,15 @@ from nanotron.parallel.tensor_parallel.nn import ( ) from nanotron.random import RandomStates from nanotron.utils import checkpoint_method +from nanotron.nn.activations import ACT2FN from torch import nn -from transformers import MistralConfig -from transformers.activations import ACT2FN + +from config_mistral_7b import MistralConfig logger = logging.get_logger(__name__) +_flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_varlen_func).parameters) + class RotaryEmbedding(nn.Module): def __init__(self, dim: int, end: int, theta: float = 10000.0): @@ -189,15 +193,22 @@ class CoreAttention(nn.Module): ), f"Hidden size {config.hidden_size} must be divisible by number of attention heads {config.num_attention_heads}." self.d_qk = config.hidden_size // config.num_attention_heads self.d_v = config.hidden_size // config.num_attention_heads + self.dropout = config.attn_pdrop self.checkpoint_attention = False # Because flash_attn already does checkpointing + if config.sliding_window_size is not None: + assert ( + _flash_supports_window_size + ), "Current version of flash-attn doesn't support sliding window: `pip install flash-attn>=2.3`" + self.sliding_window_size = config.sliding_window_size # if layer_idx not in config.global_attn_layers else None + @checkpoint_method(attr_name="checkpoint_attention") def forward( self, - query_states: torch.Tensor, # [batch_size * q_length, n_local_q_heads, inner_dim] - key_states: torch.Tensor, # [batch_size * kv_length, n_local_kv_heads, inner_dim] - value_states: torch.Tensor, # [batch_size * kv_length, n_local_kv_heads, inner_dim] + query_states: torch.Tensor, # [batch_size * q_length, num_heads, inner_dim] + key_states: torch.Tensor, # [batch_size * kv_length, 1, inner_dim] + value_states: torch.Tensor, # [batch_size * kv_length, 1, inner_dim] q_sequence_mask: torch.Tensor, # torch.BoolTensor [batch_size, q_length] (can be broadcasted to that size) kv_sequence_mask: torch.Tensor, # torch.BoolTensor [batch_size, kv_length] (can be broadcasted to that size) ): @@ -218,9 +229,10 @@ class CoreAttention(nn.Module): cu_seqlens_k=cu_seqlens_k, max_seqlen_q=q_sequence_mask.shape[1], max_seqlen_k=kv_sequence_mask.shape[1], - dropout_p=0.0, - softmax_scale=None, # This already defaults to the scale I'm interested in + dropout_p=self.dropout if self.training else 0.0, + softmax_scale=None, # defaults to 1/sqrt(d_qk) causal=causal, + window_size=(self.sliding_window_size - 1, 0) if self.sliding_window_size is not None else (-1, -1), return_attn_probs=False, ) @@ -318,10 +330,11 @@ class CausalSelfAttention(nn.Module, AttachableStore): self.rotary_embedding = RotaryEmbedding( dim=self.d_qk, end=config.max_position_embeddings, + theta=config.rope_theta ) # NOTE: Only supported for training (TODO(fmom): position_ids not supported yet) - self.flash_rotary_embedding = FlashRotaryEmbedding(dim=self.d_qk, interleaved=True) + self.flash_rotary_embedding = FlashRotaryEmbedding(dim=self.d_qk, base=config.rope_theta, interleaved=True) self.o_proj = TensorParallelRowLinear( config.num_attention_heads * self.d_qk, @@ -852,7 +865,6 @@ class MistralForTraining(NanotronModel): super().__init__() import warnings - warnings.warn("This is just a Llama Model, not a Mistral one for demo purpose. 
Please fix implementation") self.model = MistralModel(config=config, parallel_context=parallel_context, parallel_config=parallel_config) self.loss = PipelineBlock( p2p=self.model.p2p, @@ -1044,12 +1056,13 @@ def get_flops( num_layers, hidden_size, num_heads, - num_key_value_heads, vocab_size, seq_len, - ffn_hidden_size, + kv_channels=None, + ffn_hidden_size=None, batch_size=1, recompute_granularity=None, + glu_activation=False, ): """Counts flops in an decoder-only model Args: @@ -1066,33 +1079,43 @@ def get_flops( model_flops: flops in the model (should be independent of the hardware and model implementation) hardware_flops: flops in the hardware (actual flops performed on the hardware). Check 6.3 in https://arxiv.org/pdf/2205.05198.pdf """ - if num_key_value_heads is None: - num_key_value_heads = num_heads - hidden_size_per_head = hidden_size // num_heads + if kv_channels is None: + assert hidden_size % num_heads == 0 + kv_channels = hidden_size // num_heads + if ffn_hidden_size is None: + ffn_hidden_size = 4 * hidden_size + # In the following we mark the reduced dimension with parentheses # decoder - # self attention - ## qkv projection - decoder_qkv_proj_flops_fwd = ( - 2 * num_layers * batch_size * seq_len * (hidden_size) * num_heads * hidden_size_per_head - + 2 * num_layers * batch_size * seq_len * (hidden_size) * 2 * num_key_value_heads * hidden_size_per_head - ) + # self attention (MQA) + ## q projection + decoder_q_proj_flops_fwd = 2 * num_layers * batch_size * seq_len * (hidden_size) * num_heads * kv_channels + ## kv projection, shared across heads + decoder_kv_proj_flops_fwd = 2 * num_layers * batch_size * seq_len * (hidden_size) * 2 * kv_channels ## qk logits - decoder_qk_logits_flops_fwd = 2 * num_layers * batch_size * num_heads * seq_len * (hidden_size_per_head) * seq_len + decoder_qk_logits_flops_fwd = 2 * num_layers * batch_size * num_heads * seq_len * (kv_channels) * seq_len + ### SWA (sliding window attention / local attention) + # window_size = 4096 + # decoder_qk_logits_flops_fwd = 2 * num_layers * batch_size * num_heads * seq_len * (kv_channels) * window_size ## v logits - decoder_v_logits_flops_fwd = 2 * num_layers * batch_size * num_heads * seq_len * (seq_len) * hidden_size_per_head + decoder_v_logits_flops_fwd = 2 * num_layers * batch_size * num_heads * seq_len * (seq_len) * kv_channels + # decoder_v_logits_flops_fwd = 2 * num_layers * batch_size * num_heads * seq_len * (window_size) * kv_channels ## attn out - decoder_attn_out_flops_fwd = ( - 2 * num_layers * batch_size * num_heads * seq_len * (hidden_size_per_head) * hidden_size - ) + decoder_attn_out_flops_fwd = 2 * num_layers * batch_size * num_heads * seq_len * (kv_channels) * hidden_size # FF ## 1st layer - decoder_ffn_1_flops_fwd = 4 * num_layers * batch_size * seq_len * (hidden_size) * ffn_hidden_size + decoder_ffn_1_flops_fwd = 2 * num_layers * batch_size * seq_len * (hidden_size) * ffn_hidden_size + if glu_activation: + # 3 matmuls instead of 2 in FFN + # ref. 
https://arxiv.org/pdf/2002.05202.pdf + # Used for example in T5 v1.1 + decoder_ffn_1_flops_fwd = 4 * num_layers * batch_size * seq_len * (hidden_size) * ffn_hidden_size ## 2nd layer decoder_ffn_2_flops_fwd = 2 * num_layers * batch_size * seq_len * (ffn_hidden_size) * hidden_size decoder_flops_fwd = ( - decoder_qkv_proj_flops_fwd + decoder_q_proj_flops_fwd + + decoder_kv_proj_flops_fwd + decoder_qk_logits_flops_fwd + decoder_v_logits_flops_fwd + decoder_attn_out_flops_fwd diff --git a/pretrained/Mistral-7B-v0.1/checkpoint_metadata.json b/pretrained/Mistral-7B-v0.1/checkpoint_metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..d4edf002aa5cc3e4ac7b7258643ee9f8d008787a --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/checkpoint_metadata.json @@ -0,0 +1,9 @@ +{ + "dp": 1, + "metas": { + "consumed_train_samples": 0, + "last_train_step": 0 + }, + "tp": 1, + "version": "1.2" +} \ No newline at end of file diff --git a/pretrained/Mistral-7B-v0.1/config.yaml b/pretrained/Mistral-7B-v0.1/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7bf564cf9dbef5e422d3811ada197fedf7c1a265 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/config.yaml @@ -0,0 +1,53 @@ +checkpoints: null +data: null +general: + benchmark_csv_path: null + consumed_train_samples: null + ignore_sanity_checks: false + project: mistralai + run: Mistral-7B-v0.1 + seed: 42 + step: null +logging: null +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.025 + make_vocab_size_divisible_by: 1 + model_config: + attn_pdrop: 0.0 + bos_token_id: 1 + eos_token_id: 2 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_mistral_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_theta: 10000.0 + sliding_window_size: 4096 + tie_word_embeddings: false + use_cache: true + vocab_size: 32000 +optimizer: null +parallelism: + dp: 1 + pp: 1 + pp_engine: 1f1b + recompute_granularity: SELECTIVE + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: mistralai/Mistral-7B-v0.1 + tokenizer_revision: null +tokens: null diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/0/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/0/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..0092f5ff3d9c6a793d2bb84fa0d93e5ff02882ef --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/0/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e6ef1be3d2daa611724f02567159bf507c9a9ea276d9771387a01f4942cafb6 +size 33554672 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/0/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/0/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..e5b8eba9df1b4948aa4655817b9a2957747c6864 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/0/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 
@@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3d4484e1f9505c97b6ac37472d9526e95470e6aef462fec6ae461b63e4ff77a +size 50332000 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/0/pp_block/input_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/0/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..e1626f6b09e658bf52ce6956ac29037f64260b6c --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/0/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c286c58dfce1f3f030c13b90c41c831d05c4323da3d50e23fe434f38f81535b +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/0/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/0/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..35e4e3290ee409366891269a21b8ccb59282cabb --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/0/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d69d412c683fc926a26a71536dd2530877994cfa6e4e9ae3f3a31f6861596b0 +size 117440752 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/0/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/0/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..f46fc12020967a6395c1218326ef60e7f4fc8846 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/0/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6148707c761016f313ee2ada71e15b9eb977878148fa382806eea4ef30a145e6 +size 234881328 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/0/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/0/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..4b2780b600026fa5ce53e8384490dc8d16ceef75 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/0/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05588e50e8fafc16c332e2f7a3d3830c9e59d29c35858d439a98ba4e418eba78 +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/1/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/1/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..bec5b920bbfb2c6aaf7e40c51f9b80c47260ee9a --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/1/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cffeb63cbb1f271fd7ab983b618dfe4a4fc2b6b3763b9332fc324d378207210d +size 33554672 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/1/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors 
b/pretrained/Mistral-7B-v0.1/model/model/decoder/1/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..a4e5f389760f7400d3191adf4f5108c042dd921f --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/1/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a1263d75614c4a9710ebc5a57fdec732b9348c1f57ace1887ce296e1805b529 +size 50332000 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/1/pp_block/input_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/1/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..35b78926598a7278835cd45c62d2847c7b502c48 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/1/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71cd7738481e6bcbacbc76ce206545fb2fe6d995f7e1a733b408c3fe92f7356c +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/1/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/1/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..9c3f6449adfa1507993b7f3ed4f62f11a38d051e --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/1/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31de05f9c50d9e94fe67936a973c86840f82ed2aad1494806baa81df8bbf9bf8 +size 117440752 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/1/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/1/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..48b65e18580469292b5cba31b89e496ac988cc30 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/1/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70761ee840fbdc950501814ff397c72e9d8bbc7be2030329f391c12eb5b73a0f +size 234881328 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/1/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/1/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..cb58764a6c1f551e6580cd5b18ffd357a46a7e3f --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/1/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1846ddc1c4ca9d8e03184f2fa34911398202f0edc310df5ea408a323a5f23ee8 +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/10/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/10/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..7d67fc2fa8c6bd600e9c3495126f7eb190ee3ba9 --- /dev/null +++ 
b/pretrained/Mistral-7B-v0.1/model/model/decoder/10/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59d82cc1e5ec1f364e7401e17e58d53f62a39658799aeb4902060236ebb0cb60 +size 33554672 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/10/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/10/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..97859e7e3e11483eed8714c63edc6267f3bb323b --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/10/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:117c7832cefd9a767282b27c852f00ed4ce7888a8abb7e2f9257a0b2fed60608 +size 50332000 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/10/pp_block/input_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/10/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..37774a5aa57a4b59a5964856c81ffe867ef44d7d --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/10/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c93dc35263c0efa22d22795777c009e4f9365cf1ef413b69880d14433d1069e8 +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/10/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/10/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..9993f9cd7558c4df20fe41313925a1f4caea640a --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/10/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad3fefd66e98df8fee62bd0fe451b18ca1a14545b72e570d499dce0464368b81 +size 117440752 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/10/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/10/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..a88e624248febf4949828164b720b87f00548920 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/10/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6346f7c23987a4c529ac1b63b5f6f56b4392981ffcaaf2cb84cf2bf5b2bc36a7 +size 234881328 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/10/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/10/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..0c895f2bbce007e00d328e40873ec42ae95919d0 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/10/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6cd70662e84b3d81b4f4512929d00d9377515c2dfe75d78109edce27c57d834 +size 8288 diff 
--git a/pretrained/Mistral-7B-v0.1/model/model/decoder/11/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/11/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..ddf59b513234e8a0d662aac57e651d6a578d92f0 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/11/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c17ee146b384be81a4b9cb06960728dd540d6650a5798abcc95315bb0daf2ca +size 33554672 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/11/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/11/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..a5a9e9715752e3b2775e6d7b74670987291401f2 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/11/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:497bbea1882c42d134dc4458194d71cd3d7d609b06e91e715093e0c243962116 +size 50332000 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/11/pp_block/input_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/11/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..7639bd3404065e0ecf57cfaeb97555c31efd6ef2 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/11/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7fbc23c909758daf76a1d647d1beefb4c3cc62a4aa04f98679e22d79cc6813e +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/11/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/11/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..b676f47d5a5a91d8d01be99f917d372ecfedea93 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/11/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef4f8de0f891e6d79255af98deda246f04c0775694835f696a1a8b0738f492da +size 117440752 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/11/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/11/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..8fb65cc71e0a5177f081693cb232a57719dfd0ea --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/11/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:844adcbf23618ae38fbffaf92f7d38ce7d853be5643485bb25f8f839c0f2819c +size 234881328 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/11/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/11/pp_block/post_attention_layernorm/model_weight.safetensors new 
file mode 100644 index 0000000000000000000000000000000000000000..6e05ab51486a47427f6d3b817b16c5811d413d76 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/11/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1823cbba03a4ec4683cc6a41eab51e34cec90e92cea7af388d0c675abe451284 +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/12/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/12/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..8c854fe7b6727e29c8918fc94a937a25a629dc01 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/12/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9da7a13da9a196108d2efd875884aa8629533e8143255eef5915417ac592d9c0 +size 33554672 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/12/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/12/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..464502b2c7d802b41eec37e8c8a2b84268bf81a7 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/12/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31c8fb0c121f6977e10c7277544259157152d28de9559c8aae8236925398329f +size 50332000 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/12/pp_block/input_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/12/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..4add2369d072cc73ea1ac997e0278788b821881c --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/12/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90fd4f646b1f5ca201781cc77b713093ab9a67d4ee8de11c669a486a2896d773 +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/12/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/12/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..5aff91b3cef88fd005863affdf691f9732c69df6 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/12/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:368a3142cb9f085a2da1db74de226b13c509467cbea81da25f27db8842347443 +size 117440752 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/12/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/12/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..a4cb5af0ed9cfa7ba10a059646a4876477ac0f2d --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/12/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ 
-0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48e00b1107d1e575c2425fa8368e92eb714b59825153206ae4cccc36eb4e8e45 +size 234881328 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/12/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/12/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..913b6fa0c1523ec518dd5793de54373b947eaaef --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/12/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c3300f1d0af46ca69fc14397728055e302b2955b8b9adfd9705b68a683377b1 +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/13/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/13/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..0ea63aa24b2146b6fac66bff869676287e387032 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/13/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0d2409179997ff51079156414cb112c82b964976a8023f5088b1dd7ab28f50c +size 33554672 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/13/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/13/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..fac1e8df4e41c7c457d3d8f2b93f6652a05d77d6 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/13/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c51efd448a50b2c75046bfb12f2703ce19e56b07f4f9e94f7a36f673c70517b8 +size 50332000 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/13/pp_block/input_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/13/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..1056885a65e12a39683e3dc80bd32d398ea998db --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/13/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe220fd602b0f41f30f7bca607c400adacadb7b5e31f81a28d7e103fd5c0b0a8 +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/13/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/13/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..40c4e763d0a54d17434aab17209a83b2244ebcbd --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/13/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa8d2abd973723ddb4ea4cb2188fa767ea55168bc2519e170e728619fde864c4 +size 117440752 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/13/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors 
b/pretrained/Mistral-7B-v0.1/model/model/decoder/13/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..4bf3a313c5cb8ee5a3a785e902b6223fde9adf09 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/13/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aeeb6ae12119f5ffd390add932122f819d81d50886389567eb41103451b36d24 +size 234881328 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/13/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/13/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..63532d3fdb990eaab244ec23c180c3f14ae8184f --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/13/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:55a66303f00f120db3fba8ae4d13725fc2c22cd6fd3babc1d66dc0fdee7eb45f +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/14/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/14/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..eb95eb885a043cc716ad9521436fb2a2f433b99d --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/14/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9fe17a044a248163b4c45783386f7d414e6217ae9657c8983d54a84e85aae8e +size 33554672 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/14/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/14/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..08a574df339a0884c01214b757f22f814a4ba8d8 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/14/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b727021f1c6625dae8fb240904bf838985311afe7b5e19f62839c3563072e75a +size 50332000 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/14/pp_block/input_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/14/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..5a07c74cd2f27b0fba0ecb5cb35c7790124bf010 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/14/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5f069023e761eacf5747a540516a9215c49e3efc8614ffaa7fa4ca016c67075 +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/14/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/14/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..2310ec6b4b327509eb172f9feda4f6c603d4462e --- /dev/null +++ 
b/pretrained/Mistral-7B-v0.1/model/model/decoder/14/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:993eebcd3451163de4981bd6f7cd82a2bc0bfcc29a67cbbedceb502a5036466c
+size 117440752

[The diff continues with 107 more Git LFS pointer files of the same form: each a new file (mode 100644) whose contents are the three pointer lines `version https://git-lfs.github.com/spec/v1`, `oid sha256:<per-file hash>`, and `size <bytes>`, under `pretrained/Mistral-7B-v0.1/model/model/decoder/<layer>/pp_block/`. They cover the rest of layer 14, layers 2, 3, and 15-29 in full, and the first half of layer 30, with six files per full layer: attn/o_proj (33554672 bytes), attn/qkv_proj (50332000 bytes), input_layernorm (8288 bytes), mlp/down_proj (117440752 bytes), mlp/gate_up_proj (234881328 bytes), and post_attention_layernorm (8288 bytes). The projection weights carry the `pp-rank-0-of-1_tp-rank-0-of-1` shard suffix; the layernorm weights do not.]
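Every weight file checked in here is a Git LFS pointer (version, oid, size), not the tensor data itself. As a minimal sketch, assuming the pointer file and a separately downloaded blob both sit on the local filesystem (the helper names below are illustrative, not part of nanotron or git-lfs), a pointer can be parsed and a blob verified against it like this:

```python
import hashlib
from pathlib import Path


def parse_lfs_pointer(pointer_path: Path) -> dict:
    # A pointer file holds "key value" lines: version, oid, size.
    fields = {}
    for line in pointer_path.read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields


def verify_lfs_blob(pointer_path: Path, blob_path: Path) -> bool:
    # Stream the blob so multi-hundred-MB weight shards are never
    # held in memory at once, then compare the digest and byte count
    # against the pointer's oid/size fields.
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    digest = hashlib.sha256()
    actual_size = 0
    with blob_path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            actual_size += len(chunk)
    return digest.hexdigest() == expected_oid and actual_size == expected_size
```

For example, checking one of the qkv_proj shards above would compare the downloaded file's sha256 against the recorded oid and its length against size 50332000.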
diff --git
a/pretrained/Mistral-7B-v0.1/model/model/decoder/30/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/30/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..f7026fba2597fdc6f8f369a1802a55021e2f739f --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/30/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d2f8cbc04849ff8df37b02cbe38beb267cfc7909bc6209ceaca9983b42afdc8 +size 117440752 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/30/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/30/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..e79324fd999579aeb28f415f0f5bfc15b50c32b3 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/30/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9952aee88ec70a590d29142cdfa3c2043edf9d6134ab1a3c6638c80900e21436 +size 234881328 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/30/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/30/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..f0f228f5aec56d947e82243fb3e9d4f4d183ccef --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/30/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70f84ab74f9570ca333e367ad2ae8b83b180749cdfa783dea8799bf62617314c +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/31/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/31/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..283bf04e1940f39b518d1ff7433a94baa14a4a39 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/31/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2dbcc1deab2507f9cf9750ae8be59e586b818852a3ad3c6e24b31b5694a6cc3a +size 33554672 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/31/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/31/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..a8e6e0f7bf682b83ef335c954ac328d3fea30481 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/31/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5c1a2d64ef38d5b615ae998e097c54bbf8b82cf5bf05f2711a81675b94c3920 +size 50332000 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/31/pp_block/input_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/31/pp_block/input_layernorm/model_weight.safetensors 
new file mode 100644 index 0000000000000000000000000000000000000000..5d82cc5fc576f159521f62e94331a6d73d7b45c1 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/31/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17469a56813ba760824801ed57e006e3093265e7550a90065bea004960dff138 +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/31/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/31/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..b926bd7333d48904fdaf82f88e93b3ff26bf6c14 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/31/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d0849f505c4e03074900c1a09ebdead2fcf9bf094b7b5cd1761f4184e552383 +size 117440752 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/31/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/31/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..e2a99d5aeb569f4d5a607f811429eede80fc05af --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/31/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b324ce080c13f808dad2f1f15702f9a5f4a3939c3d4f99c447e24ae786908b34 +size 234881328 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/31/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/31/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..87df9c8bf29d793b78e9d632a745029e911b5918 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/31/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5bd032293c8fa5e05591b617c5b669bef109efb4b75ed735a54a75d396cb51a5 +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/4/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/4/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..8d1295e09e334e81ca05cb03a66f0a6724a79e4c --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/4/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a21ca1fd2733c05777dee3b24b90b625d88af9c2f454ba7a2aeb5e32fe7d6b8 +size 33554672 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/4/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/4/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..0ca2cfabacd190e5221010b0a12342f83c97245a --- /dev/null +++ 
b/pretrained/Mistral-7B-v0.1/model/model/decoder/4/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:257ace20e4b5c4ee95bf8561b353e30595a7b9823f103b436a0d919b79f1b47b +size 50332000 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/4/pp_block/input_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/4/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..d2a9cd04dd3bc738e4aae63b16b08d7baa46bd3b --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/4/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5a6ba84eed87e4e6875778f31d9fe0f04f9d1aee3f831ad290c3cd7f8ef13e1 +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/4/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/4/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..8ccb656d63e437f0aa24ab1072f42c386488051e --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/4/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d6154b2cda7190442875a4e41f5e3827e53e0282d4f62f21f365ec70c5acedc +size 117440752 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/4/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/4/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..7fe6900300ba97d5e96cee7b1047311b4f90148a --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/4/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf749f4fc02bb340cf7debf8e672ad2258a72a4c73b852f15cc60f13e7ad3253 +size 234881328 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/4/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/4/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..134e3a2e6100c0c7a675fee75d269bf53b04fd6a --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/4/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:434e6e8a555adb21ecfde086d97453cbb54133fc9c28457e4f77695f9c3e7fef +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/5/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/5/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..3f5746811a91a4b2c796885339ed3ecc7a71888b --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/5/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7216b08cf8c28c3ec1a89331c0b7527d4f0fa28488db8a2b6ddc9b38c47f420a +size 33554672 diff --git 
a/pretrained/Mistral-7B-v0.1/model/model/decoder/5/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/5/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..c4eb5fc8666814bee6cf42611d2c10b91ad0160f --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/5/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5d2632c52e7a877d42bc48417e5fca13af532b0c796759b6a88c15e7655a199 +size 50332000 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/5/pp_block/input_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/5/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..2af19166404ceaeebc32dcaf177668428bfafccb --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/5/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22d7ea806e4bce283f0ce3b51c64a8ae80841ede39bb43d5b44288ba3946c64d +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/5/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/5/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..b78fb5651c8454baee5fd4cdc842449ffd037c09 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/5/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:28596e291e8e8c92d572b2663a5fe62d50d0f7733ceabc7e8fdbaad584e5f16d +size 117440752 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/5/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/5/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..9de4162156cdad3d4d24d126f1ef775f0c900b4f --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/5/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1848a2acecbe2a83186768b236771802dad785dab102045ccedfa855790cef5b +size 234881328 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/5/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/5/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..1b4e2982b9811aad690cacc18261408e643b9140 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/5/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:329122861d5758b0203cc9900841c57324cfd587d15efabf90ebeeabf0410101 +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/6/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/6/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 
0000000000000000000000000000000000000000..cbf4c449a0c559b068517c2e1d6f34553dc42803 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/6/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34db07de05bec1ffa3047ea28da73ef0375b502287e1d58dcd9f250c0eb3a8fc +size 33554672 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/6/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/6/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..537adf453daa0229fbe396edbd9fa131a3cff68d --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/6/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6cd9f6e8741c9390adf468ca225f4ed3de6aa94060979c871a04bb30d4f9fd4f +size 50332000 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/6/pp_block/input_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/6/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..6dda39161d5d44d878fff0fb6eae928b190648a9 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/6/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15842e0c4ee869c97a2c4b5455649df903a558ba766fa0042b61b75ce2a20645 +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/6/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/6/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..ae70465e004eee842aacda5623dd26f9dee26c4f --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/6/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3da43d08beaf406121e898162d34d6f70ed2b79212d5bc6f83ad5f4f82c1066e +size 117440752 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/6/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/6/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..cf57463f5b30f11fbb61b50a09b89b782c2e3fea --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/6/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:857d563923d9edb8b3cc1e91f25c2fa4fad8c52fe5d8e05bcddac84753adecc5 +size 234881328 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/6/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/6/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..11fe27c1fadab538f1844c8eb793a384ce73668b --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/6/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:5e27916764a9047f9377f176c26d3fd4ee7e672b9b01d6a770792686b321de54 +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/7/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/7/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..b54da7d6cbbb20427e068214c04264d901970f7b --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/7/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:586fa4aca8c1453722adc463a572a10bc5a4b32ca22b9f8b44c4f92ff5dd1377 +size 33554672 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/7/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/7/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..dba948fded302199131ce48c77bca079e472a7f5 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/7/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:378aa3059e84ee41619845e516bcc98ff33f7490732354c3bf8ec0fee5c3f0da +size 50332000 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/7/pp_block/input_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/7/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..5d604c1c0b9560d3908a0f7962f0ac0bb04358e5 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/7/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bbb31970022cf15952ba6e72656beeead599c2772a83a05d622fbb972c45f9f7 +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/7/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/7/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..1ff61cd79859c7fc4478e21a8dcba8b3e87eb6cc --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/7/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f66269fd59db7889ca11b5b08764d08f4032f1ba1294e69e25c3e078a8718bd +size 117440752 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/7/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/7/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..99e7ce3eda52f1223cf1739997c249a761b0f63c --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/7/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30faa7a2ab72dee2e162b6f24bc55d65ac798fbb35789efda078e19fc9707ff7 +size 234881328 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/7/pp_block/post_attention_layernorm/model_weight.safetensors 
b/pretrained/Mistral-7B-v0.1/model/model/decoder/7/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..6d37c6b21c7a1dbab22b83d893bfc25d957b6585 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/7/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ebcc1720303df1fb8983bf02c95dfbe266afcaff467ee5e7b359b5f30c4d58c +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/8/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/8/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..d888defda9d169f13fbbe75805816a33f9a7033a --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/8/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97df4f839565f7e34bf8063d76c43f66f952ba1f7e073471bd2e1e129410a0c0 +size 33554672 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/8/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/8/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..9652a6ab5c0432ed24d56f0eba3f3756b06fdb7a --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/8/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1641c0d81de42d2a28a8802749bf688fc4a44679766229dcb54ff0519aca86f +size 50332000 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/8/pp_block/input_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/8/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..a0820e06da09eb3fbf89a42bd345b08640540850 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/8/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35b4e125331997d8db1effda695509ca715396438575e2662174c397232673a3 +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/8/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/8/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..fffa7bbfa7131a63503638274fac5ca7faa70c2a --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/8/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c18100441f66ca52eec0fccc50d61a5379b35092111dbd88b1bba224724d0c3c +size 117440752 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/8/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/8/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..4277b693548a187709cf28e983a64b5898ad42f2 --- /dev/null +++ 
b/pretrained/Mistral-7B-v0.1/model/model/decoder/8/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:321f513ca073ccce6736d50e727f13460f9d364f04e6f4b74d30f20869ac43ef +size 234881328 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/8/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/8/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..7a474d62304bc8f4e4dd92f05b499d5f87e1c9b3 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/8/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd9577a9b2f14992e1e0eaa065b7af5638d41ff7dc054c911a535218225d5edc +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/9/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/9/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..ea6f056f001041752d6947f67635b93de087d583 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/9/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0fc38ac51484f9794565b2bbd1b43a2ca94c3d3c1cadca67e2d6fd7aa4cf3b11 +size 33554672 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/9/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/9/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..54fc9f2026ed3e7893faf7750531dc0ff4d0136c --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/9/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ad93ee685713fae71d8ff285bb8d765f4bb7b23c31d2951af607dd3df101d22 +size 50332000 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/9/pp_block/input_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/9/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..e531131608d0d781f6564891399316a4eb696782 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/9/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6c7700dd95e3ae0532ffecd0404c28a7f4bab4532525facc7c7ec2c86d9e20e +size 8288 diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/9/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/9/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..192cc0d004f568f730e266ada4cb92b882e26168 --- /dev/null +++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/9/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ada343e02e762672fcb517b651288d5c9e4082bb96e5f3ce1307fe626adb403 +size 117440752 diff --git 
a/pretrained/Mistral-7B-v0.1/model/model/decoder/9/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/9/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..13cda4047ed816dd0d00d0ad97556a3395be5230
--- /dev/null
+++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/9/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0aac0ca25263c49a8406ff7cd593e1886c64d664065a57e9ae2b6b6f297d1f42
+size 234881328
diff --git a/pretrained/Mistral-7B-v0.1/model/model/decoder/9/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/decoder/9/pp_block/post_attention_layernorm/model_weight.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..e8b163aeadda7aa7c8c9c41ad5b16acba734a930
--- /dev/null
+++ b/pretrained/Mistral-7B-v0.1/model/model/decoder/9/pp_block/post_attention_layernorm/model_weight.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9543e6bb4f7cb8224839a47ec41c7aa770b739249ed86ab0daf92e7b3e49cbaf
+size 8288
diff --git a/pretrained/Mistral-7B-v0.1/model/model/final_layer_norm/pp_block/model_weight.safetensors b/pretrained/Mistral-7B-v0.1/model/model/final_layer_norm/pp_block/model_weight.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b1d6c221ea7a3f19cac005047689d4db605acdc1
--- /dev/null
+++ b/pretrained/Mistral-7B-v0.1/model/model/final_layer_norm/pp_block/model_weight.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:129d96780ba400491de4b9b79c45f462fd7ef114b49ff41598b8d6b83b487303
+size 8288
diff --git a/pretrained/Mistral-7B-v0.1/model/model/lm_head/pp_block/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/lm_head/pp_block/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4cedac5d82877a787a7e169bfb80117e1a1c7f87
--- /dev/null
+++ b/pretrained/Mistral-7B-v0.1/model/model/lm_head/pp_block/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4026361c0fcd7f5efbe363e109baa0ee34ded7ac0d2f4f57608ec552ba9b5d6
+size 262144240
diff --git a/pretrained/Mistral-7B-v0.1/model/model/token_position_embeddings/pp_block/token_embedding/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/Mistral-7B-v0.1/model/model/token_position_embeddings/pp_block/token_embedding/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..edbcef5763859737056a4be01f2a911fa36372bc
--- /dev/null
+++ b/pretrained/Mistral-7B-v0.1/model/model/token_position_embeddings/pp_block/token_embedding/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ada93960d12127797ad62ff97af415851ac09285dcd8a3c020e3b458549bf51
+size 262144240
diff --git a/pretrained/Mistral-7B-v0.1/model_config.json b/pretrained/Mistral-7B-v0.1/model_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..ab387c83224c1d965f7d2e246a1bfd3172e96709
--- /dev/null
+++ b/pretrained/Mistral-7B-v0.1/model_config.json
@@ -0,0 +1,22 @@
+{
+    "attn_pdrop": 0.0,
+    "bos_token_id": 1,
+    "eos_token_id": 2,
+    "hidden_act": "silu",
+    "hidden_size": 4096,
+    "initializer_range": 0.02,
+    "intermediate_size": 14336,
+    "is_mistral_config": true,
+    "max_position_embeddings": 32768,
+    "num_attention_heads": 32,
+    "num_hidden_layers": 32,
+    "num_key_value_heads": 8,
+    "pad_token_id": null,
+    "pretraining_tp": 1,
+    "rms_norm_eps": 1e-05,
+    "rope_theta": 10000.0,
+    "sliding_window_size": 4096,
+    "tie_word_embeddings": false,
+    "use_cache": true,
+    "vocab_size": 32000
+}
\ No newline at end of file
diff --git a/run_generate.py b/run_generate.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f458e9ff2863783a6a1a635842565d213798208
--- /dev/null
+++ b/run_generate.py
@@ -0,0 +1,243 @@
+"""
+Nanotron Inference Script
+
+Usage:
+```
+export CUDA_DEVICE_MAX_CONNECTIONS=1  # important for some distributed operations
+torchrun --nproc_per_node=1 run_generate.py --ckpt-path ./pretrained/Mistral-7B-v0.1
+```
+"""
+
+import argparse
+import os
+from pathlib import Path
+
+import torch
+from nanotron import distributed as dist
+from nanotron import logging
+from nanotron.config import GenerationArgs, LoggingArgs, ParallelismArgs, get_config_from_file
+from nanotron.generation.decode import GenerationInput, TokenizerConfig, decode_text, decode_tokenized
+from nanotron.logging import log_rank, set_logger_verbosity_format
+from nanotron.models import build_model
+from nanotron.parallel import ParallelContext
+from nanotron.parallel.parameters import sanity_check
+from nanotron.parallel.pipeline_parallel.engine import (
+    OneForwardOneBackwardPipelineEngine,
+)
+from nanotron.parallel.pipeline_parallel.tensor_pointer import TensorPointer
+from nanotron.parallel.tensor_parallel.enum import TensorParallelLinearMode
+from nanotron.random import (
+    RandomStates,
+    get_current_random_state,
+    get_synced_random_state,
+    set_random_seed,
+)
+from nanotron.serialize import (
+    load_weights,
+)
+from nanotron.trainer import CONFIG_TO_MODEL_CLASS, mark_tied_parameters
+
+from brrr.config import BrrrConfig
+# Import from config_mistral directly: importing config_mistral_7b would run its
+# module-level code (it builds and prints a config) as an import-time side effect.
+from config_mistral import MistralConfig
+from modeling_mistral import MistralForTraining
+
+try:
+    from transformers import AutoTokenizer
+except ImportError:
+    AutoTokenizer = None
+
+logger = logging.get_logger(__name__)
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--ckpt-path", type=Path, required=True, help="Checkpoint path")
+    # 0 means "fall back to the parallelism config stored in the checkpoint"; with a
+    # truthy default, the `args.dp or config.parallelism.dp` fallback below could never trigger.
+    parser.add_argument("--dp", type=int, default=0)
+    parser.add_argument("--pp", type=int, default=0)
+    parser.add_argument("--tp", type=int, default=0)
+    parser.add_argument("--max-new-tokens", type=int, default=128, help="Maximum number of new tokens to generate")
+    return parser.parse_args()
+
+
+def main():
+    args = get_args()
+
+    assert args.ckpt_path.exists(), f"Checkpoint path {args.ckpt_path} does not exist"
+
+    config = get_config_from_file(
+        (args.ckpt_path / "config.yaml").as_posix(), config_class=BrrrConfig, model_config_class=MistralConfig
+    )
+    model_config = config.model.model_config
+    tokenizer_path = config.tokenizer.tokenizer_name_or_path
+
+    parallel_config = ParallelismArgs(
+        dp=args.dp or config.parallelism.dp,
+        pp=args.pp or config.parallelism.pp,
+        tp=args.tp or config.parallelism.tp,
+        pp_engine=OneForwardOneBackwardPipelineEngine(),
+        tp_mode=TensorParallelLinearMode.ALL_REDUCE,
+        recompute_granularity=None,
+        tp_linear_async_communication=False,
+    )
+
+    # Initialise all process groups
+    parallel_context = ParallelContext(
+        data_parallel_size=parallel_config.dp,
+        pipeline_parallel_size=parallel_config.pp,
+        tensor_parallel_size=parallel_config.tp,
+    )
+
+    # Set log levels
+    logging_config = LoggingArgs(
+        log_level="info",
+        log_level_replica="info",
+    )
+
+    if dist.get_rank(parallel_context.world_pg) == 0:
+        if logging_config.log_level is not None:
+            set_logger_verbosity_format(logging_config.log_level, parallel_context=parallel_context)
+    else:
+        if logging_config.log_level_replica is not None:
+            set_logger_verbosity_format(logging_config.log_level_replica, parallel_context=parallel_context)
+
+    log_rank(f"model_config: {model_config}", logger=logger, level=logging.INFO, rank=0)
+    log_rank(f"tokenizer_path: {tokenizer_path}", logger=logger, level=logging.INFO, rank=0)
+
+    dtype = torch.bfloat16
+
+    # Set random states
+    set_random_seed(42)
+
+    # Get synchronized random states
+    if parallel_config.tp_mode is TensorParallelLinearMode.ALL_REDUCE:
+        random_states = RandomStates(
+            {"tp_synced": get_synced_random_state(random_state=get_current_random_state(), pg=parallel_context.tp_pg)}
+        )
+    else:
+        # We don't need to sync across TP when using sequence parallel (REDUCE_SCATTER)
+        random_states = RandomStates({})
+
+    model = build_model(
+        model_builder=lambda: MistralForTraining(
+            config=model_config,
+            parallel_context=parallel_context,
+            parallel_config=parallel_config,
+            random_states=random_states,
+        ),
+        dtype=dtype,
+        parallel_context=parallel_context,
+    )
+
+    # Mark some parameters as tied
+    # TODO @nouamane: this is only needed for training, can we just mark params as NanotronParameter instead?
+    mark_tied_parameters(model=model, parallel_context=parallel_context, parallel_config=parallel_config)
+
+    # Sanity check model
+    sanity_check(root_module=model)
+
+    # Load checkpoint
+    checkpoint_path = args.ckpt_path
+    log_rank(
+        f"Loading checkpoint from {checkpoint_path}:",
+        logger=logger,
+        level=logging.INFO,
+        rank=0,
+    )
+    load_weights(model=model, parallel_context=parallel_context, root_folder=checkpoint_path)
+
+    model.eval()
+    if AutoTokenizer is not None:
+        tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
+        # Make sure the tokenizer has a pad token: fall back through eos and the
+        # model config before minting a brand-new [PAD] token.
+        if tokenizer.pad_token_id is None:
+            if tokenizer.eos_token_id is not None:
+                tokenizer.pad_token_id = tokenizer.eos_token_id
+            elif getattr(model.config, "pad_token_id", None) is not None:
+                tokenizer.pad_token_id = int(model.config.pad_token_id)
+            elif getattr(model.config, "eos_token_id", None) is not None:
+                tokenizer.pad_token_id = int(model.config.eos_token_id)
+            else:
+                tokenizer.add_special_tokens({"pad_token": "[PAD]"})
+        tokenizer.padding_side = "left"
+        tokenizer.truncation_side = "left"  # TODO @nouamane: do we want this?
+        dummy_inputs = [
+            # "Passage: Daniel went back to the garden. Mary travelled to the kitchen. Sandra journeyed to the kitchen. Sandra went to the hallway. John went to the bedroom. Mary went back to the garden. Where is Mary?\nAnswer:",
+            # "def fib(n)",
+            "This film was probably inspired by Godzilla",
+        ]
+
+        outputs = decode_text(
+            input_iter=(GenerationInput(text=text) for text in dummy_inputs),
+            tokenizer=tokenizer,
+            # TODO @thomasw21: From ModelWithLoss extract the model.
+            model=model.model,
+            parallel_context=parallel_context,
+            max_new_tokens=args.max_new_tokens,
+            max_micro_batch_size=2,
+            generation_config=GenerationArgs(sampler="greedy", use_cache=True),
+            tokenizer_config=TokenizerConfig(max_input_length=None),
+            is_bench=os.environ.get("USE_BENCH", "0") == "1",
+        )
+        for output in outputs:
+            input_ids = output.input_ids
+            generated_ids = output.generation_ids
+            if isinstance(input_ids, TensorPointer):
+                # With pipeline parallelism, ranks that do not hold the output
+                # only see TensorPointers; there is nothing to decode on them.
+                assert isinstance(generated_ids, TensorPointer)
+                continue
+            assert isinstance(generated_ids, torch.Tensor)
+
+            log_rank(
+                f"input: {tokenizer.decode(input_ids, clean_up_tokenization_spaces=False)[:1000]}",
+                logger=logger,
+                level=logging.INFO,
+                rank=0,
+            )
+
+            log_rank(
+                f"generation: {tokenizer.decode(generated_ids[len(input_ids) :], clean_up_tokenization_spaces=False)}",
+                logger=logger,
+                level=logging.INFO,
+                rank=0,
+            )
+
+            log_rank(
+                "--------------------------------------------------",
+                logger=logger,
+                level=logging.INFO,
+                rank=0,
+            )
+    else:
+        # No `transformers` available: generate from raw token ids instead of text
+        outputs = decode_tokenized(
+            input_ids=torch.zeros(1, 1).to(dtype=torch.int64, device="cuda"),
+            input_mask=torch.ones(1, 1).to(dtype=torch.bool, device="cuda"),
+            model=model.model,
+            parallel_context=parallel_context,
+            generation_config=GenerationArgs(sampler="greedy", use_cache=True),
+            max_micro_batch_size=1,
+            max_new_tokens=12,
+            returns_logits=False,
+        )
+        for output in outputs:
+            input_ids = output.input_ids
+            generated_ids = output.generation_ids
+            if isinstance(input_ids, TensorPointer):
+                assert isinstance(generated_ids, TensorPointer)
+                continue
+            assert isinstance(generated_ids, torch.Tensor)
+            log_rank(
+                f"generation: {generated_ids[len(input_ids) :]}",
+                logger=logger,
+                level=logging.INFO,
+                rank=0,
+            )
+
+            log_rank(
+                "--------------------------------------------------",
+                logger=logger,
+                level=logging.INFO,
+                rank=0,
+            )
+
+    dist.barrier()
+
+
+if __name__ == "__main__":
+    main()
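
As a quick sanity check on the files added above, the serialized `model_config.json` round-trips straight back into the `MistralConfig` dataclass from `config_mistral.py`. A minimal sketch, not part of the diff; the checkpoint path is an assumption and the snippet is meant to be run from the repository root:

```
import json

# MistralConfig and get_num_params are the helpers added in config_mistral.py above
from config_mistral import MistralConfig, get_num_params

# Assumed location: the model_config.json checked in by this diff
with open("pretrained/Mistral-7B-v0.1/model_config.json") as f:
    cfg = MistralConfig(**json.load(f))

# Grouped-query attention: 32 query heads share 8 KV heads
assert cfg.num_attention_heads % cfg.num_key_value_heads == 0

print(f"{int(get_num_params(cfg)):,} parameters")  # ~7.24e9 for these hyper-parameters
```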
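
To override the parallelism stored in the checkpoint from the command line, the launch looks like this (a sketch assuming two visible GPUs; the world size passed to torchrun must equal dp * pp * tp):

```
export CUDA_DEVICE_MAX_CONNECTIONS=1
torchrun --nproc_per_node=2 run_generate.py \
    --ckpt-path ./pretrained/Mistral-7B-v0.1 \
    --tp 2 --max-new-tokens 64
```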