diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..ed8ebf583f771da9150c35db3955987b7d757904
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+__pycache__
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1ccd34559c94ea31010ffbeb967e319e049aaf21
--- /dev/null
+++ b/README.md
@@ -0,0 +1,37 @@
+---
+library_name: nanotron
+---
+
+# ⚙️ Nano-Mistral
+
+Modeling code for Mistral to use with [Nanotron](https://github.com/huggingface/nanotron/)
+
+Also contains converted pretrained weights for Mistral-7B-v0.1: https://huggingface.co/mistralai/Mistral-7B-v0.1
+
+## 🚀 Quickstart
+
+```bash
+# Generate a config file
+python config_tiny_mistral.py
+
+# Run training
+export CUDA_DEVICE_MAX_CONNECTIONS=1 # important for some distributed operations
+torchrun --nproc_per_node=8 run_train.py --config-file config_tiny_mistral.yaml
+```
+
+## 🚀 Run generation with pretrained Mistral-7B-v0.1
+
+```bash
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+torchrun --nproc_per_node=1 run_generate.py --ckpt-path ./pretrained/Mistral-7B-v0.1
+```
+
+## 🚀 Use your custom model
+
+- Update the `MistralConfig` class in `config_tiny_mistral.py` to match your model's configuration
+- Update the `MistralForTraining` class in `modeling_mistral.py` to match your model's architecture
+- Pass both classes to the `DistributedTrainer` in `run_train.py`:
+```python
+trainer = DistributedTrainer(config_file, model_class=MistralForTraining, model_config_class=MistralConfig)
+```
+- Run training as usual
diff --git a/config_minicpm.py b/config_minicpm.py
new file mode 100644
index 0000000000000000000000000000000000000000..96c5076a07b4a9a28fc92c5c7babb066785f1390
--- /dev/null
+++ b/config_minicpm.py
@@ -0,0 +1,119 @@
+"""Example Python script to generate a YAML config file which can be used to run a training with nanotron. Refer to the "examples" section in `/README.md` for more information.
+
+Usage:
+```
+python config_minicpm.py
+```
+"""
+import os
+from dataclasses import dataclass
+from typing import Optional
+
+from nanotron.config import (
+    CheckpointsArgs,
+    Config,
+    DataArgs,
+    GeneralArgs,
+    LoggingArgs,
+    LRSchedulerArgs,
+    ModelArgs,
+    OptimizerArgs,
+    ParallelismArgs,
+    PretrainDatasetsArgs,
+    RandomInit,
+    TokenizerArgs,
+    TokensArgs,
+)
+from nanotron.logging import human_format
+
+
+@dataclass
+class MiniCPMConfig:
+    """Configuration for a MiniCPM model.
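+
+    MiniCPM-specific fields: `scale_emb`, `dim_model_base` and `scale_depth`
+    (see `modeling_minicpm.py` for how each of these scalings is applied).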
+
+    Be careful to keep the typing coherent, as it is used to reconstruct the model from YAML.
+    """
+
+    attn_pdrop: float = 0.0
+    bos_token_id: int = 1
+    eos_token_id: int = 2
+    pad_token_id: Optional[int] = None
+    hidden_act: str = "silu"
+    hidden_size: int = 2304
+    initializer_range: float = 0.1
+    intermediate_size: int = 5760
+    max_position_embeddings: int = 2048
+    num_attention_heads: int = 36
+    num_hidden_layers: int = 40
+    num_key_value_heads: int = 36
+    pretraining_tp: int = 1
+    rms_norm_eps: float = 1e-05
+    rope_theta: float = 10000.0
+    tie_word_embeddings: bool = True
+    use_cache: bool = True
+    vocab_size: int = 122753
+    scale_emb: float = 12
+    dim_model_base: int = 256
+    scale_depth: float = 1.4
+
+    def __post_init__(self):
+        # for backward compatibility
+        if self.num_key_value_heads is None:
+            self.num_key_value_heads = self.num_attention_heads
+
+def get_num_params(model_config: MiniCPMConfig) -> int:
+    # embeddings + lm_head, plus per-layer MLP, q/o projections and k/v projections
+    num_params = model_config.vocab_size * model_config.hidden_size * 2 + \
+        model_config.num_hidden_layers * (
+            3 * model_config.hidden_size * model_config.intermediate_size
+            + 2 * model_config.hidden_size * model_config.hidden_size
+            + 2 * model_config.hidden_size * (model_config.hidden_size // (model_config.num_attention_heads // model_config.num_key_value_heads))
+        )
+    return num_params
+
+def get_num_params_no_embed(model_config: MiniCPMConfig) -> int:
+    num_params = model_config.num_hidden_layers * (
+        3 * model_config.hidden_size * model_config.intermediate_size
+        + 2 * model_config.hidden_size * model_config.hidden_size
+        + 2 * model_config.hidden_size * (model_config.hidden_size // (model_config.num_attention_heads // model_config.num_key_value_heads))
+    )
+    return num_params
+
+MODEL_CONFIG = MiniCPMConfig()
+
+num_params = human_format(get_num_params(MODEL_CONFIG)).replace(".", "p")
+num_params_no_embed = human_format(get_num_params_no_embed(MODEL_CONFIG)).replace(".", "p")
+
+print(f"Model has {num_params} parameters or {num_params_no_embed} without embeddings")
+
+PARALLELISM = ParallelismArgs(
+    dp=1,
+    pp=1,
+    tp=1,
+    pp_engine="1f1b",
+    tp_mode="REDUCE_SCATTER",
+    tp_linear_async_communication=True,
+    recompute_granularity="selective",
+)
+
+CONFIG = Config(
+    general=GeneralArgs(project="openbmb", run="MiniCPM-2B-dpo-bf16", seed=42, step=0),
+    checkpoints=None,
+    parallelism=PARALLELISM,
+    model=ModelArgs(init_method=RandomInit(std=0.025), model_config=MODEL_CONFIG),
+    tokenizer=TokenizerArgs("openbmb/MiniCPM-2B-dpo-bf16"),
+    optimizer=None,
+    logging=None,
+    tokens=None,
+    data=None,
+    profiler=None,
+    lighteval=None,
+)
+
+if __name__ == "__main__":
+    file_path = os.path.abspath(__file__)
+
+    file_path = file_path.replace(".py", ".yaml")
+    # Save config as YAML file
+    CONFIG.save_as_yaml(file_path)
+
+    # You can now train a model with this config using `/run_train.py`
diff --git a/config_tiny_mistral.yaml b/config_tiny_mistral.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cecc9a4dd5b8f986cfe7b0dd390ca287f1f4cbc6
--- /dev/null
+++ b/config_tiny_mistral.yaml
@@ -0,0 +1,92 @@
+checkpoints:
+  checkpoint_interval: 10
+  checkpoints_path: /fsx/thomwolf/github/textbooks-proj/brrr/models/checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data:
+  dataset:
+    dataset_overwrite_cache: false
+    dataset_processing_num_proc_per_process: 1
+    hf_dataset_config_name: null
+    hf_dataset_or_datasets: HuggingFaceH4/testing_alpaca_small
+    hf_dataset_splits: train
+    text_column_name: completion
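+    # (the fields above select and configure the HF dataset consumed by dataloader.py)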
+  num_loading_workers: 1
+  seed: 42
+general:
+  benchmark_csv_path: null
+  consumed_train_samples: null
+  ignore_sanity_checks: false
+  project: debug
+  run: tiny_mistral
+  seed: 42
+  step: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.025
+  make_vocab_size_divisible_by: 1
+  model_config:
+    attn_pdrop: 0.0
+    bos_token_id: 1
+    eos_token_id: 2
+    hidden_act: silu
+    hidden_size: 16
+    initializer_range: 0.02
+    intermediate_size: 64
+    is_mistral_config: true
+    max_position_embeddings: 256
+    num_attention_heads: 4
+    num_hidden_layers: 2
+    num_key_value_heads: 4
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_theta: 10000.0
+    sliding_window_size: 4096
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 256
+optimizer:
+  accumulate_grad_in_fp32: true
+  adam_beta1: 0.9
+  adam_beta2: 0.95
+  adam_eps: 1.0e-08
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_steps: 8
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 0
+parallelism:
+  dp: 2
+  pp: 2
+  pp_engine: 1f1b
+  recompute_granularity: SELECTIVE
+  tp: 2
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: gpt2
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 2
+  sequence_length: 32
+  train_steps: 10
+  val_check_interval: -1
diff --git a/convert_trfrs_to_brrr.py b/convert_trfrs_to_brrr.py
new file mode 100644
index 0000000000000000000000000000000000000000..9dcc4f7281ca38ab0d68e45ad3088d32d5a1f593
--- /dev/null
+++ b/convert_trfrs_to_brrr.py
@@ -0,0 +1,262 @@
+# ruff: noqa: E402
+"""
+This module converts a transformers MistralForCausalLM (here: MiniCPM) checkpoint to a brrr model
+
+Command:
+torchrun --nproc_per_node=1 convert_trfrs_to_brrr.py \
+    --model_name openbmb/MiniCPM-2B-dpo-bf16 \
+    --save_path ./pretrained/MiniCPM-2B-dpo-bf16
+"""
+import argparse
+import sys
+from dataclasses import asdict
+from pathlib import Path
+from typing import Dict, List
+
+import torch
+
+from brrr.trainer import DistributedTrainer
+
+sys.path.append(Path(__file__).parent.parent.as_posix())
+import os
+
+from nanotron.parallel.parameters import NanotronParameter, sanity_check
+from nanotron.parallel.pipeline_parallel.engine import (
+    AllForwardAllBackwardPipelineEngine,
+)
+from nanotron.parallel.tensor_parallel.nn import TensorParallelLinearMode
+from transformers import MistralConfig as MistralConfig_trfs, MistralForCausalLM
+
+import nanotron.distributed as dist
+from nanotron.config import ParallelismArgs, RecomputeGranularity
+from nanotron.parallel.context import ParallelContext
+from nanotron.models import build_model
+from nanotron.trainer import mark_tied_parameters
+from nanotron.serialize import save_meta, save_weights, save
+
+from modeling_minicpm import MiniCPMForTraining
+from config_minicpm import PARALLELISM as PARALLELISM_BRRR, CONFIG as CONFIG_BRRR
+
+
+def get_args():
+    parser = argparse.ArgumentParser(description="Convert transformers weights to brrr weights")
+    parser.add_argument("--model_name", type=str, default="openbmb/MiniCPM-2B-dpo-bf16")
+    parser.add_argument("--save_path", type=str, default="pretrained/MiniCPM-2B-dpo-bf16")
+    parser.add_argument("--dp", type=int, default=1)
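+    # dp/pp/tp: parallel layout that the converted nanotron checkpoint will be sharded for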
parser.add_argument("--pp", type=int, default=1) + parser.add_argument("--tp", type=int, default=1) + return parser.parse_args() + + +def permute_for_rotary(tensor, num_heads, per_head_hidden_size, hidden_size): + return ( + tensor.view(num_heads, 2, per_head_hidden_size // 2, hidden_size) + .transpose(1, 2) + .contiguous() + .view(num_heads * per_head_hidden_size, hidden_size) + ) + + +def get_transformers_weight( + name: str, ref_module_state_dict: Dict[str, torch.Tensor], ref_module: MistralForCausalLM, get_grad: bool = False +) -> torch.Tensor: + """From our brrr implementation, we get the equivalent tensor in transformers implementation""" + config = ref_module.config + brrr_prefix = "model." + assert name.startswith(brrr_prefix) + name = name[len(brrr_prefix) :] + + path = name.split(".") + path.remove("pp_block") + name = ".".join(path) + + if get_grad is False: + + def get_tensor(path: str): + return ref_module_state_dict[path] + + def get_tensors(path: List[str]): + return [get_tensor(p) for p in path] + + else: + + def get_tensor(path: str): + weight = ref_module.get_parameter(path) + return weight.grad + + def get_tensors(path: List[str]): + return [get_tensor(p) for p in path] + + if name == "token_position_embeddings.token_embedding.weight": + return get_tensor("model.embed_tokens.weight") + + elif name == "lm_head.weight": + # This only used when weights are not shared + return get_tensor("lm_head.weight") + + elif name == "final_layer_norm.weight": + return get_tensor("model.norm.weight") + + if path[0] == "decoder": + transformer_path = ["model"] + ["layers"] + [path[1]] + + if path[2] == "attn": + path[2] = "self_attn" + + if path[2] == "ff": + path[2] = "mlp" + + if path[3] == "qkv_proj": + proj_names = ["q_proj", "k_proj", "v_proj"] + tensor_list = get_tensors( + [".".join(transformer_path + path[2:3] + [proj_name] + path[4:]) for proj_name in proj_names] + ) + # Permute q/k + per_head_hidden_size = config.hidden_size // config.num_attention_heads + # Permute q + print(f"Permuting q {tensor_list[0].shape}") + tensor_list[0] = permute_for_rotary( + tensor=tensor_list[0], + num_heads=config.num_attention_heads, + per_head_hidden_size=per_head_hidden_size, + hidden_size=config.hidden_size, + ) + # Permute k + print(f"Permuting k {tensor_list[1].shape}") + tensor_list[1] = permute_for_rotary( + tensor=tensor_list[1], + num_heads=config.num_key_value_heads, + per_head_hidden_size=per_head_hidden_size, + hidden_size=config.hidden_size, + ) + return torch.cat(tensor_list, dim=0) + + if path[3] == "gate_up_proj": + tensor_list = get_tensors( + [ + ".".join(transformer_path + path[2:3] + [proj_name] + path[4:]) + for proj_name in ["gate_proj", "up_proj"] + ] + ) + return torch.cat(tensor_list, dim=0) + + return get_tensor(".".join(transformer_path + path[2:])) + + else: + raise ValueError(f"Couldn't find transformer equivalent of {name}") + + +def convert_trfrs_to_brrr(dp, pp, tp, model_name="huggyllama/llama-7b", save_path="pretrained/llama-7b"): + # check save_path doesnt exist or is empty + save_path = Path(save_path) + # assert not save_path.exists() or len(list(save_path.iterdir())) == 0, f"save_path {save_path} is not empty" + + parallel_config = PARALLELISM_BRRR + + parallel_config.dp = dp + parallel_config.pp = pp + parallel_config.tp = tp + + # Initialise all process groups + parallel_context = ParallelContext( + data_parallel_size=parallel_config.dp, + pipeline_parallel_size=parallel_config.pp, + tensor_parallel_size=parallel_config.tp, + ) + # params + dtype = 
torch.bfloat16 # Flash attention doesn't support fp32 + + # Initialise brrr model + model_config_brrr = CONFIG_BRRR.model.model_config + + model = build_model( + model_builder=lambda: MiniCPMForTraining( + config=model_config_brrr, + parallel_context=parallel_context, + parallel_config=parallel_config, + random_states=None, + ), + dtype=dtype, + parallel_context=parallel_context, + device=torch.device("cpu"), + ) + + # Initialise transformers model + device_map = {} + current_pp_rank = dist.get_rank(group=parallel_context.pp_pg) + device_map["model.embed_tokens"] = ( + model.model.token_position_embeddings.rank + if current_pp_rank == model.model.token_position_embeddings.rank + else "meta" + ) + for i in range(model_config_brrr.num_hidden_layers): + device_map[f"model.layers.{i}"] = ( + model.model.decoder[i].rank if current_pp_rank == model.model.decoder[i].rank else "meta" + ) + device_map["model.norm"] = ( + model.model.final_layer_norm.rank if current_pp_rank == model.model.final_layer_norm.rank else "meta" + ) + device_map["lm_head"] = model.model.lm_head.rank if current_pp_rank == model.model.lm_head.rank else "meta" + model_ref = MistralForCausalLM.from_pretrained(model_name, torch_dtype=dtype, device_map=device_map) + + # Copy weights from trfrs to brrr + ref_state_dict = model_ref.state_dict() + for name, param in model.named_parameters(): + print(f"Syncing {name}") + ref_param = get_transformers_weight(name=name, ref_module_state_dict=ref_state_dict, ref_module=model_ref) + + param_is_tp_sharded = ( + isinstance(param, NanotronParameter) + and param.is_sharded + and parallel_context.world_ranks_to_pg[param.get_sharded_info().global_ranks] == parallel_context.tp_pg + ) + + if param_is_tp_sharded: + sharded_info = param.get_sharded_info() + # copy param data (not just the reference) + with torch.no_grad(): + for local_global_slices_pair in sharded_info.local_global_slices_pairs: + local_slices = local_global_slices_pair.local_slices + global_slices = local_global_slices_pair.global_slices + param[local_slices].copy_(ref_param[global_slices]) + else: + assert ( + ref_param.shape == param.shape + ), f"Parameter shape don't match for {name}\n{ref_param.shape} != {param.shape}" + # copy param data (not just the reference) + with torch.no_grad(): + param.copy_(ref_param) + ref_param = None + # torch.cuda.empty_cache() + + # TODO @nouamanetazi: assert weights are the same + # Marks parameters as NanotronParameters + mark_tied_parameters(model=model, parallel_context=parallel_context, parallel_config=parallel_config) + + sanity_check(root_module=model) + + checkpoint_metadata = { + "last_train_step": 0, + "consumed_train_samples": 0, + } + save(config=CONFIG_BRRR, model=model, optimizer=None, lr_scheduler=None, parallel_context=parallel_context, root_folder=save_path, + should_save_optimizer=False, should_save_lr_scheduler=False, checkpoint_metadata=checkpoint_metadata, + sanity_checks=False) + # save_weights(model=model, parallel_context=parallel_context, root_folder=save_path) + # save_meta(root_folder=save_path, parallel_context=parallel_context, checkpoint_metadata=checkpoint_metadata) + + if dist.get_rank(parallel_context.world_pg) == 0: + print(save_path) + import json + + with open(save_path / "model_config.json", mode="w") as fo: + fo.write(json.dumps(asdict(CONFIG_BRRR.model.model_config), indent=4)) + + +def main(): + args = get_args() + convert_trfrs_to_brrr(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/dataloader.py b/dataloader.py new file mode 100644 
index 0000000000000000000000000000000000000000..1cafdd383704f68860f5ac6d7a4eaf7bd0405ebb
--- /dev/null
+++ b/dataloader.py
@@ -0,0 +1,107 @@
+from nanotron import logging
+from nanotron.config import (
+    PretrainDatasetsArgs,
+)
+from nanotron.dataloader import (
+    clm_process,
+    dummy_infinite_data_generator,
+    get_datasets,
+    get_train_dataloader,
+)
+from nanotron.logging import log_rank
+from nanotron.parallel.pipeline_parallel.utils import get_input_output_pp_ranks
+from nanotron.trainer import DistributedTrainer
+from nanotron.utils import (
+    main_rank_first,
+)
+
+try:
+    from huggingface_hub import __version__ as hf_hub_version
+    from transformers import AutoTokenizer
+    from transformers import __version__ as tf_version
+except ImportError:
+    hf_hub_version = None
+    tf_version = None
+
+logger = logging.get_logger(__name__)
+
+
+def get_dataloader(trainer: DistributedTrainer):
+    """Returns a dataloader for training."""
+
+    # First, we need to know which ranks to feed the dataloader to
+    input_pp_rank, output_pp_rank = get_input_output_pp_ranks(model=trainer.model)
+
+    # Case 1: Dummy data generator
+    if trainer.config.data.dataset is None:
+        log_rank("Using dummy data generator", logger=logger, level=logging.INFO, rank=0)
+        dataloader = dummy_infinite_data_generator(
+            micro_batch_size=trainer.micro_batch_size,
+            sequence_length=trainer.sequence_length,
+            input_pp_rank=input_pp_rank,
+            output_pp_rank=output_pp_rank,
+            vocab_size=trainer.model_config.vocab_size,
+            seed=trainer.config.data.seed,
+            parallel_context=trainer.parallel_context,
+        )()
+
+    # Case 2: HuggingFace datasets
+    elif isinstance(trainer.config.data.dataset, PretrainDatasetsArgs):
+        log_rank("Using `datasets` library", logger=logger, level=logging.INFO, rank=0)
+        tokenizer_path = trainer.config.tokenizer.tokenizer_name_or_path
+        log_rank(
+            f"Loading tokenizer from {tokenizer_path} and transformers/hf_hub versions {tf_version, hf_hub_version}",
+            logger=logger,
+            level=logging.INFO,
+            rank=0,
+        )
+
+        # We need the 1st device to process the dataset and cache it, so the other devices can load it from the cache
+        with main_rank_first(trainer.parallel_context.world_pg):
+            # TODO @nouamanetazi: this may timeout before 1st device finishes processing dataset. Can we have a ctxmanager to modify timeout?
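+            # pipeline below: load the raw HF dataset, tokenize/pack it with clm_process, then wrap it in a training dataloader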
+            # TODO: generalise to include validation/test splits
+
+            # We load the raw dataset
+            raw_dataset = get_datasets(
+                hf_dataset_or_datasets=trainer.config.data.dataset.hf_dataset_or_datasets,
+                splits=trainer.config.data.dataset.hf_dataset_splits,
+            )["train"]
+
+            tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
+            tokenizer.pad_token = tokenizer.eos_token
+            tokenizer.padding_side = "left"
+
+            # We apply the Causal Language Modeling preprocessing
+            train_dataset = clm_process(
+                raw_dataset=raw_dataset,
+                tokenizer=tokenizer,
+                text_column_name=trainer.config.data.dataset.text_column_name,
+                dataset_processing_num_proc_per_process=trainer.config.data.dataset.dataset_processing_num_proc_per_process,
+                dataset_overwrite_cache=trainer.config.data.dataset.dataset_overwrite_cache,
+                sequence_length=trainer.sequence_length,
+            )
+
+            # We load the processed dataset on the ranks requiring it
+            dataloader = get_train_dataloader(
+                train_dataset=train_dataset,
+                sequence_length=trainer.sequence_length,
+                parallel_context=trainer.parallel_context,
+                input_pp_rank=input_pp_rank,
+                output_pp_rank=output_pp_rank,
+                micro_batch_size=trainer.micro_batch_size,
+                consumed_train_samples=trainer.consumed_train_samples,
+                dataloader_num_workers=trainer.config.data.num_loading_workers,
+                seed_worker=trainer.config.data.seed,
+                dataloader_drop_last=True,
+            )
+            # Check if we have enough samples for train_steps
+            assert (
+                trainer.config.tokens.train_steps - trainer.start_iteration_step
+            ) * trainer.global_batch_size // trainer.parallel_context.dp_pg.size() < len(dataloader), (
+                f"Dataset is too small for steps ({len(dataloader)} < {(trainer.config.tokens.train_steps - trainer.start_iteration_step) * trainer.global_batch_size // trainer.parallel_context.dp_pg.size()}), "
+                f"Try train_steps<={len(dataloader) * trainer.parallel_context.dp_pg.size() // trainer.global_batch_size + trainer.start_iteration_step}"
+            )
+    else:
+        raise ValueError(f"Unhandled case of `self.config.data.dataset`. Got: {trainer.config.data.dataset}")
+
+    return dataloader
diff --git a/modeling_minicpm.py b/modeling_minicpm.py
new file mode 100644
index 0000000000000000000000000000000000000000..d067fd2e3345bff86cbea0776f36189875f42918
--- /dev/null
+++ b/modeling_minicpm.py
@@ -0,0 +1,1147 @@
+# coding=utf-8
+# Copyright 2018 HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch MiniCPM model.
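+
+Llama-style decoder stack with MiniCPM's additional scalings: token embeddings are multiplied
+by `scale_emb`, each residual branch is scaled by `scale_depth / sqrt(num_hidden_layers)`, and
+hidden states are divided by `hidden_size / dim_model_base` before the lm_head.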
+""" +from typing import Dict, Optional, Union +import inspect +import math + +import torch +from flash_attn import bert_padding +from flash_attn.flash_attn_interface import ( + flash_attn_varlen_func, + flash_attn_with_kvcache, +) +from flash_attn.layers.rotary import RotaryEmbedding as FlashRotaryEmbedding +from nanotron import distributed as dist +from nanotron import logging +from nanotron.config import ParallelismArgs, RecomputeGranularity +from nanotron.generation.generate_store import AttachableStore +from nanotron.logging import log_rank +from nanotron.models import NanotronModel +from nanotron.nn.layer_norm import TritonRMSNorm +from nanotron.parallel import ParallelContext +from nanotron.parallel.parameters import NanotronParameter +from nanotron.parallel.pipeline_parallel.block import ( + PipelineBlock, + TensorPointer, +) +from nanotron.parallel.pipeline_parallel.p2p import P2P +from nanotron.parallel.tensor_parallel.functional import sharded_cross_entropy +from nanotron.parallel.tensor_parallel.nn import ( + TensorParallelColumnLinear, + TensorParallelEmbedding, + TensorParallelLinearMode, + TensorParallelRowLinear, +) +from nanotron.random import RandomStates +from nanotron.utils import checkpoint_method +from nanotron.nn.activations import ACT2FN +from torch import nn + +from config_minicpm import MiniCPMConfig + +logger = logging.get_logger(__name__) + +_flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_varlen_func).parameters) + + +class RotaryEmbedding(nn.Module): + def __init__(self, dim: int, end: int, theta: float = 10000.0): + super().__init__() + assert dim % 2 == 0 + self.dim = dim + self.end = end + self.theta = theta + # TODO @nouamane: Figure out why we can't set `DTypeInvariantTensor` ... + # TODO @thomasw21: Complex buffers break DDP, instead we store float and view them as complex + self.freqs_cis: torch.Tensor + self._initialized_buffer = False + + def init_rotary_embeddings(self): + if self._initialized_buffer is True: + # Buffer if already initialized + return + self.register_buffer( + "freqs_cis", + torch.empty(self.end, self.dim // 2, 2, dtype=torch.float, device="cuda"), + persistent=False, + ) + assert self.freqs_cis.device.type == "cuda" + # TODO @nouamane: One we figure out how to do the DTypeInvariantTensor, this can be removed and changed to an assert + if self.freqs_cis.dtype != torch.float: + self.freqs_cis = self.freqs_cis.to(torch.float) + assert self.freqs_cis.dtype == torch.float + freqs = 1.0 / ( + self.theta + ** (torch.arange(0, self.dim, 2, dtype=torch.float, device="cuda")[: (self.dim // 2)] / self.dim) + ) + t = torch.arange(self.end, device="cuda") + freqs = torch.outer(t, freqs).float() + complex_freqs = torch.polar(torch.ones_like(freqs), freqs) + freqs = torch.view_as_real(complex_freqs) + self.freqs_cis.copy_(freqs) + self._initialized_buffer = True + + def forward( + self, + x: torch.Tensor, # [batch_size, seq_length, num_heads, d_qk] + position_ids: Optional[torch.LongTensor], # [batch_size, seq_length] + ): + batch_size, seq_length, num_heads, inner_dim = x.shape + while ( + position_ids is not None and position_ids[-1, -1] >= self.end + ) or seq_length >= self.end: # TODO @nouamane: check if this causes cpu-gpu sync + self.end *= 2 + self._initialized_buffer = False + if self._initialized_buffer is False: + self.init_rotary_embeddings() + dtype = x.dtype + assert inner_dim % 2 == 0 + x = x.view( + batch_size, seq_length, num_heads, inner_dim // 2, 2 + ) # [batch_size, q_length, num_heads, inner_dim] 
+        if x.dtype == torch.bfloat16:
+            x = x.float()
+        complex_x = torch.view_as_complex(x)  # [batch_size, q_length, num_heads, inner_dim // 2]
+        if position_ids is None:
+            freqs_cis = self.freqs_cis[None, :seq_length, None, :]
+        else:
+            # TODO(kunhao): Should None follow the num_heads dimension?
+            if position_ids[-1, -1] < 0 or position_ids[-1, -1] >= self.end:  # Quick test hopefully
+                raise ValueError(f"Position ids must be in the range [0, {self.end}), but got {position_ids}")
+            freqs_cis = self.freqs_cis[position_ids][:, :, None, :]
+        complex_freqs = torch.view_as_complex(freqs_cis)
+        x_out = torch.view_as_real(complex_x * complex_freqs).view(batch_size, seq_length, num_heads, inner_dim)
+        return x_out.type(dtype)
+
+
+class GLUActivation(nn.Module):
+    def __init__(self, act_fn_name: str):
+        super().__init__()
+        self.act = ACT2FN[act_fn_name]
+
+    def forward(self, merged_states: torch.Tensor):
+        gate_states, up_states = torch.split(merged_states, merged_states.shape[-1] // 2, dim=-1)
+        return self.act(gate_states) * up_states
+
+
+class MLP(nn.Module):
+    def __init__(
+        self,
+        config: MiniCPMConfig,
+        parallel_config: Optional[ParallelismArgs],
+        tp_pg: dist.ProcessGroup,
+    ):
+        super().__init__()
+
+        # TODO @thomasw21: refactor so that we store that default in a single place.
+        tp_mode = parallel_config.tp_mode if parallel_config is not None else TensorParallelLinearMode.ALL_REDUCE
+        tp_linear_async_communication = (
+            parallel_config.tp_linear_async_communication if parallel_config is not None else False
+        )
+
+        gate_up_contiguous_chunks = (
+            config.intermediate_size,  # shape of gate_linear
+            config.intermediate_size,  # shape of up_linear
+        )
+        self.gate_up_proj = TensorParallelColumnLinear(
+            config.hidden_size,
+            2 * config.intermediate_size,
+            pg=tp_pg,
+            mode=tp_mode,
+            bias=False,
+            async_communication=tp_linear_async_communication,
+            contiguous_chunks=gate_up_contiguous_chunks,
+        )
+
+        self.down_proj = TensorParallelRowLinear(
+            config.intermediate_size,
+            config.hidden_size,
+            pg=tp_pg,
+            mode=tp_mode,
+            bias=False,
+            async_communication=tp_linear_async_communication and tp_mode is TensorParallelLinearMode.REDUCE_SCATTER,
+        )
+        # TODO @nouamane: why can't we torch.jit.script GLUActivation?
+        self.split_silu_mul = GLUActivation(config.hidden_act)
+
+    def forward(self, hidden_states):  # [seq_length, batch_size, hidden_dim]
+        merged_states = self.gate_up_proj(hidden_states)
+        hidden_states = self.down_proj(self.split_silu_mul(merged_states))
+        return {"hidden_states": hidden_states}
+
+
+class CoreAttention(nn.Module):
+    def __init__(self, config: MiniCPMConfig, parallel_config: Optional[ParallelismArgs], layer_idx: int):
+        super().__init__()
+        # TODO @thomasw21: GPT has a weird `d_kv` config which I'm guessing is essentially a `d_qkv`
+        assert (
+            config.hidden_size % config.num_attention_heads == 0
+        ), f"Hidden size {config.hidden_size} must be divisible by number of attention heads {config.num_attention_heads}."
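+        # per-head dimensions; queries/keys and values share the same head size here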
+ self.d_qk = config.hidden_size // config.num_attention_heads + self.d_v = config.hidden_size // config.num_attention_heads + self.dropout = config.attn_pdrop + + self.checkpoint_attention = False # Because flash_attn already does checkpointing + + # if config.sliding_window_size is not None: + # assert ( + # _flash_supports_window_size + # ), "Current version of flash-attn doesn't support sliding window: `pip install flash-attn>=2.3`" + # self.sliding_window_size = config.sliding_window_size # if layer_idx not in config.global_attn_layers else None + + @checkpoint_method(attr_name="checkpoint_attention") + def forward( + self, + query_states: torch.Tensor, # [batch_size * q_length, num_heads, inner_dim] + key_states: torch.Tensor, # [batch_size * kv_length, 1, inner_dim] + value_states: torch.Tensor, # [batch_size * kv_length, 1, inner_dim] + q_sequence_mask: torch.Tensor, # torch.BoolTensor [batch_size, q_length] (can be broadcasted to that size) + kv_sequence_mask: torch.Tensor, # torch.BoolTensor [batch_size, kv_length] (can be broadcasted to that size) + ): + # TODO @thomasw21: Compute once, instead of computing for each layers. + cu_seqlens_q = torch.zeros((q_sequence_mask.shape[0] + 1), dtype=torch.int32, device=query_states.device) + cu_seqlens_k = torch.zeros((kv_sequence_mask.shape[0] + 1), dtype=torch.int32, device=query_states.device) + torch.cumsum(q_sequence_mask.sum(-1, dtype=torch.int32), dim=0, dtype=torch.int32, out=cu_seqlens_q[1:]) + torch.cumsum(kv_sequence_mask.sum(-1, dtype=torch.int32), dim=0, dtype=torch.int32, out=cu_seqlens_k[1:]) + + # TODO(kunhao): flash attn's causal means that the query can only attend to the keys before it. This is not + # what we want if we are using kv cache. This is a hack as we always have q_length == 1 when using kv cache. + causal = False if q_sequence_mask.shape[1] == 1 else True + attn_output = flash_attn_varlen_func( + q=query_states, + k=key_states, + v=value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=q_sequence_mask.shape[1], + max_seqlen_k=kv_sequence_mask.shape[1], + dropout_p=self.dropout if self.training else 0.0, + softmax_scale=None, # defaults to 1/sqrt(d_qk) + causal=causal, + # window_size=(self.sliding_window_size - 1, 0) if self.sliding_window_size is not None else (-1, -1), + return_attn_probs=False, + ) + + return attn_output + + +def pad_to_right(tensor, mask, new_tensor=None): + """Transform a left-padded tensor into a right-padded tensor. 
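+    Valid tokens of each row are packed to the front; padding ends up on the right.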
(Useful for prefilling key/value states) + Args: + tensor: (batch_size, seqlen, d1, d2) + mask: (batch_size, seqlen) + new_tensor: (batch_size, new_tensor_seqlen, d1, d2) + Returns: + new_tensor: (batch_size, new_tensor_seqlen, d1, d2) + right_padded_mask: (batch_size, seqlen) + """ + # First, we need to find the number of padding for each row + unpad_seqlens = mask.sum(1) + # Then, we need to find the maximum length of the tensor + max_seqlen = mask.shape[1] + # We can then create the indices to select the padded values + # The indices are the same for each row + indices = torch.arange(max_seqlen, device=mask.device) + # We can then create the mask for the padded values + right_padded_mask = indices < unpad_seqlens[:, None] + # We select the useful values + useful_values = tensor[mask] + # We create the new tensor (if not provided) + new_tensor = torch.zeros_like(tensor) if new_tensor is None else new_tensor + # We fill the new tensor with the useful values + new_tensor[:, : right_padded_mask.shape[1], :, :][right_padded_mask] = useful_values + return new_tensor, right_padded_mask + + +class CausalSelfAttention(nn.Module, AttachableStore): + def __init__( + self, + config: MiniCPMConfig, + parallel_config: Optional[ParallelismArgs], + tp_pg: dist.ProcessGroup, + layer_idx: int, + ): + super().__init__() + # Tensor parallel considerations: We split tensors along head dimension + assert ( + config.num_attention_heads % tp_pg.size() == 0 + ), f"Number of attention heads ({config.num_attention_heads}) must be divisible by TP size ({tp_pg.size()})." + try: + assert ( + config.num_key_value_heads % tp_pg.size() == 0 + ), f"Number of key/value heads ({config.num_key_value_heads}) must be divisible by TP size ({tp_pg.size()})." + except AttributeError: + log_rank( + "WARNING: num_key_value_heads not defined, assuming it is equal to num_attention_heads", + logger=logger, + level=logging.WARNING, + rank=0, + ) + # If num_key_value_heads is not defined, we assume that it is equal to num_attention_heads + config.num_key_value_heads = config.num_attention_heads + assert ( + config.num_attention_heads % config.num_key_value_heads == 0 + ), f"Number of attention heads ({config.num_attention_heads}) must be divisible by number of key/value heads ({config.num_key_value_heads})." + self.n_local_q_heads = config.num_attention_heads // tp_pg.size() + self.n_local_kv_heads = config.num_key_value_heads // tp_pg.size() + self.n_repeats = config.num_attention_heads // config.num_key_value_heads + self.is_gqa = config.num_attention_heads != config.num_key_value_heads # Whether we are using GQA or not + self.d_qk = config.hidden_size // config.num_attention_heads + self.d_v = config.hidden_size // config.num_attention_heads + self.d_model = config.hidden_size + + # TODO @thomasw21: refactor so that we store that default in a single place. 
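+        # note: attention heads are sharded across TP ranks, each rank keeping n_local_q_heads / n_local_kv_heads of them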
+ tp_mode = parallel_config.tp_mode if parallel_config is not None else TensorParallelLinearMode.ALL_REDUCE + tp_linear_async_communication = ( + parallel_config.tp_linear_async_communication if parallel_config is not None else False + ) + + # build the slice config for self.qkv for save/load + # shard are done within the contiguous chunk + qkv_contiguous_chunks = ( + config.num_attention_heads * self.d_qk, # shape of q + config.num_key_value_heads * self.d_qk, # shape of k + config.num_key_value_heads * self.d_qk, # shape of v + ) + self.qkv_proj = TensorParallelColumnLinear( + self.d_model, + config.num_attention_heads * self.d_qk + 2 * config.num_key_value_heads * self.d_qk, + pg=tp_pg, + mode=tp_mode, + bias=False, + async_communication=tp_linear_async_communication, + contiguous_chunks=qkv_contiguous_chunks, + ) + # TODO(kunhao): We want to have only one version per device and not one version per layer. + self.rotary_embedding = RotaryEmbedding( + dim=self.d_qk, + end=config.max_position_embeddings, + theta=config.rope_theta + ) + + # NOTE: Only supported for training (TODO(fmom): position_ids not supported yet) + self.flash_rotary_embedding = FlashRotaryEmbedding(dim=self.d_qk, base=config.rope_theta, interleaved=True) + + self.o_proj = TensorParallelRowLinear( + config.num_attention_heads * self.d_qk, + self.d_model, + pg=tp_pg, + mode=tp_mode, + bias=False, + async_communication=tp_linear_async_communication, + ) + + self.attention = CoreAttention( + config, + parallel_config=parallel_config, + layer_idx=layer_idx, + ) + + self.prefill_kv_len = ( + config.max_position_embeddings + ) # TODO @nouamane: compute based on free memory, because in rope we can surpass max_position_embeddings + + def forward( + self, + hidden_states, # [seq_length, batch_size, hidden_size] + sequence_mask, # [batch_size, seq_length] + ): + qkv_states = self.qkv_proj( + hidden_states + ) # [seq_length, batch_size, n_local_q_heads * d_qk + 2 * n_local_kv_heads * d_qk] + q_length, batch_size, _ = qkv_states.shape + + if self.is_gqa: + query_states, key_states, value_states = torch.split( + qkv_states, + [ + self.n_local_q_heads * self.d_qk, + self.n_local_kv_heads * self.d_qk, + self.n_local_kv_heads * self.d_qk, + ], + dim=-1, + ) + + query_states = ( + query_states.transpose(0, 1).contiguous().view(batch_size, q_length, self.n_local_q_heads, self.d_qk) + ) + key_states = ( + key_states.transpose(0, 1).contiguous().view(batch_size, q_length, self.n_local_kv_heads, self.d_qk) + ) + value_states = ( + value_states.transpose(0, 1).contiguous().view(batch_size, q_length, self.n_local_kv_heads, self.d_qk) + ) + else: + query_states, key_states, value_states = ( + qkv_states.view(q_length, batch_size, 3, self.n_local_q_heads, self.d_qk) + .permute(2, 1, 0, 3, 4) + .contiguous() + ) # [3, batch_size, seq_length, n_local_q_heads, d_qk] + + store = self.get_local_store() + if store is not None: # Inference case + # Double check that we use store only at inference time + assert key_states.requires_grad is False + assert value_states.requires_grad is False + if "position_offsets" in store: + old_position_offsets = store["position_offsets"] + position_ids = old_position_offsets[:, None] + sequence_mask + else: + position_ids = torch.cumsum(sequence_mask, dim=-1, dtype=torch.int32) - 1 + position_offsets = position_ids[:, -1] + + # Compute rotary embeddings + # Note: keep track of old rotary embedding end to check if we need to enlarge k_cache and v_cache + old_rotary_embed_end = self.rotary_embedding.end + query_states = 
self.rotary_embedding(query_states, position_ids=position_ids) + key_states = self.rotary_embedding(key_states, position_ids=position_ids) + + if "key" not in store: + # First inference iteration (Prefill) + # TODO @nouamane: support custom masking + # assert that [ False, False, False, False, True, True, True, True, True, True] is accepted + # but [ False, False, False, False, True, True, False, False, True, True] is not (can't mask in the middle of sequence) + assert ~( + sequence_mask[:, :-1] & (~sequence_mask[:, 1:]) # True is never followed by False + ).any(), "Can't mask in the middle of sequence, please make sure that pads are at the left of the sequence if existing" + + # preallocate k_cache, v_cache to self.prefill_kv_len + k_cache = torch.zeros( + ( + batch_size, + self.prefill_kv_len, + self.n_local_kv_heads, + self.d_qk, + ), + dtype=query_states.dtype, + device=query_states.device, + ) + v_cache = torch.zeros( + (batch_size, self.prefill_kv_len, self.n_local_kv_heads, self.d_v), + dtype=query_states.dtype, + device=query_states.device, + ) + # Remove pad tokens from key_states and concatenate samples in key_unpad + # cu_seqlens_k is the cumulative sequence lengths of key_states + (query_unpad, indices_q, cu_seqlens_q, max_seqlen_q) = bert_padding.unpad_input( + query_states, + sequence_mask, + ) + (key_unpad, indices_k, cu_seqlens_k, max_seqlen_k) = bert_padding.unpad_input( + key_states, sequence_mask + ) + (value_unpad, _, _, _) = bert_padding.unpad_input(value_states, sequence_mask) + + output_unpad = flash_attn_varlen_func( + q=query_unpad, # (total_q, n_local_q_heads, d_qk) + k=key_unpad, # (total_kv, n_local_kv_heads, d_qk) + v=value_unpad, # (total_kv, n_local_kv_heads, d_v) + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_q, + max_seqlen_k=max_seqlen_k, + dropout_p=0.0, + softmax_scale=None, + causal=True, # True in prefill phase, False in subsequent phases + return_attn_probs=False, + ) # (total_unpadded, n_local_q_heads, d_v) + + attention_output = bert_padding.pad_input( + output_unpad, indices_q, batch_size, q_length + ) # (batch_size, q_length, n_local_q_heads, d_v) + + pad_to_right(key_states, sequence_mask, new_tensor=k_cache) + pad_to_right(value_states, sequence_mask, new_tensor=v_cache) + + else: + # Pull pre-computed key/value states + # Subsequent inference iterations (q_length=1) + k_cache = store["key"] + v_cache = store["value"] + + # NOTE(fmom): According to flash_attn_with_kvcache, "If you pass in k / v, you must make sure that the cache is large enough to hold the new values" + # Since rotary embedding has changed (to enable larger context), we need to enlarge k_cache and v_cache + if self.rotary_embedding.end > old_rotary_embed_end: + k_cache = torch.cat( + [ + k_cache, + torch.zeros( + ( + batch_size, + self.rotary_embedding.end - old_rotary_embed_end, + self.n_local_kv_heads, + self.d_qk, + ), + dtype=query_states.dtype, + device=query_states.device, + ), + ], + dim=1, + ) + + v_cache = torch.cat( + [ + v_cache, + torch.zeros( + ( + batch_size, + self.rotary_embedding.end - old_rotary_embed_end, + self.n_local_kv_heads, + self.d_v, + ), + dtype=query_states.dtype, + device=query_states.device, + ), + ], + dim=1, + ) + + assert ( + k_cache.shape[1] == self.rotary_embedding.end + ), f"Cache size {k_cache.shape[1]} is smaller than rotary embedding end {self.rotary_embedding.end}" + assert ( + v_cache.shape[1] == self.rotary_embedding.end + ), f"Cache size {v_cache.shape[1]} is smaller than rotary embedding end 
{self.rotary_embedding.end}" + + # [batch_size, seq_length, num_heads, d_qk] + query_states = query_states.view( + batch_size, q_length, self.n_local_q_heads, self.d_qk + ) # [batch_size, q_length, self.n_heads, d_qk] + kv_length = key_states.shape[1] + key_states = key_states.view( + batch_size, kv_length, self.n_local_kv_heads, self.d_qk + ) # [batch_size, kv_length, self.n_heads, d_qk] + value_states = value_states.view( + batch_size, kv_length, self.n_local_kv_heads, self.d_v + ) # [batch_size, kv_length, self.n_heads, d_v] + + attention_output = flash_attn_with_kvcache( + query_states, + k_cache, + v_cache, + key_states, + value_states, + rotary_cos=None, + rotary_sin=None, + # TODO @nouamane: seems like this doesnt help to indicate padding in (for first iteration it's just 0) + cache_seqlens=position_offsets.contiguous(), + softmax_scale=None, + causal=True, + rotary_interleaved=False, # GPT-NeoX style + ) + + store.update( + { + "key": k_cache, # flash-attn has updated with new key_states using cache_seqlens + "value": v_cache, + "position_offsets": position_offsets, + } + ) + + else: # Training case + # Apply rotary embeddings to query/key states + # NOTE: The layout is different from models/MiniCPM.py which is [batch_size, num_heads, seq_length, d_qk] + # Here it is, [batch_size, seq_length, num_heads, d_qk] + # [2, batch_size, seq_length, num_heads, d_qk] + key_value_states = torch.cat([key_states.unsqueeze(0), value_states.unsqueeze(0)], dim=0) + # [batch_size, seq_length, 2, num_heads, d_qk] + key_value_states = key_value_states.permute(1, 2, 0, 3, 4).contiguous() + query_states, key_value_states = self.flash_rotary_embedding(query_states, kv=key_value_states) + # [batch_size, seq_length, num_heads, d_qk] + key_states, value_states = torch.split(key_value_states, 1, dim=2) + + q_sequence_mask = sequence_mask + kv_sequence_mask = sequence_mask + + kv_length = key_states.shape[1] + # [batch_size, seq_length, num_heads, d_qk] + # Shaping for use in `flash-attn` version of flash-attn: `flash_attn_unpadded_func` + query_states = query_states.view( + batch_size * q_length, self.n_local_q_heads, self.d_qk + ) # [batch_size * q_length, self.n_heads, d_qk] + + key_states = key_states.view( + batch_size * kv_length, self.n_local_kv_heads, self.d_qk + ) # [batch_size * kv_length, self.n_heads, d_qk] + value_states = value_states.view( + batch_size * kv_length, self.n_local_kv_heads, self.d_v + ) # [batch_size * kv_length, self.n_heads, d_v] + + attention_output = self.attention( + query_states=query_states, + key_states=key_states, + value_states=value_states, + q_sequence_mask=q_sequence_mask, + kv_sequence_mask=kv_sequence_mask, + ) + + attention_output = ( + attention_output.contiguous().view(batch_size, q_length, self.n_local_q_heads * self.d_v).transpose(0, 1) + ) + output = self.o_proj(attention_output) + + return {"hidden_states": output, "sequence_mask": sequence_mask} + + +class MiniCPMDecoderLayer(nn.Module): + def __init__( + self, + config: MiniCPMConfig, + parallel_config: Optional[ParallelismArgs], + tp_pg: dist.ProcessGroup, + layer_idx: int, + ): + super().__init__() + self.scale_depth = config.scale_depth + self.num_hidden_layers = config.num_hidden_layers + self.input_layernorm = TritonRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.attn = CausalSelfAttention( + config=config, + parallel_config=parallel_config, + tp_pg=tp_pg, + layer_idx=layer_idx, + ) + + self.post_attention_layernorm = TritonRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.mlp = 
MLP(config=config, parallel_config=parallel_config, tp_pg=tp_pg) + + def forward( + self, + hidden_states: Union[torch.Tensor, TensorPointer], + sequence_mask: Union[torch.Tensor, TensorPointer], + ) -> Dict[str, Union[torch.Tensor, TensorPointer]]: + residual = hidden_states + hidden_states = self.input_layernorm(hidden_states) + + output = self.attn(hidden_states=hidden_states, sequence_mask=sequence_mask) + hidden_states = output["hidden_states"] + hidden_states = residual + hidden_states * (self.scale_depth / math.sqrt(self.num_hidden_layers)) + + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states=hidden_states)["hidden_states"] + hidden_states = residual + hidden_states * (self.scale_depth / math.sqrt(self.num_hidden_layers)) + + return { + "hidden_states": hidden_states, + "sequence_mask": output["sequence_mask"], + } + + +class Embedding(nn.Module, AttachableStore): + def __init__(self, tp_pg: dist.ProcessGroup, config: MiniCPMConfig, parallel_config: Optional[ParallelismArgs]): + super().__init__() + self.token_embedding = TensorParallelEmbedding( + num_embeddings=config.vocab_size, + embedding_dim=config.hidden_size, + padding_idx=config.pad_token_id, + pg=tp_pg, + mode=parallel_config.tp_mode if parallel_config is not None else TensorParallelLinearMode.ALL_REDUCE, + ) + self.pg = tp_pg + + def forward(self, input_ids: torch.Tensor, input_mask: torch.Tensor): # [batch_size, seq_length] + store = self.get_local_store() + if store is not None: + if "past_length" in store: + past_length = store["past_length"] + else: + past_length = torch.zeros(1, dtype=torch.long, device=input_ids.device).expand(input_ids.shape[0]) + + cumsum_mask = input_mask.cumsum(-1, dtype=torch.long) + # Store new past_length in store + store["past_length"] = past_length + cumsum_mask[:, -1] + + # Format input in `[seq_length, batch_size]` to support high TP with low batch_size + input_ids = input_ids.transpose(0, 1) + input_embeds = self.token_embedding(input_ids) + return {"input_embeds": input_embeds} + + +class MiniCPMModel(nn.Module): + """Build pipeline graph""" + + def __init__( + self, + config: MiniCPMConfig, + parallel_context: ParallelContext, + parallel_config: Optional[ParallelismArgs], + ): + super().__init__() + + # Declare all the nodes + self.p2p = P2P(parallel_context.pp_pg, device=torch.device("cuda")) + self.config = config + self.parallel_config = parallel_config + self.parallel_context = parallel_context + self.tp_mode = parallel_config.tp_mode if parallel_config is not None else TensorParallelLinearMode.ALL_REDUCE + tp_linear_async_communication = ( + parallel_config.tp_linear_async_communication if parallel_config is not None else False + ) + + self.token_position_embeddings = PipelineBlock( + p2p=self.p2p, + module_builder=Embedding, + module_kwargs={ + "tp_pg": parallel_context.tp_pg, + "config": config, + "parallel_config": parallel_config, + }, + module_input_keys={"input_ids", "input_mask"}, + module_output_keys={"input_embeds"}, + ) + + self.decoder = nn.ModuleList( + [ + PipelineBlock( + p2p=self.p2p, + module_builder=MiniCPMDecoderLayer, + module_kwargs={ + "config": config, + "parallel_config": parallel_config, + "tp_pg": parallel_context.tp_pg, + "layer_idx": layer_idx, + }, + module_input_keys={"hidden_states", "sequence_mask"}, + module_output_keys={"hidden_states", "sequence_mask"}, + ) + for layer_idx in range(config.num_hidden_layers) + ] + ) + + self.final_layer_norm = PipelineBlock( + 
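+            # RMSNorm over the last decoder layer's output, applied just before the lm_head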
p2p=self.p2p, + module_builder=TritonRMSNorm, + module_kwargs={"hidden_size": config.hidden_size, "eps": config.rms_norm_eps}, + module_input_keys={"input"}, + module_output_keys={"hidden_states"}, + ) # TODO + + self.lm_head = PipelineBlock( + p2p=self.p2p, + # Understand that this means that we return sharded logits that are going to need to be gathered + module_builder=TensorParallelColumnLinear, + module_kwargs={ + "in_features": config.hidden_size, + "out_features": config.vocab_size, + "pg": parallel_context.tp_pg, + "bias": False, + # TODO @thomasw21: refactor so that we store that default in a single place. + "mode": self.tp_mode, + "async_communication": tp_linear_async_communication, + }, + module_input_keys={"x"}, + module_output_keys={"logits"}, + ) + + self.cast_to_fp32 = PipelineBlock( + p2p=self.p2p, + module_builder=lambda: lambda x: x.float(), + module_kwargs={}, + module_input_keys={"x"}, + module_output_keys={"output"}, + ) + + def forward( + self, + input_ids: Union[torch.Tensor, TensorPointer], # [batch_size, seq_length] + input_mask: Union[torch.Tensor, TensorPointer], # [batch_size, seq_length] + ): + return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0] + + def forward_with_hidden_states( + self, + input_ids: Union[torch.Tensor, TensorPointer], # [batch_size, seq_length] + input_mask: Union[torch.Tensor, TensorPointer], # [batch_size, seq_length] + ): + # all tensors are optional as most ranks don't need anything from the dataloader. + + output = self.token_position_embeddings(input_ids=input_ids, input_mask=input_mask) + + hidden_encoder_states = { + "hidden_states": output["input_embeds"] * self.config.scale_emb, + "sequence_mask": input_mask, + } + for encoder_block in self.decoder: + hidden_encoder_states = encoder_block(**hidden_encoder_states) + + hidden_states = self.final_layer_norm(input=hidden_encoder_states["hidden_states"])["hidden_states"] + + sharded_logits = self.lm_head(x=hidden_states / (self.config.hidden_size / self.config.dim_model_base))["logits"] + + fp32_sharded_logits = self.cast_to_fp32(x=sharded_logits)["output"] + + return fp32_sharded_logits, hidden_states + + def get_block_compute_costs(self): + """Computes the compute cost of each block in the model so that we can do a better job of load balancing.""" + model_config = self.config + d_ff = model_config.intermediate_size + d_qkv = model_config.hidden_size // model_config.num_attention_heads + block_compute_costs = { + # CausalSelfAttention (qkv proj + attn out) + MLP + MiniCPMDecoderLayer: 4 * model_config.num_attention_heads * d_qkv * model_config.hidden_size + + 3 * d_ff * model_config.hidden_size, + # This is the last lm_head + TensorParallelColumnLinear: model_config.vocab_size * model_config.hidden_size, + } + return block_compute_costs + + def get_flops_per_sec(self, iteration_time_in_sec, sequence_length, global_batch_size): + """Get flops per second for a given model""" + world_size = self.parallel_context.world_pg.size() + try: + num_key_values_heads = self.config.num_key_value_heads + except AttributeError: + num_key_values_heads = self.config.num_attention_heads + + model_flops, hardware_flops = get_flops( + num_layers=self.config.num_hidden_layers, + hidden_size=self.config.hidden_size, + num_heads=self.config.num_attention_heads, + num_key_value_heads=num_key_values_heads, + vocab_size=self.config.vocab_size, + ffn_hidden_size=self.config.intermediate_size, + seq_len=sequence_length, + batch_size=global_batch_size, + 
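+            # with activation recomputation, hardware_flops additionally counts the recomputed forward passes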
recompute_granularity=self.parallel_config.recompute_granularity, + ) + + model_flops_per_s = model_flops / (iteration_time_in_sec * world_size * 1e12) + hardware_flops_per_s = hardware_flops / (iteration_time_in_sec * world_size * 1e12) + return model_flops_per_s, hardware_flops_per_s + + +@torch.jit.script +def masked_mean(loss, label_mask, dtype): + # type: (Tensor, Tensor, torch.dtype) -> Tensor + return (loss * label_mask).sum(dtype=dtype) / label_mask.sum() + + +class Loss(nn.Module): + def __init__(self, tp_pg: dist.ProcessGroup): + super().__init__() + self.tp_pg = tp_pg + + def forward( + self, + sharded_logits: torch.Tensor, # [seq_length, batch_size, logits] + label_ids: torch.Tensor, # [batch_size, seq_length] + label_mask: torch.Tensor, # [batch_size, seq_length] + ) -> Dict[str, torch.Tensor]: + # Megatron by defaults cast everything in fp32. `--f16-lm-cross-entropy` is an option you can use to keep current precision. + # https://github.com/NVIDIA/Megatron-LM/blob/f267e6186eae1d6e2055b412b00e2e545a8e896a/megatron/model/gpt_model.py#L38 + loss = sharded_cross_entropy( + sharded_logits, label_ids.transpose(0, 1).contiguous(), group=self.tp_pg, dtype=torch.float + ).transpose(0, 1) + # TODO @thomasw21: It's unclear what kind of normalization we want to do. + loss = masked_mean(loss, label_mask, dtype=torch.float) + # I think indexing causes a sync we don't actually want + # loss = loss[label_mask].sum() + return {"loss": loss} + + +class MiniCPMForTraining(NanotronModel): + def __init__( + self, + config: MiniCPMConfig, + parallel_context: ParallelContext, + parallel_config: Optional[ParallelismArgs], + random_states: Optional[RandomStates] = None, + ): + super().__init__() + import warnings + + self.model = MiniCPMModel(config=config, parallel_context=parallel_context, parallel_config=parallel_config) + self.loss = PipelineBlock( + p2p=self.model.p2p, + module_builder=Loss, + module_kwargs={"tp_pg": parallel_context.tp_pg}, + module_input_keys={ + "sharded_logits", + "label_ids", + "label_mask", + }, + module_output_keys={"loss"}, + ) + self.parallel_context = parallel_context + self.config = config + self.parallel_config = parallel_config + + def forward( + self, + input_ids: Union[torch.Tensor, TensorPointer], + input_mask: Union[torch.Tensor, TensorPointer], + label_ids: Union[torch.Tensor, TensorPointer], + label_mask: Union[torch.Tensor, TensorPointer], + ) -> Dict[str, Union[torch.Tensor, TensorPointer]]: + sharded_logits = self.model( + input_ids=input_ids, + input_mask=input_mask, + ) + loss = self.loss( + sharded_logits=sharded_logits, + label_ids=label_ids, + label_mask=label_mask, + )["loss"] + return {"loss": loss} + + @torch.no_grad() + def init_model_randomly(self, init_method, scaled_init_method): + """Initialize model parameters randomly. + Args: + init_method (callable): Used for embedding/position/qkv weight in attention/first layer weight of mlp/ /lm_head/ + scaled_init_method (callable): Used for o weight in attention/second layer weight of mlp/ + + Note: + Layernorm weight all 0 or 1 depending on `apply_layernorm_1p` + """ + model = self + initialized_parameters = set() + # Handle tensor parallelism + module_id_to_prefix = {id(module): f"{module_name}." 
for module_name, module in model.named_modules()} + # Fix the root_model + module_id_to_prefix[id(model)] = "" + + for module_name, module in model.named_modules(): + if isinstance(module, TensorParallelColumnLinear): + # Somehow Megatron-LM does something super complicated, https://github.com/NVIDIA/Megatron-LM/blob/2360d732a399dd818d40cbe32828f65b260dee11/megatron/core/tensor_parallel/layers.py#L96 + # What it does: + # - instantiate a buffer of the `full size` in fp32 + # - run init method on it + # - shard result to get only a specific shard + # Instead I'm lazy and just going to run init_method, since they are scalar independent + assert {"weight"} == {name for name, _ in module.named_parameters()} or {"weight"} == { + name for name, _ in module.named_parameters() + } + for param_name, param in module.named_parameters(): + assert isinstance(param, NanotronParameter) + if param.is_tied: + tied_info = param.get_tied_info() + full_param_name = tied_info.get_full_name_from_module_id_to_prefix( + module_id_to_prefix=module_id_to_prefix + ) + else: + full_param_name = f"{module_name}.{param_name}" + + if full_param_name in initialized_parameters: + # Already initialized + continue + + if "weight" == param_name: + init_method(param) + elif "bias" == param_name: + param.zero_() + else: + raise ValueError(f"Who the fuck is {param_name}?") + + assert full_param_name not in initialized_parameters + initialized_parameters.add(full_param_name) + elif isinstance(module, TensorParallelRowLinear): + # Somehow Megatron-LM does something super complicated, https://github.com/NVIDIA/Megatron-LM/blob/2360d732a399dd818d40cbe32828f65b260dee11/megatron/core/tensor_parallel/layers.py#L96 + # What it does: + # - instantiate a buffer of the `full size` in fp32 + # - run init method on it + # - shard result to get only a specific shard + # Instead I'm lazy and just going to run init_method, since they are scalar independent + assert {"weight"} == {name for name, _ in module.named_parameters()} or {"weight"} == { + name for name, _ in module.named_parameters() + } + for param_name, param in module.named_parameters(): + assert isinstance(param, NanotronParameter) + if param.is_tied: + tied_info = param.get_tied_info() + full_param_name = tied_info.get_full_name_from_module_id_to_prefix( + module_id_to_prefix=module_id_to_prefix + ) + else: + full_param_name = f"{module_name}.{param_name}" + + if full_param_name in initialized_parameters: + # Already initialized + continue + + if "weight" == param_name: + scaled_init_method(param) + elif "bias" == param_name: + param.zero_() + else: + raise ValueError(f"Who the fuck is {param_name}?") + + assert full_param_name not in initialized_parameters + initialized_parameters.add(full_param_name) + elif isinstance(module, TritonRMSNorm): + assert {"weight"} == {name for name, _ in module.named_parameters()} + for param_name, param in module.named_parameters(): + assert isinstance(param, NanotronParameter) + if param.is_tied: + tied_info = param.get_tied_info() + full_param_name = tied_info.get_full_name_from_module_id_to_prefix( + module_id_to_prefix=module_id_to_prefix + ) + else: + full_param_name = f"{module_name}.{param_name}" + + if full_param_name in initialized_parameters: + # Already initialized + continue + + if "weight" == param_name: + # TODO @thomasw21: Sometimes we actually want 0 + param.fill_(1) + elif "bias" == param_name: + param.zero_() + else: + raise ValueError(f"Who the fuck is {param_name}?") + + assert full_param_name not in initialized_parameters + 
initialized_parameters.add(full_param_name) + elif isinstance(module, TensorParallelEmbedding): + # TODO @thomasw21: Handle tied embeddings + # Somehow Megatron-LM does something super complicated, https://github.com/NVIDIA/Megatron-LM/blob/2360d732a399dd818d40cbe32828f65b260dee11/megatron/core/tensor_parallel/layers.py#L96 + # What it does: + # - instantiate a buffer of the `full size` in fp32 + # - run init method on it + # - shard result to get only a specific shard + # Instead I'm lazy and just going to run init_method, since the entries are initialized independently of each other anyway + assert {"weight"} == {name for name, _ in module.named_parameters()} + + assert isinstance(module.weight, NanotronParameter) + if module.weight.is_tied: + tied_info = module.weight.get_tied_info() + full_param_name = tied_info.get_full_name_from_module_id_to_prefix( + module_id_to_prefix=module_id_to_prefix + ) + else: + full_param_name = f"{module_name}.weight" + + if full_param_name in initialized_parameters: + # Already initialized + continue + + init_method(module.weight) + assert full_param_name not in initialized_parameters + initialized_parameters.add(full_param_name) + + assert initialized_parameters == { + param.get_tied_info().get_full_name_from_module_id_to_prefix(module_id_to_prefix=module_id_to_prefix) + if param.is_tied + else name + for name, param in model.named_parameters() + }, f"Somehow the initialized set of parameters doesn't match:\n - Expected: { {name for name, _ in model.named_parameters()} }\n - Got: {initialized_parameters}" + + def get_block_compute_costs(self): + """Computes the compute cost of each block in the model so that we can do a better job of load balancing.""" + return self.model.get_block_compute_costs() + + def get_flops_per_sec(self, iteration_time_in_sec, sequence_length, global_batch_size): + """Get flops per second for a given model.""" + return self.model.get_flops_per_sec(iteration_time_in_sec, sequence_length, global_batch_size)
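+ +# Illustrative sketch (added for exposition): one plausible pair of callables for +# init_model_randomly above. std=0.025 mirrors the init std in the checkpoint config; +# dividing by sqrt(2 * num_hidden_layers) for the residual-output weights is a common +# Megatron-style convention and an assumption here, not something this file prescribes. +def _example_init_methods(std: float = 0.025, num_layers: int = 40): + import math + + def init_method(tensor): + return torch.nn.init.normal_(tensor, mean=0.0, std=std) + + def scaled_init_method(tensor): + return torch.nn.init.normal_(tensor, mean=0.0, std=std / math.sqrt(2 * num_layers)) + + return init_method, scaled_init_method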
+ + +def get_flops( + num_layers, + hidden_size, + num_heads, + vocab_size, + seq_len, + kv_channels=None, + ffn_hidden_size=None, + batch_size=1, + recompute_granularity=None, + glu_activation=False, +): + """Counts flops in a decoder-only model + Args: + num_layers: number of decoder layers + hidden_size: hidden size of the model + num_heads: number of attention heads + vocab_size: size of the vocabulary + seq_len: sequence length of the decoder + kv_channels: hidden size per attention head (defaults to hidden_size // num_heads) + ffn_hidden_size: hidden size of the FFN (defaults to 4 * hidden_size) + batch_size: batch size + recompute_granularity: Activation recomputation method. Either None, FULL or SELECTIVE. Check Megatron-LM docs for more info. + glu_activation: whether the FFN uses a gated (GLU) activation, i.e. three matmuls instead of two + Returns: + model_flops: flops in the model (should be independent of the hardware and model implementation) + hardware_flops: flops in the hardware (actual flops performed on the hardware). Check 6.3 in https://arxiv.org/pdf/2205.05198.pdf + """ + if kv_channels is None: + assert hidden_size % num_heads == 0 + kv_channels = hidden_size // num_heads + if ffn_hidden_size is None: + ffn_hidden_size = 4 * hidden_size + + # In the following we mark the reduced dimension with parentheses + # decoder + # self attention (MQA) + ## q projection + decoder_q_proj_flops_fwd = 2 * num_layers * batch_size * seq_len * (hidden_size) * num_heads * kv_channels + ## kv projection, shared across heads + decoder_kv_proj_flops_fwd = 2 * num_layers * batch_size * seq_len * (hidden_size) * 2 * kv_channels + ## qk logits + decoder_qk_logits_flops_fwd = 2 * num_layers * batch_size * num_heads * seq_len * (kv_channels) * seq_len + ### SWA (sliding window attention / local attention) + # window_size = 4096 + # decoder_qk_logits_flops_fwd = 2 * num_layers * batch_size * num_heads * seq_len * (kv_channels) * window_size + ## v logits + decoder_v_logits_flops_fwd = 2 * num_layers * batch_size * num_heads * seq_len * (seq_len) * kv_channels + # decoder_v_logits_flops_fwd = 2 * num_layers * batch_size * num_heads * seq_len * (window_size) * kv_channels + ## attn out + decoder_attn_out_flops_fwd = 2 * num_layers * batch_size * num_heads * seq_len * (kv_channels) * hidden_size + # FF + ## 1st layer + decoder_ffn_1_flops_fwd = 2 * num_layers * batch_size * seq_len * (hidden_size) * ffn_hidden_size + if glu_activation: + # 3 matmuls instead of 2 in the FFN + # ref. https://arxiv.org/pdf/2002.05202.pdf + # Used for example in T5 v1.1 + decoder_ffn_1_flops_fwd = 4 * num_layers * batch_size * seq_len * (hidden_size) * ffn_hidden_size + ## 2nd layer + decoder_ffn_2_flops_fwd = 2 * num_layers * batch_size * seq_len * (ffn_hidden_size) * hidden_size + + decoder_flops_fwd = ( + decoder_q_proj_flops_fwd + + decoder_kv_proj_flops_fwd + + decoder_qk_logits_flops_fwd + + decoder_v_logits_flops_fwd + + decoder_attn_out_flops_fwd + + decoder_ffn_1_flops_fwd + + decoder_ffn_2_flops_fwd + ) + + # lm head + lm_head_flops_fwd = 2 * batch_size * seq_len * (hidden_size) * vocab_size + + # For matmuls the bwd pass requires double the flops of the fwd pass, to compute the gradients with respect to + # both the input and the weight tensors + model_flops = 3 * (decoder_flops_fwd + lm_head_flops_fwd) # 1 for fwd + 2 for bwd + + if recompute_granularity is None: + hardware_flops = model_flops + elif recompute_granularity is RecomputeGranularity.FULL: + # Note: we don't recompute lm head activs + hardware_flops = model_flops + decoder_flops_fwd # + activ recomputation + elif recompute_granularity is RecomputeGranularity.SELECTIVE: + # all terms with s^2 are flops that are recomputed + # ref.
appendix A: https://arxiv.org/pdf/2205.05198.pdf + recomputed_decoder_flops = decoder_qk_logits_flops_fwd + decoder_v_logits_flops_fwd + hardware_flops = model_flops + recomputed_decoder_flops + else: + raise ValueError("recompute_granularity must be one of 'full' or 'selective'") + + return model_flops, hardware_flops diff --git a/pretrained/MiniCPM-2B-dpo-bf16/checkpoint_metadata.json b/pretrained/MiniCPM-2B-dpo-bf16/checkpoint_metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..d4edf002aa5cc3e4ac7b7258643ee9f8d008787a --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/checkpoint_metadata.json @@ -0,0 +1,9 @@ +{ + "dp": 1, + "metas": { + "consumed_train_samples": 0, + "last_train_step": 0 + }, + "tp": 1, + "version": "1.2" +} \ No newline at end of file diff --git a/pretrained/MiniCPM-2B-dpo-bf16/config.yaml b/pretrained/MiniCPM-2B-dpo-bf16/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9e9bfcfa617f5f008e4a92592cfa35c7d7b3d3c2 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/config.yaml @@ -0,0 +1,55 @@ +checkpoints: null +data: null +general: + benchmark_csv_path: null + consumed_train_samples: null + ignore_sanity_checks: false + project: openbmb + run: MiniCPM-2B-dpo-bf16 + seed: 42 + step: 0 +lighteval: null +logging: null +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.025 + make_vocab_size_divisible_by: 1 + model_config: + attn_pdrop: 0.0 + bos_token_id: 1 + dim_model_base: 256 + eos_token_id: 2 + hidden_act: silu + hidden_size: 2304 + initializer_range: 0.1 + intermediate_size: 5760 + max_position_embeddings: 2048 + num_attention_heads: 36 + num_hidden_layers: 40 + num_key_value_heads: 36 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_theta: 10000.0 + scale_depth: 1.4 + scale_emb: 12 + tie_word_embeddings: true + use_cache: true + vocab_size: 122753 +optimizer: null +parallelism: + dp: 1 + pp: 1 + pp_engine: 1f1b + recompute_granularity: SELECTIVE + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: openbmb/MiniCPM-2B-dpo-bf16 + tokenizer_revision: null +tokens: null diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/0/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/0/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..ca39428bad7ddedb0341ce467ff3f6316d4f638d --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/0/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36d4080827449bba1682daf6cab4b546e2fc02bb6a5c62efff2470a7e83202f5 +size 10617072 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/0/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/0/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..4b703ec3255bec038a17ed01ecaa54b49c19c37b --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/0/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:e8843d169ca3025d42ddfdb590fc51d01122c1f9212f9581d4dd319430f0e45b +size 31850848 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/0/pp_block/input_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/0/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..38f92394efc052c7ecc3b98afca6fedefc95c6cd --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/0/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:beb60636c7e802f851754225517f7f375a0faff02721fd37a2d460a2ca61458d +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/0/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/0/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..bc71e3be8834f15fe89c7cc42e3b082040fdbd79 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/0/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5fec63ea6756d045071b457036fe09b783b34570efa1705d1d5120611db651f +size 26542320 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/0/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/0/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..89aba47a6436317dd67bd3ba550978e772706bd5 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/0/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b895ee74603d6a88a024041a16266e351714207a61f966ceeb5e638b4ba1a4ed +size 53084456 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/0/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/0/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..57122bb059d373cbbb37470c82cf798c7bc02935 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/0/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a319fc293f19f00db8aa2811b1a730a38a5966459493151020240c7adcb5dfd6 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/1/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/1/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..a0b4c9be084706d8122716f7863cdc7c3ce25833 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/1/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:495d95a04eea9c36ec15b8c1add6e51a52052ee0fd90b446683d7a412d01b526 +size 10617072 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/1/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors 
b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/1/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..2b7355a5efea911376b5f215bccb79fddff86d96 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/1/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:908e028114e8dc887ca2e80245678ea7ab08662b8462bdf9111d5e342f9de729 +size 31850848 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/1/pp_block/input_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/1/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..6d140cc7bf51919193df69ba9b023f54b386b26e --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/1/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6a6bc147e3a4d94a72c4a48c86b7b4517662141923380b80e216897e0c647bf +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/1/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/1/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..d5cb8213a713154c594765ef39a6ce4100cd5b75 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/1/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88805e4d760f1e78e2d42e7ff18c1ffa61051571bf8b9eefc6fdd9f3843770e4 +size 26542320 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/1/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/1/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..b999537f1674ad5186c1b878f7f20fbb124715e5 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/1/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16ff202e99dfe71bb7619cc3580a9f59c788868f8f6fc58c6c694f5eae562c89 +size 53084456 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/1/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/1/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..83beef8d2a8e1436517b1fd16d2ced448fe6e3eb --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/1/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f240f0046868ee13ee354feb943d5c382358774d8e1533dd53e16040d9618124 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/10/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/10/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..8435d9963a6605cc7c301ecf4cba930c1aea56f0 --- 
/dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/10/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56ab2210f9e6e6018e84b6e96ad6bdc525e46396049ff66caa0285fb7a170120 +size 10617072 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/10/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/10/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..759fcc0d5686eae06296f3a485b0c7913a257218 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/10/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8736369fe6db6ca62bdd30014a6c920c6d6bb570c85b79d5ce43bff75366a499 +size 31850848 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/10/pp_block/input_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/10/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..745eb97a6127e4e78ccb5dfc495c679e8985d842 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/10/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e37863cb49b75e41eff480d6fb94a3240cbc884dc2e76215d36f8d6207fba5ee +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/10/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/10/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..91bc6c0312fa1e56785d8bf7c988f4f297c580f0 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/10/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b7d3522e6a49963803f5e4248ff76e2a9d51a044c76cfac156bc581b794f492 +size 26542320 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/10/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/10/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..786d13a74006283aff0621711f5700e91dad6402 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/10/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1fdd9830af039908d5a402c6edeccb2527d5cdddfbe020dd3e5adc4d33e0371 +size 53084456 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/10/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/10/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..f732e429db09e4c46953e87761a446096e08d7da --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/10/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:639ee973e90fb837781436fbf443facd7ed1f73f070041ffd71c14196d06e2ce +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/11/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/11/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..5686d4ec525baacb0193870f25236b3ac2247a09 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/11/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70f9c571fb7c57aa4a57d51fe4c5d05fe21a34e32a2f4896049c1d9bed6ea32b +size 10617072 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/11/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/11/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..8f44fd86b90f45a3e31b651b2c212f952f449fe0 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/11/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f4a20b46bd3098c0f1efdd665bba8f99adc2363257db31a8b7a0e4e73a540bd +size 31850848 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/11/pp_block/input_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/11/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..69de48fe21b930a766d0a384037f139d19e28ddf --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/11/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c572d06613b5e508d138b5af0180b5b3a09c78abc7b06a07039fec736487d1e9 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/11/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/11/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..4b421b8e55177599a6b56f0d6a3a11363cff4907 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/11/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d021e4f9a713730b67f2e8df642639628ebd5674eb960c39575c62e3d92212f +size 26542320 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/11/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/11/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..c40acf0a4ee5e39992a8107bf258a89b8058cfef --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/11/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a290b7bf65372eb56ae8036fbe048ee0d94a39e01debf2c07600d2228bd6cb0c +size 53084456 diff --git 
a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/11/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/11/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..ba5fb6d877437999ac00001ce21195107e7c3b1d --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/11/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03a6490c81420efe127f1f8dc68c7f8734a9b936ca901e2f5e2485d90d178469 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/12/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/12/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..7a6a78f8f0a29b31636e9c9f1f51d26763fb090d --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/12/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1d3c77097833aa4ea96466ae247220e1f2a4ef6957c65f3257ad8b1da84ef70 +size 10617072 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/12/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/12/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..208a6497c1ac7a2c3d94de942cb3362e13de29d5 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/12/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ca582e7a768fa2766bf655a6acb54ee6d8c3ec1fe63a7b8bc5ebb53846828f2 +size 31850848 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/12/pp_block/input_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/12/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..e7bbfc1a6173637d9f11b5d1cc5b4ebbf92bf93b --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/12/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b66f9fc608826efaa57dff502e1f0ba1bea00996208f1e8e875f58459d82c65 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/12/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/12/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..46d38d25ec80d353820c4a0d894f88f75a153b3f --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/12/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36035e96236f5c5f39b19d5f53af38da02d05317849852bc5f2dd0eca315148a +size 26542320 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/12/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors 
b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/12/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..ca1492f076749e9a1abd8a32d7f19be57d772f1a --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/12/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1c6047d8d338b808b94f73626c1a0ef9ac33a4c8cf964041ba8c9db3d8f9629 +size 53084456 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/12/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/12/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..d13cf5645f6601711699124db22ae75a992b1d9a --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/12/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3c926d53d3eb31a3c6aea0390794e16fd9ac99d4a99038a48de9ce750895ed4 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/13/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/13/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..107f83c2733603a4398b03fd34d8e6d2acde1ccd --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/13/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f1b53cb9c0ea5fe7080e68a0ad6e988aaf958315d3b58276d9ec7e2bd239e67 +size 10617072 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/13/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/13/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..582f813ada88e26ee6bd78ba146070032f17ca9a --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/13/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08eca1997fce9a5815f81299a83848066b309a2d031f88e5012878236ae4650c +size 31850848 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/13/pp_block/input_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/13/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..eb2d5250f588131223b9449ccf5f183e060b1ae2 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/13/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab3f62ae9b040d8aa3e1b9d3a8b81d89ad731c5d9bb32757a43b116fcfd79ca0 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/13/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/13/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 
0000000000000000000000000000000000000000..bb923837756cdd69f6f91c293e06430e2e368647 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/13/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb415ebe5cbfc55a84d7657c74cf1634a9f534a5980242e59484b9c2a041d896 +size 26542320 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/13/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/13/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..6c19cdd645670716f869e2ebd88f1e5e02572dc4 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/13/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4baf79283bdd79707aabf64311ee95bb4149435bb4469aa0e43aed65caa8c667 +size 53084456 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/13/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/13/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..06258e5c01cd19e628f976715e6f731ccb9040eb --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/13/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5543638dcaccb01fa706781c9dc67b820df41a7209c25494dc82b6db8e63b1f3 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/14/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/14/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..46602aa407af98e73918b2f7983134c98d0d51dc --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/14/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0883579e29c4b28ee7f4af72be5d77c2f045fca9757ca06f6d0a8c75de6abd0f +size 10617072 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/14/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/14/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..84a8701681927d514146e479cde4f6b7c1b20f7f --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/14/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c436a6adf43608478642ab6efbf750101b4b3fc4d19425b649443309bde8417a +size 31850848 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/14/pp_block/input_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/14/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..59d16985ffc7ca055257fb484f8271d8f810b31a --- /dev/null +++ 
b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/14/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43b8fe2b4b03bbe4ddbd0dc572b4cfac0286624eb2865d96f9afdb75c727643f +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/14/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/14/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..bb65006b7df6f3f92d29f79799cbd480a985d3bf --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/14/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6194cb03e3b7c3260e8a34e632f7736177a5ee15a46d184d90e6e94d5a6802e +size 26542320 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/14/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/14/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..9824bc11eb1668eac78c80fddb4890b22613f608 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/14/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec68dd15fe1f1643fec13bb3cb19d8139beed19fdf48075c097baf16c5f23ba3 +size 53084456 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/14/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/14/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..bd977e35422eba117d69897f091303710c7de107 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/14/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5863fda36bc008eb3cad06022a60e0a5ba90f85362639938849bf18047836646 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/15/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/15/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..a2642be73b0a8c7fe675e2524118e8f3c6f559ce --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/15/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:791662d57e100e6f2e806102876e8dac3e67f459afca1d3930eafe88fb313082 +size 10617072 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/15/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/15/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..c4ce1816b96fca75ce096809f2e9ad2057a66414 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/15/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:be49d472ac23b3305533ff560d88769e4a767faeea7d089e7d211a8fa9f7c794 +size 31850848 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/15/pp_block/input_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/15/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..3961710f0f7f079ce02ea6f4b66085316b5becc6 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/15/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6cefa5b8706dd635882329157050134322bddc6ae6f765b17baef7160d09db0 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/15/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/15/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..b72669e2d3477734f70718534fdffbc5a924aa4d --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/15/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0bf4fccb952e2dda3c5ca5e1925c90be09394cbcacfdd3654fa3dd77df37d6dc +size 26542320 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/15/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/15/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..985a0be89bfd30833ce59db208bef70ff90f7312 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/15/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bafcbd10b1097171388599a5ef735adca080e1d02dd5a603f38d5df783e3b65c +size 53084456 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/15/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/15/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..20e23d1e6514dd896426f603d606584e6c5b77c4 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/15/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5785197afe7a88464c2d17f004d8060175dbea3bc608d783d7829de333bf7a6c +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/16/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/16/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..ce77b9d470a7c9b8fae6c822decc24bdc0e10a4e --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/16/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33a8b7db47ed8caa037f36f66ca33f2750e67ac351435508bfa3c533c8c219cb +size 10617072 diff --git 
a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/16/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/16/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..d059bc714223608d8fad5b92aad20248dfcc435a --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/16/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdfa6a02441c659310e7e3b6e03a13d49030b15218655285343f63bc836de554 +size 31850848 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/16/pp_block/input_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/16/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..c39ae46b1d11cf28dd1ba16d1795049451eb7df1 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/16/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d5a937b09f1e72d5588b9543cc86c28cf3f57ad84ed7908252c2df9f19b3125 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/16/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/16/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..eeec76bb9e54e51597d0ed69f131ac6f2001fddb --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/16/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fb7a1b0b7f556bebe3db5893a78f7b065641515b99011d5d355ad071cf793d3 +size 26542320 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/16/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/16/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..b563153aa155dad2183bec6af564a3171b952997 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/16/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:761ee01a444dacb7612b9c8d0373009eec32af580aaf74531317ea1172ce9906 +size 53084456 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/16/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/16/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..dad6bc9090234e1dbe079cfd7b30095705e46200 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/16/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e0c8d300809c0a7339d961fce29b37043d89fadf242551782960279873e51d6 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/17/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors 
b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/17/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..99e643f2c4192c7bcd4cb0d69fe3c1210e52cd3d --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/17/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:83ba32c460cd6486ba760aab8f940315a6033d5bde3383931b2a5a045b8864a9 +size 10617072 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/17/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/17/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..e4c9ca86ad4e4e05aafebac37a14fe2ef1366a90 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/17/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ddfa01b62cfa52b7a87388e8e8bf3b34487a43d4ca643eb8ec01a7a99133c6b +size 31850848 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/17/pp_block/input_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/17/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..eb0609d8527a091ae1c0948bb2d4d53f3272bcfd --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/17/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6fb4b76dbafae798b82181bb8b30ac9b6e44a5a012a8f1cc955dd23f0a04375b +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/17/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/17/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..11c4df7bf6bb9ac681343cb7f3d8af376073ffce --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/17/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7a231616d5238eb7cab59daee6d808e098bb965a80ea0ac4d1099cb1afed6db +size 26542320 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/17/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/17/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..3a742c8bc2381f1e984019b39b9affe673a863dc --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/17/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6bbecac20f9c9c120fd4c280f1c22b7325ad6c640b00a2dfc02cc68ba6de5722 +size 53084456 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/17/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/17/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 
0000000000000000000000000000000000000000..de12541e352ee2d45b80f57e717e642b96654450 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/17/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86c630560ed2b2f928ed9c0418c69889093a18e40375fa4486fdaa335ab42727 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/18/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/18/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..7ee23618aaf9124a333fddc89fe3a816e2bab483 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/18/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80cbc4118ed3cc9716ecc83ec1062a8b061c7bd37e4f07adcb9dadd11f349f34 +size 10617072 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/18/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/18/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..05d72522a0d5f5152dd0c448ec618dce5a459a82 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/18/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ccd496001d4ee4ee442cfd06cdf7ded1c747bca24141516bc7bae792fc4e5bc +size 31850848 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/18/pp_block/input_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/18/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..a78208413b7498787f512b3c2b34cd633e5041f6 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/18/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec88fe067fbb83487b857711a227fbe1d95025d79ca9768ec7fba3d13fba9858 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/18/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/18/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..1f74341f941305f8d78c85e27ff1356eeae87216 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/18/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d545abe7b038c0de64fc0b0edda0c963918e1fee2dd1ab7e96ea678340e574f +size 26542320 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/18/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/18/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..fdcff78575a128834a0d39d0043a01d3840bd988 --- /dev/null +++ 
b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/18/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb3f901b96d1295536544fb49355ea8b75d00c3a6c4e08d160615174f5bda503
+size 53084456

[109 further Git LFS pointer stanzas elided — identical in form to the one above, one per weight shard, differing only in file path and sha256 oid. They add decoder/18/pp_block/post_attention_layernorm and the full six-tensor set for decoder layers 19, 2, 20–29, 3, and 30–34 (paths sort lexicographically), where each pp_block contributes attn/qkv_proj (31850848 B), attn/o_proj (10617072 B), input_layernorm (4704 B), mlp/gate_up_proj (53084456 B), mlp/down_proj (26542320 B), and post_attention_layernorm (4704 B).]

diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/35/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/35/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c831e7d64c5f4c7352d2b716a75fbd24f4b517fe
--- /dev/null
+++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/35/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid
sha256:d5c0b449560e08bc92ca1864712cc2e7238cea4b6b4bcb0047567be9ca258cd0 +size 10617072 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/35/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/35/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..2bf3ef435a8402bec1e881e82562e9550a1c2393 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/35/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24ced81485df415d9f7c98cb8bb0f2fd4d60db4e7924b59e4ab76db407a6d3e1 +size 31850848 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/35/pp_block/input_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/35/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..c4182c23461ae815ec7353b96d79589c5717f062 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/35/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6abda3b18932964804ff8c79b0d9782a9128ea764417377833295ab362549631 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/35/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/35/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..18df3198f6aa070b08bb1b0bed3da244ac1e2695 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/35/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1c4f76d72a0177a88633850db1b8052a7dbe24a7bc22912cfafc6b685d12968 +size 26542320 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/35/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/35/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..df8604bb69ba30a8683b6c555fffb0d0400f959d --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/35/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea5db525056a40b23611502e896273bb7adcbf8824e33588d3bc756be15d03fd +size 53084456 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/35/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/35/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..41eaf2d78cb3fad07d837f6bc8b9ec7432248ce3 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/35/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5d1f22281d7e98b5792f4bf770a19489edcd8754d05ffda58aa1d0e34fe93e2 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/36/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors 
b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/36/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..7b1c5b6c50dc91660a3fe1c4bf8d4846196c2e81 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/36/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97f5c16bb82ee41944fa13ff43932a39f8585f0587003269997dc173df43e47b +size 10617072 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/36/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/36/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..e6c862a204e06d427d1dfd59951996be773f4b14 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/36/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e868f3391abc4615342b0b140fedb74fd5ff1553d2304af625d1b1439f5335a9 +size 31850848 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/36/pp_block/input_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/36/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..d68d162338633f8e25e6ca76a766b5dc9a8e0354 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/36/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c8540fd3098f7596722b49821720f2fe92d7128d6667429001524a2822ca6a1 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/36/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/36/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..268fe2329a293c492a50412a856b45e3e9494f6f --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/36/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:143efd77d5b482ae31cf06b372c95cbeed1426f6e78c0ff38340b235eb8ddad9 +size 26542320 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/36/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/36/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..defd15d5ffd6b5a1d49ff063635c475e52017411 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/36/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d9a5b86655e05bc2498c84fbb61c1d1e4f3c02586539916042b22b924dc0843 +size 53084456 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/36/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/36/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 
0000000000000000000000000000000000000000..5bce726ecad8b030cf42450c0cb5fad88ac6af50 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/36/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:753d5f137255065b05931bafe34f5ee41d4600f8df6695157c9498f6f95ccfd0 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/37/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/37/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..449d26931e74d0a1b6d5c074c4e5f49c1f93d73d --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/37/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe31f93f22a6a46eb1f7323d76d01f36235e3f2c2332cf7481a171de6649d7d7 +size 10617072 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/37/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/37/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..9bfa5cffdf729ed7b7d8bcb20540b93db2582eb4 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/37/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d15de60ec38da077f14ac7c34d0c1f5bd98cc78cdea94fd920f254df156e99b +size 31850848 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/37/pp_block/input_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/37/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..25a2c0ac589bc677ac56b240eb42325c353d8204 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/37/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e97ab4dde818c38fff129587509c8bc9eae570d587611875108dae35eaaada7 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/37/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/37/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..980012eb58255eadd0e9506e305970593f19e2d0 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/37/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7abdff7150c88f3eb1050f21a75e5fe941b7c6cefe701728fb8d14f9cb46087 +size 26542320 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/37/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/37/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..8ba9272e9bf364f4573ee43191cc3359170e8698 --- /dev/null +++ 
b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/37/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:500ab3af94bc3ca55175476f53b2a4cdc6691302208bd586f701b25e44b11276 +size 53084456 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/37/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/37/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..5fda7af74f7fa65b734c690b1d669b3dfbb2ebc8 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/37/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c955dedd29b45875308e7e0fd82b866c389f40a81da46cc69919fde71d0f7a52 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/38/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/38/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..98e1d6b2933fb02965d6fee1c6c9c3d819b1fad0 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/38/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e2707ad37f4e19ee489ddb685feecc7710a53c94037e71ee151204614eb23b3 +size 10617072 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/38/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/38/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..88aa428449cc295588ea9e57d53644a11bb1f582 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/38/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c12be56a79a3a22e1e064d8eee9f2ef0e376576cd933246ad20d127d3d68f7fc +size 31850848 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/38/pp_block/input_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/38/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..b5eb754318ae73d9b1494ae75c06da874ec677ac --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/38/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdb73921bd622da8d5d3e8cd25de82fffce927e3a0639ef4f31d677f4fd937fd +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/38/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/38/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..b7e0131e79b2f7e29fd22c1d68d43a4d819f02f4 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/38/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:db938a5ecb03e4be0d60964bac299a3fdd74d85560d8886f1dd6230b9aed7941 +size 26542320 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/38/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/38/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..cf82a8e1d83be198f481e443f84b04c117e1dddb --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/38/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f3ba94fcdb0f519103634cc31a141b575c11af8acde495c1c57a181afe4d356 +size 53084456 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/38/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/38/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..71064354c9902d1b07f3af9c55a742b7ab7dd403 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/38/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ecc5616b7e4af515ba003d740d1e091565129f0e1411e1faeb6a9d4bd99965b +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/39/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/39/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..cb8b4ec5d085d5c55b7a245e7deed2fed68cabe8 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/39/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2910537dfea3725cf78342b1bb82aab20fe860e95c0aa223b57774710ccbb981 +size 10617072 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/39/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/39/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..d2649e05d0888d5672f4cd3aa62c2cb92d7131e4 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/39/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e3d039b2bb5336f4d8cf1fc44c5fa7c59e5b016ac568b2f74c5cf814faae443 +size 31850848 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/39/pp_block/input_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/39/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..820db15fe6f448432314c9d4bb41f6a25ad40f8b --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/39/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e29d00bca845c1286640a9269f796319194ae50a8fa77845a7f48397f0e46d00 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/39/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors 
b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/39/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..591c12ff309d684142a5620eebcc7c75a0cb1d53 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/39/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d85512ca736dbbcf7dfd6637907070588120e718a44eb90281a0c6b908a029fd +size 26542320 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/39/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/39/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..f53b2954ba61dd9697b274306c34bd238310b984 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/39/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b7f4a6794a42d7797c77ac8730633e6106ee4849c1472c5f34aa188a47eefcf +size 53084456 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/39/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/39/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..1a474b90c731e1f5e36617c42af9709226d3f5dc --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/39/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58d2546e76194c3885974d4025f7d826faba18762d8c18139af74c5c55527590 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/4/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/4/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..929b5ab13eef8c282725ee74355d648ecc26bd93 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/4/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61d94a2b7b130af47476c62524f6c5b341288fc461108e24b66441f6022227f3 +size 10617072 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/4/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/4/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..cdcb7c2ffc4965e20b3f96b9ba9556d93ff96304 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/4/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85a401909030e07c5cfd611c97c01b9b12ef978e3be901a334993dd6eb003eb2 +size 31850848 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/4/pp_block/input_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/4/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 
0000000000000000000000000000000000000000..051fc6110c7fdb5af29619db43e9907e152c881b --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/4/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9bf27ed66e15e7404e2f9d87cfcb7a0125a6dd9ec7fd65dde2f7f0e8af500a7 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/4/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/4/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..77f8b6a18745967339aecdf3a665633c71481042 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/4/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9437392df6735e444239c37d5fc48147de465da87d0f8f446535b3bed62d18c +size 26542320 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/4/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/4/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..5a5c8e9ace897c13e24e10392f6e8feef337f05a --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/4/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb94edac3c5a55fe62a1ba0114f87f98ee2a9190a8cf4a30696677b631848783 +size 53084456 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/4/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/4/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..ee72e4d6a8d544e8550029ae6c7667888da64be9 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/4/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1d5ca76d4b58628824cebca9b967d9efc71b8746e6da5dffe9ffa0c149e705e +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/5/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/5/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..881fff6fcf47c497daf21c8b94993da483938617 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/5/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65808e5b6aa7295f380786139a66d4361d5a83902d4b347257043df686861b2b +size 10617072 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/5/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/5/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..9e26cf0e79ac95e2aa4542bf54b06ba00ef81809 --- /dev/null +++ 
b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/5/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:373ca4b17a7c9406c5b236f3c9f2953e3e648c6fd4161a237e661cd73ad834fc +size 31850848 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/5/pp_block/input_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/5/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..26976d0ee48a2fb247c0967c21e96d30796e1155 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/5/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8365eebd976d7b7f0dd330e2b782b2650dd91539f2d0372160a76174d85aeef7 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/5/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/5/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..1db3bd3260c73c04982b2858e441418637a49ffb --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/5/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd4d33b3fd0cfd4fd63cfad0aaa18636fca0a5c8078622b3a370aeffcfaf4903 +size 26542320 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/5/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/5/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..0530067b368d643fa9946afe3304ff0675243e9a --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/5/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a3939bd849ffd159a041146609c528eb3450e7fecc1a96099def956de016ea4 +size 53084456 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/5/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/5/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..918c6eb689609807f113237f8f9915aa102ff412 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/5/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c92046456d1e98794782028d1977c2d2b7a90f0089627f524d0195643b73c6e +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/6/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/6/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..55f737632e699e2607affd9d4a98e12a6846a502 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/6/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:927248805544fd572dbf0bc20d54d410a43dd8283c3f63f999cfc5e9cdc10f9f +size 10617072 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/6/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/6/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..f7a52752be8dac8e10db7905bc189ba5f6dc051d --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/6/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6174fa6e04d91317b1091ad2b82c66df94143e1ad1132f72bcee46df679c5d1 +size 31850848 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/6/pp_block/input_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/6/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..f1c209dd60e3db4cc65463cd9e1e664904a39ef3 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/6/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0cc6015ab670abc1e1cbba1c27cd3fe8f41f0836bf08673e37be3d3c7aa3c0d9 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/6/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/6/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..162967882f70b91de3914a0147797cf2387e677d --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/6/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d57fded848275a146267a45c61f04691c3d69ab87911511ce1d7d88d5455f71e +size 26542320 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/6/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/6/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..c5f4e10928e8a75ccd58d5528b3fab91b92a3622 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/6/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2f533b69a6b028f31996da12685c5f737b9daa97142d3f5137aa9e7848c1af6 +size 53084456 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/6/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/6/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..0f5680dac1c823bb3818c29fd8f077f8b09e5e3b --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/6/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff318281834c7e664ef94d2c19556f296b73ff19b9311f4140e20529fa27279e +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/7/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors 
b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/7/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..3109b04c62a9557ab44979bb3ce83bab9bd024f2 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/7/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42916d7b71ed7edcbf306c6d8d8945fb30068c1d5cea40fd2de47234bc4587c6 +size 10617072 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/7/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/7/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..2ec486703a87fe216503795e41f0a5a39f3441b5 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/7/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4133ecbdaee4eb245a3e6e4ff140293767e99fdbca231f775b45099b1c64d11e +size 31850848 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/7/pp_block/input_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/7/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..19a7733e78216a57fe2029ae324b1e23e1d94af0 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/7/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c3bb8102bd24253f710b09049f663a0c3259f9dd0bd083ff0518ecf08150cd9 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/7/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/7/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..be0cb9089fa8766261adbf965ef5ce172703d157 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/7/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66b047c57d1fcfffd85735257833cfdd0c66c9e0b3e4bcfa2fa7589568a299ca +size 26542320 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/7/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/7/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..96485755694afff526d882a1e657378579660793 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/7/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60854fcc0c2eaf2967826fe19d4a507ecd48508a9bc17c2139cb59ae47cf8ad9 +size 53084456 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/7/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/7/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 
0000000000000000000000000000000000000000..02138c8f5118296cde53366142c35e2dc4d33ed1 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/7/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d14cea9d6f80cd3102a1579e041055c732bef3922099bd50fe8951db42966168 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/8/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/8/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..3bae5dee1f7e9ee2753c72cdf707b6e3276f1a1a --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/8/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:54328475377ea67e86fbbd394760e9da7442e311f1d3ac38baed679d17e3c48d +size 10617072 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/8/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/8/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..26dee3a54a21b9905c4d36514f423e43685951ba --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/8/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c062a728bf6a12dd463f871d0e512f3dd422e6fe7a32f3b69743c8b24d8d94e8 +size 31850848 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/8/pp_block/input_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/8/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..185d321c1777b953b7adc0ffacb2b41ae170d840 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/8/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0f7ee8ff9a9864edf0f6054085cdfbd1e40e9b358f67f9182605d7e7286fb27 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/8/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/8/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..b37bd3149bca18a21698d39e69e7cf65a71ec932 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/8/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ce76ef02204ee8ced730002e2fe757093574cc71b142aa21f9220084d5bded6 +size 26542320 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/8/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/8/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..5d00e6454ca42d95fa8a37e43f9cf818f4126b0c --- /dev/null +++ 
b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/8/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6dbf24b9e58a52eea93b480e663cd63b6c56ee5c4ea1339348ef4de796693823 +size 53084456 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/8/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/8/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..2ef51fc2905533743c9112bb1478c0e936bb26b7 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/8/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7bc887759cfb77384ddf4476ac42e081fa1d2a965c9b2c61b8811b500000a56a +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/9/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/9/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..97f3f0e485c65327278b199f48c70614b40f1fae --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/9/pp_block/attn/o_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c364b5259469352e532253e47a868a42a68c36a0a51aa0911b5e3d65b587c12d +size 10617072 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/9/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/9/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..78d39a4fee8ce1ff913b76cd960bf6ffc648c090 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/9/pp_block/attn/qkv_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ca831a0497261fbd17e4609732b27c7c28edb55aaa60a6b76afe7910cd431db +size 31850848 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/9/pp_block/input_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/9/pp_block/input_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..688f12518fca64f90f8b641875e401b32e35dc93 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/9/pp_block/input_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fef3344e4716bb2d2721c0c61f8301e2a8d54426d0450f5144a16a27ced7b744 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/9/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/9/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..e6b703d73a550c57f4670a478294b88684fac8a8 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/9/pp_block/mlp/down_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:14dffa73cd41c76dac0924758c56b7c748b5e28d066493a4a35c44e20c9a6a1c +size 26542320 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/9/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/9/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..a99000094bdcbcab560c9f918328e06efdaf6529 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/9/pp_block/mlp/gate_up_proj/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:134c37a2efba8134a421d56dc96da6c568a987dd7a84d4269f3ddee93f9dfd50 +size 53084456 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/9/pp_block/post_attention_layernorm/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/9/pp_block/post_attention_layernorm/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..8785231ff220380599f6abcaa45683a9f1ba8afe --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/decoder/9/pp_block/post_attention_layernorm/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:faad215d7caa1d8bff23526bd5236b00fa66da2f0057897afee3f16609f0945b +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/final_layer_norm/pp_block/model_weight.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/final_layer_norm/pp_block/model_weight.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..1a76001352c2c2801d0aa065f880456a91a91a7c --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/final_layer_norm/pp_block/model_weight.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:760da6445677d98879f4edf025dea0ddedc1f555e2132090cfae79ab7c1443c0 +size 4704 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/lm_head/pp_block/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/lm_head/pp_block/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..0186d99f0aa315eb168ba996a94961e2ac0fed05 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/lm_head/pp_block/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b7aa3a4b767f40bf2a850f13ac188950ddc26f3237b91e6b750e406059c0ed1 +size 565646072 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model/model/token_position_embeddings/pp_block/token_embedding/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors b/pretrained/MiniCPM-2B-dpo-bf16/model/model/token_position_embeddings/pp_block/token_embedding/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..0108345b64f2255c68ee7e0e4b0d3332e7ec7339 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model/model/token_position_embeddings/pp_block/token_embedding/model_weight_pp-rank-0-of-1_tp-rank-0-of-1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1239e0f2ca77ed832b785424d75850a2f96babbd2261ea869b81b770c998d67 +size 565646072 diff --git a/pretrained/MiniCPM-2B-dpo-bf16/model_config.json b/pretrained/MiniCPM-2B-dpo-bf16/model_config.json new file mode 100644 index 
0000000000000000000000000000000000000000..424754b491db7f806effe576c0d8cfabbdf9ec72 --- /dev/null +++ b/pretrained/MiniCPM-2B-dpo-bf16/model_config.json @@ -0,0 +1,23 @@ +{ + "attn_pdrop": 0.0, + "bos_token_id": 1, + "eos_token_id": 2, + "pad_token_id": null, + "hidden_act": "silu", + "hidden_size": 2304, + "initializer_range": 0.1, + "intermediate_size": 5760, + "max_position_embeddings": 2048, + "num_attention_heads": 36, + "num_hidden_layers": 40, + "num_key_value_heads": 36, + "pretraining_tp": 1, + "rms_norm_eps": 1e-05, + "rope_theta": 10000.0, + "tie_word_embeddings": true, + "use_cache": true, + "vocab_size": 122753, + "scale_emb": 12, + "dim_model_base": 256, + "scale_depth": 1.4 +} \ No newline at end of file diff --git a/run_generate.py b/run_generate.py new file mode 100644 index 0000000000000000000000000000000000000000..ba972122e02217b5cda86598cf2dde5481d80a7c --- /dev/null +++ b/run_generate.py @@ -0,0 +1,243 @@ +""" +Nanotron Inference Script + +Usage: +``` +export CUDA_DEVICE_MAX_CONNECTIONS=1 # important for some distributed operations +torchrun --nproc_per_node=1 run_generate.py --ckpt-path ./pretrained/MiniCPM-2B-dpo-bf16 +``` +""" + +import argparse +import os +from pathlib import Path + +import torch +from nanotron import distributed as dist +from nanotron import logging +from nanotron.config import GenerationArgs, LoggingArgs, ParallelismArgs, get_config_from_file +from nanotron.generation.decode import GenerationInput, TokenizerConfig, decode_text, decode_tokenized +from nanotron.logging import log_rank, set_logger_verbosity_format +from nanotron.models import build_model +from nanotron.parallel import ParallelContext +from nanotron.parallel.parameters import sanity_check +from nanotron.parallel.pipeline_parallel.engine import ( + OneForwardOneBackwardPipelineEngine, +) +from nanotron.parallel.pipeline_parallel.tensor_pointer import TensorPointer +from nanotron.parallel.tensor_parallel.enum import TensorParallelLinearMode +from nanotron.random import ( + RandomStates, + get_current_random_state, + get_synced_random_state, + set_random_seed, +) +from nanotron.serialize import ( + load_weights, +) +from nanotron.trainer import CONFIG_TO_MODEL_CLASS, mark_tied_parameters + +from brrr.config import BrrrConfig +from config_minicpm import MiniCPMConfig +from modeling_minicpm import MiniCPMForTraining + +try: + from transformers import AutoTokenizer +except ImportError: + AutoTokenizer = None + +logger = logging.get_logger(__name__) + + +def get_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--ckpt-path", type=Path, required=True, help="Checkpoint path") + parser.add_argument("--dp", type=int, default=1) + parser.add_argument("--pp", type=int, default=1) + parser.add_argument("--tp", type=int, default=1) + parser.add_argument("--max-new-tokens", type=int, default=128, help="Maximum number of new tokens to generate") + return parser.parse_args() + + +def main(): + args = get_args() + + assert args.ckpt_path.exists(), f"Checkpoint path {args.ckpt_path} does not exist" + + config = get_config_from_file((args.ckpt_path / "config.yaml").as_posix(), config_class=BrrrConfig, model_config_class=MiniCPMConfig) + model_config = config.model.model_config + tokenizer_path = config.tokenizer.tokenizer_name_or_path + + parallel_config = ParallelismArgs( + dp=args.dp or config.parallelism.dp, + pp=args.pp or config.parallelism.pp, + tp=args.tp or config.parallelism.tp, + pp_engine=OneForwardOneBackwardPipelineEngine(), + tp_mode=TensorParallelLinearMode.ALL_REDUCE, + 
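# ALL_REDUCE here means plain tensor parallelism without sequence parallelism during generation, which selects the tp_synced random-state branch below +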
recompute_granularity=None, + tp_linear_async_communication=False, + ) + + # Initialise all process groups + parallel_context = ParallelContext( + data_parallel_size=parallel_config.dp, + pipeline_parallel_size=parallel_config.pp, + tensor_parallel_size=parallel_config.tp, + ) + + # Set log levels + logging_config = LoggingArgs( + log_level="info", + log_level_replica="info", + ) + + if dist.get_rank(parallel_context.world_pg) == 0: + if logging_config.log_level is not None: + set_logger_verbosity_format(logging_config.log_level, parallel_context=parallel_context) + else: + if logging_config.log_level_replica is not None: + set_logger_verbosity_format(logging_config.log_level_replica, parallel_context=parallel_context) + + log_rank(f"model_config: {model_config}", logger=logger, level=logging.INFO, rank=0) + log_rank(f"tokenizer_path: {tokenizer_path}", logger=logger, level=logging.INFO, rank=0) + + dtype = torch.bfloat16 + + # Set random states + set_random_seed(42) + + # Get synchronized random states + if parallel_config.tp_mode is TensorParallelLinearMode.ALL_REDUCE: + random_states = RandomStates( + {"tp_synced": get_synced_random_state(random_state=get_current_random_state(), pg=parallel_context.tp_pg)} + ) + else: + # We don't need to sync across TP when using sequence parallel (REDUCE_SCATTER) + random_states = RandomStates({}) + + model = build_model( + model_builder=lambda: MiniCPMForTraining( + config=model_config, + parallel_context=parallel_context, + parallel_config=parallel_config, + random_states=random_states, + ), + dtype=dtype, + parallel_context=parallel_context, + ) + + # Mark some parameters as tied + # TODO @nouamane: this is only needed for training, can we just mark params as NanotronParameter instead? + mark_tied_parameters(model=model, parallel_context=parallel_context, parallel_config=parallel_config) + + # Sanity check model + sanity_check(root_module=model) + + # Load checkpoint + checkpoint_path = args.ckpt_path + log_rank( + f"Loading checkpoint from {checkpoint_path}:", + logger=logger, + level=logging.INFO, + rank=0, + ) + load_weights(model=model, parallel_context=parallel_context, root_folder=checkpoint_path) + + model.eval() + if AutoTokenizer is not None: + tokenizer = AutoTokenizer.from_pretrained(tokenizer_path) + # tokenizer.pad_token_id = tokenizer.eos_token_id + if tokenizer.pad_token_id is None: + if tokenizer.eos_token_id is not None: + tokenizer.pad_token_id = tokenizer.eos_token_id + elif getattr(model.config, "pad_token_id", None) is not None: + tokenizer.pad_token_id = int(model.config.pad_token_id) + elif getattr(model.config, "eos_token_id", None) is not None: + tokenizer.pad_token_id = int(model.config.eos_token_id) + else: + tokenizer.add_special_tokens({"pad_token": "[PAD]"}) + tokenizer.padding_side = "left" + tokenizer.truncation_side = "left" # TODO @nouamane: do we want this? + dummy_inputs = [ + # "Passage: Daniel went back to the garden. Mary travelled to the kitchen. Sandra journeyed to the kitchen. Sandra went to the hallway. John went to the bedroom. Mary went back to the garden. Where is Mary?\nAnswer:", + # "def fib(n)", + "This film was probably inspired by Godzilla", + ] + + outputs = decode_text( + input_iter=(GenerationInput(text=text) for text in dummy_inputs), + tokenizer=tokenizer, + # TODO @thomasw21: From ModelWithLoss extract the model. 
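+ # the training wrapper pairs the transformer with its loss head, so decode_text receives the inner transformer via `model.model`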
+ model=model.model, + parallel_context=parallel_context, + max_new_tokens=args.max_new_tokens, + max_micro_batch_size=2, + generation_config=GenerationArgs(sampler="greedy", use_cache=True), + tokenizer_config=TokenizerConfig(max_input_length=None), + is_bench=os.environ.get("USE_BENCH", "0") == "1", + ) + for output in outputs: + input_ids = output.input_ids + generated_ids = output.generation_ids + if isinstance(input_ids, TensorPointer): + assert isinstance(generated_ids, TensorPointer) + continue + assert isinstance(generated_ids, torch.Tensor) + + log_rank( + f"input: {tokenizer.decode(input_ids, clean_up_tokenization_spaces=False)[:1000]}", + logger=logger, + level=logging.INFO, + rank=0, + ) + + log_rank( + f"generation: {tokenizer.decode(generated_ids[len(input_ids) :], clean_up_tokenization_spaces=False)}", + logger=logger, + level=logging.INFO, + rank=0, + ) + + log_rank( + "--------------------------------------------------", + logger=logger, + level=logging.INFO, + rank=0, + ) + else: + outputs = decode_tokenized( + input_ids=torch.zeros(1, 1).to(dtype=torch.int64, device="cuda"), + input_mask=torch.ones(1, 1).to(dtype=torch.bool, device="cuda"), + model=model.model, + parallel_context=parallel_context, + generation_config=GenerationArgs(sampler="greedy", use_cache=True), + max_micro_batch_size=1, + max_new_tokens=12, + returns_logits=False, + ) + for output in outputs: + input_ids = output.input_ids + generated_ids = output.generation_ids + if isinstance(input_ids, TensorPointer): + assert isinstance(generated_ids, TensorPointer) + continue + assert isinstance(generated_ids, torch.Tensor) + log_rank( + f"generation: {generated_ids[len(input_ids) :]}", + logger=logger, + level=logging.INFO, + rank=0, + ) + + log_rank( + "--------------------------------------------------", + logger=logger, + level=logging.INFO, + rank=0, + ) + + dist.barrier() + + +if __name__ == "__main__": + main() diff --git a/run_train.py b/run_train.py new file mode 100644 index 0000000000000000000000000000000000000000..c9160c5e7a9459709d08e3d676ed1707b2832b6e --- /dev/null +++ b/run_train.py @@ -0,0 +1,33 @@ +""" +Nanotron training script. + +Usage: +``` +export CUDA_DEVICE_MAX_CONNECTIONS=1 # important for some distributed operations +torchrun --nproc_per_node=8 run_train.py --config-file config_tiny_mistral.yaml +``` +""" +import argparse +from nanotron.trainer import DistributedTrainer + +from dataloader import get_dataloader +from modeling_mistral import MistralForTraining +from config_tiny_mistral import MistralConfig + + +def get_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--config-file", type=str, required=True, help="Path to the YAML or python config file") + return parser.parse_args() + + +if __name__ == "__main__": + args = get_args() + config_file = args.config_file + + # Load trainer and data + trainer = DistributedTrainer(config_file, model_class=MistralForTraining, model_config_class=MistralConfig) + dataloader = get_dataloader(trainer) + + # Train + trainer.train(dataloader) diff --git a/training_config_minicpm.py b/training_config_minicpm.py new file mode 100644 index 0000000000000000000000000000000000000000..1cd473dac2f3f0164fddd4251a3b86947c7756db --- /dev/null +++ b/training_config_minicpm.py @@ -0,0 +1,116 @@ +""" Example python script to generate a YAML config file which can be used to run a training with nanotron. Refer to "examples" section in the `/README.md` for more information. 
+ +Usage: +``` +python training_config_minicpm.py +``` +""" +import os +from dataclasses import dataclass +from typing import Optional + +from nanotron.config import ( + CheckpointsArgs, + Config, + DataArgs, + GeneralArgs, + LoggingArgs, + LRSchedulerArgs, + ModelArgs, + OptimizerArgs, + ParallelismArgs, + PretrainDatasetsArgs, + RandomInit, + TokenizerArgs, + TokensArgs, +) +from nanotron.logging import human_format + +from config_minicpm import MiniCPMConfig, get_num_params + + +model_config = MiniCPMConfig( + # Config for a MiniCPM model with 2B parameters + bos_token_id=1, + eos_token_id=2, + hidden_act="silu", + hidden_size=2304, + initializer_range=0.1, + intermediate_size=5760, + max_position_embeddings=2048, + num_attention_heads=36, + num_hidden_layers=40, + num_key_value_heads=36, + pretraining_tp=1, + rms_norm_eps=1e-05, + rope_theta=10000.0, + tie_word_embeddings=True, + use_cache=True, + vocab_size=50272, # GPT-2 vocab size (50257) padded up to a multiple of 8 + scale_emb=12, + dim_model_base=256, + scale_depth=1.4, +) + +num_params = human_format(get_num_params(model_config)).replace(".", "p") + +print(f"Model has {num_params} parameters") + +seed = 42 + +learning_rate = LRSchedulerArgs( + learning_rate=3e-4, lr_warmup_steps=2, lr_warmup_style="linear", lr_decay_style="cosine", min_decay_lr=1e-5 +) + +optimizer = OptimizerArgs( + zero_stage=0, + weight_decay=0.01, + clip_grad=1.0, + accumulate_grad_in_fp32=True, + adam_eps=1e-08, + adam_beta1=0.9, + adam_beta2=0.95, + torch_adam_is_fused=True, + learning_rate_scheduler=learning_rate, +) + +parallelism = ParallelismArgs( + dp=2, + pp=2, + tp=2, + pp_engine="1f1b", + tp_mode="REDUCE_SCATTER", + tp_linear_async_communication=True, + recompute_granularity="selective", +) + +tokens = TokensArgs(sequence_length=32, train_steps=10, micro_batch_size=2, batch_accumulation_per_replica=1) + +dataset = PretrainDatasetsArgs( + hf_dataset_or_datasets="HuggingFaceH4/testing_alpaca_small", text_column_name="completion" +) + +checkpoints_path = os.path.dirname(os.path.dirname(__file__)) + "/checkpoints" +os.makedirs(checkpoints_path, exist_ok=True) + +config = Config( + general=GeneralArgs(project="debug", run="tiny_minicpm", seed=seed), + checkpoints=CheckpointsArgs(checkpoints_path=checkpoints_path, checkpoint_interval=10), + parallelism=parallelism, + model=ModelArgs(init_method=RandomInit(std=0.025), model_config=model_config), + tokenizer=TokenizerArgs("gpt2"), + optimizer=optimizer, + logging=LoggingArgs(), + tokens=tokens, + data=DataArgs(dataset=dataset, seed=seed), + profiler=None, +) + +if __name__ == "__main__": + file_path = os.path.abspath(__file__) + + file_path = file_path.replace(".py", ".yaml") + # Save config as YAML file + config.save_as_yaml(file_path) + + # You can now train a model with this config using `/run_train.py`
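After generating the YAML, it can be worth verifying that it round-trips back into the config dataclasses before launching a run. The sketch below is illustrative only, not part of the files above: it assumes nanotron's `get_config_from_file` (the same helper `run_generate.py` imports) and that `training_config_minicpm.py` has already written `training_config_minicpm.yaml` alongside itself.

```python
# Illustrative sanity check: reload the generated YAML and confirm the model
# section deserializes back into the MiniCPMConfig dataclass.
from nanotron.config import Config, get_config_from_file

from config_minicpm import MiniCPMConfig

config = get_config_from_file(
    "training_config_minicpm.yaml",
    config_class=Config,
    model_config_class=MiniCPMConfig,
)
assert isinstance(config.model.model_config, MiniCPMConfig)
print(config.model.model_config.hidden_size)  # 2304 for this config
```

Catching a schema mismatch at this stage (for example, passing a keyword like `rope_scaling` that the dataclass does not define) is much cheaper than discovering it inside a `torchrun` launch.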