add config.json
#1 by shunxing1234 · opened
This view is limited to 50 files because it contains too many changes.
See the raw diff here.
- config.json +0 -40
- configuration.json +0 -1
- configuration_telechat.py +0 -94
- generation_config.json +0 -14
- generation_utils.py +0 -162
- model-00001-of-00050.safetensors +0 -3
- model-00002-of-00050.safetensors +0 -3
- model-00003-of-00050.safetensors +0 -3
- model-00004-of-00050.safetensors +0 -3
- model-00005-of-00050.safetensors +0 -3
- model-00006-of-00050.safetensors +0 -3
- model-00007-of-00050.safetensors +0 -3
- model-00008-of-00050.safetensors +0 -3
- model-00009-of-00050.safetensors +0 -3
- model-00010-of-00050.safetensors +0 -3
- model-00011-of-00050.safetensors +0 -3
- model-00012-of-00050.safetensors +0 -3
- model-00013-of-00050.safetensors +0 -3
- model-00014-of-00050.safetensors +0 -3
- model-00015-of-00050.safetensors +0 -3
- model-00016-of-00050.safetensors +0 -3
- model-00017-of-00050.safetensors +0 -3
- model-00018-of-00050.safetensors +0 -3
- model-00019-of-00050.safetensors +0 -3
- model-00020-of-00050.safetensors +0 -3
- model-00021-of-00050.safetensors +0 -3
- model-00022-of-00050.safetensors +0 -3
- model-00023-of-00050.safetensors +0 -3
- model-00024-of-00050.safetensors +0 -3
- model-00025-of-00050.safetensors +0 -3
- model-00026-of-00050.safetensors +0 -3
- model-00027-of-00050.safetensors +0 -3
- model-00028-of-00050.safetensors +0 -3
- model-00029-of-00050.safetensors +0 -3
- model-00030-of-00050.safetensors +0 -3
- model-00031-of-00050.safetensors +0 -3
- model-00032-of-00050.safetensors +0 -3
- model-00033-of-00050.safetensors +0 -3
- model-00034-of-00050.safetensors +0 -3
- model-00035-of-00050.safetensors +0 -3
- model-00036-of-00050.safetensors +0 -3
- model-00037-of-00050.safetensors +0 -3
- model-00038-of-00050.safetensors +0 -3
- model-00039-of-00050.safetensors +0 -3
- model-00040-of-00050.safetensors +0 -3
- model-00041-of-00050.safetensors +0 -3
- model-00042-of-00050.safetensors +0 -3
- model-00043-of-00050.safetensors +0 -3
- model-00044-of-00050.safetensors +0 -3
- model-00045-of-00050.safetensors +0 -3
config.json
DELETED
@@ -1,40 +0,0 @@
-{
-  "apply_residual_connection_post_layernorm": false,
-  "architectures": [
-    "TelechatForCausalLM"
-  ],
-  "auto_map": {
-    "AutoConfig": "configuration_telechat.TelechatConfig",
-    "AutoModelForCausalLM": "modeling_telechat.TelechatForCausalLM"
-  },
-  "attention_dropout": 0.0,
-  "attention_softmax_in_fp32": true,
-  "bias_dropout_fusion": true,
-  "bos_token_id": 1,
-  "eos_token_id": 2,
-  "hidden_dropout": 0.0,
-  "hidden_size": 8192,
-  "initializer_range": 0.02,
-  "layer_norm_epsilon": 1e-05,
-  "masked_softmax_fusion": true,
-  "model_type": "telechat",
-  "n_head": 64,
-  "n_inner": null,
-  "num_key_value_heads": 8,
-  "n_layer": 96,
-  "pad_token_id": 3,
-  "pretraining_tp": 2,
-  "skip_bias_add": false,
-  "skip_bias_add_qkv": false,
-  "slow_but_exact": false,
-  "unk_token_id": 0,
-  "use_cache": true,
-  "vocab_size": 131072,
-  "ffn_hidden_size": 40960,
-  "flash_attn":true,
-  "tie_word_embeddings":false,
-  "training_seqlen":8192,
-  "base_seqlen":8192,
-  "seq_length": 8192
-}
-
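The deleted config.json wires the checkpoint to its remote code through the auto_map entries. A minimal sketch of how such a config is typically loaded with transformers, assuming "telechat-model" is a hypothetical local path or repo id (not part of this PR) and that trusting remote code is acceptable:

    # Sketch: load the custom TelechatConfig via the auto_map entries above.
    from transformers import AutoConfig

    config = AutoConfig.from_pretrained(
        "telechat-model",        # hypothetical repo id / local path
        trust_remote_code=True,  # needed because auto_map points at configuration_telechat.py
    )
    print(config.hidden_size, config.n_layer, config.n_head)  # 8192, 96, 64 per the deleted config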
configuration.json
DELETED
@@ -1 +0,0 @@
-{"task":"text-generation"}
configuration_telechat.py
DELETED
@@ -1,94 +0,0 @@
-# coding=utf-8
-# Copyright 2022 the Big Science Workshop and HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-""" Telechat configuration"""
-
-from packaging import version
-from collections import OrderedDict
-from transformers.utils import is_torch_available, logging
-from transformers.configuration_utils import PretrainedConfig
-from typing import TYPE_CHECKING, Any, List, Mapping, Optional
-
-logger = logging.get_logger(__name__)
-
-class TelechatConfig(PretrainedConfig):
-    """
-    Args:
-        vocab_size (`int`, *optional*, defaults to 160256): Vocabulary size of the Telechat model.
-        hidden_size (`int`, *optional*, defaults to 4096): Dimensionality of the embeddings and hidden states.
-        ffn_hidden_size (`int`, *optional*, defaults to 12288): Dimensionality of the feed-forward hidden states.
-        n_layer (`int`, *optional*, defaults to 30): Number of hidden layers in the Transformer
-        n_head (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer.
-        layer_norm_epsilon (`float`, *optional*, defaults to 1e-5): The epsilon to use in the layer normalization layers.
-        initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
-        apply_residual_connection_post_layernorm (`bool`, *optional*, defaults to `False`): If enabled, use the layer norm of the hidden states as the residual in the transformer blocks
-        hidden_dropout (`float`, *optional*, defaults to 0.0): Dropout rate of the dropout function on the bias dropout.
-        attention_dropout (`float`, *optional*, defaults to 0.0): Dropout rate applied to the attention probs
-        use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions.
-        training_seqlen (`int`, *optional*, defaults to 8192): Sequence length during last finetuning.
-        logn (`bool`, *optional*, defaults to `True`): Whether or not to use logN during extrapolation.
-        embed_layernorm (`bool`, *optional*, defaults to `True`): Whether or not to use embedding layernorm.
-
-    """
-
-    model_type = "telechat"
-    keys_to_ignore_at_inference = ["past_key_values"]
-    attribute_map = {
-        "num_hidden_layers": "n_layer",
-        "num_attention_heads": "n_head",
-    }
-
-    def __init__(
-        self,
-        vocab_size=160256,
-        hidden_size=4096,
-        n_layer=30,
-        n_head=32,
-        layer_norm_epsilon=1e-5,
-        initializer_range=0.02,
-        use_cache=True,
-        bos_token_id=1,
-        eos_token_id=2,
-        apply_residual_connection_post_layernorm=False,
-        hidden_dropout=0.0,
-        attention_dropout=0.0,
-        ffn_hidden_size=12288,
-        training_seqlen = 8192,
-        logn = True,
-        embed_layernorm = False,
-        **kwargs,
-    ):
-        self.vocab_size = vocab_size
-        n_embed = kwargs.pop("n_embed", None)
-        self.hidden_size = hidden_size if n_embed is None else n_embed
-        self.n_layer = n_layer
-        self.n_head = n_head
-        self.layer_norm_epsilon = layer_norm_epsilon
-        self.initializer_range = initializer_range
-        self.use_cache = use_cache
-        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
-        self.hidden_dropout = hidden_dropout
-        self.attention_dropout = attention_dropout
-        self.bos_token_id = bos_token_id
-        self.eos_token_id = eos_token_id
-        self.logn = logn
-        self.ffn_hidden_size = ffn_hidden_size
-        self.training_seqlen = training_seqlen
-        self.embed_layernorm = embed_layernorm
-        self.num_key_value_heads= kwargs.pop("num_key_value_heads", None)
-
-
-        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
-
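The deleted TelechatConfig can also be constructed directly. A minimal sketch, assuming configuration_telechat.py from this repo is on the import path; the values mirror the deleted config.json rather than the class defaults:

    # Sketch: build the config with the values from the deleted config.json.
    from configuration_telechat import TelechatConfig

    config = TelechatConfig(
        vocab_size=131072,
        hidden_size=8192,
        n_layer=96,
        n_head=64,
        ffn_hidden_size=40960,
        num_key_value_heads=8,  # picked up via kwargs.pop in __init__
    )
    print(config.num_hidden_layers)  # attribute_map routes num_hidden_layers -> n_layer, so 96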
generation_config.json
DELETED
@@ -1,14 +0,0 @@
-{
-  "max_length": 8192,
-  "do_sample": false,
-  "use_cache": true,
-  "temperature": 0.3,
-  "top_k": 5,
-  "top_p": 0.85,
-  "repetition_penalty": 1.03,
-  "pad_token_id": 3,
-  "bos_token_id": 1,
-  "eos_token_id": 2,
-  "user_token_id": 4,
-  "bot_token_id": 5
-}
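The deleted generation_config.json carries the sampling defaults plus the TeleChat-specific user/bot token ids. A sketch of reading it with transformers' GenerationConfig, assuming the file is saved locally under a hypothetical directory telechat-model/; non-standard keys such as user_token_id and bot_token_id are generally kept as extra attributes:

    # Sketch: load the deleted generation defaults.
    from transformers import GenerationConfig

    gen_config = GenerationConfig.from_pretrained("telechat-model")  # hypothetical local dir
    print(gen_config.temperature, gen_config.top_k, gen_config.top_p)  # 0.3, 5, 0.85
    print(gen_config.user_token_id, gen_config.bot_token_id)           # 4, 5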
generation_utils.py
DELETED
@@ -1,162 +0,0 @@
-from typing import Optional
-from collections import deque
-from queue import Queue
-import copy
-
-
-class History:
-
-    def __init__(self, tokenizer, history):
-        '''
-        init from a list of dict
-        '''
-        # use deque to meet some special situation
-        self.input_history = deque()
-        self.tokenizer = tokenizer
-        if history:
-            self._transfer_from_list(history)
-
-    def _transfer_from_list(self, history):
-        for message in history:
-            content = message.get("content")
-            # the token result may not be equal to the result model gen
-            message.update(self.tokenizer(content))
-            self.input_history.append(message)
-
-    def append(self, message):
-        content = message.get("content")
-        if "input_ids" not in message or "attention_mask" not in message:
-            message.update(self.tokenizer(content))
-        self.input_history.append(message)
-
-    def append_left(self, message):
-        content = message.get("content")
-        if "input_ids" not in message or "attention_mask" not in message:
-            message.update(self.tokenizer(content))
-        self.input_history.appendleft(message)
-
-    def pop(self):
-        x = self.input_history.pop()
-        return x
-
-    def pop_left(self):
-        x = self.input_history.pop_left()
-        return x
-
-    def update(self, message):
-        self.input_history.pop()
-        self.append(message)
-
-    def __len__(self):
-        return self.input_history.__len__()
-
-    def __str__(self):
-        return self.input_history.__str__()
-
-    def __copy__(self):
-        new_instance = type(self)(self.tokenizer, [])
-        new_instance.input_history = copy.copy(self.input_history)
-        return new_instance
-
-    def __deepcopy__(self, memodict={}):
-        new_instance = type(self)(self.tokenizer, [])
-        new_instance.input_history = copy.deepcopy(self.input_history)
-        return new_instance
-
-
-class TelechatIterTextStreamer:
-    """
-    With reference to the TextIterStreamers in transformers, we have rewritten this class
-    """
-
-    def __init__(
-            self, tokenizer, history: History = None, skip_prompt: bool = False, timeout: Optional[float] = None,
-            **decode_kwargs
-    ):
-
-        self.tokenizer = tokenizer
-        self.history = history
-        self.skip_prompt = skip_prompt
-        self.timeout = timeout
-        self.decode_kwargs = decode_kwargs
-
-        self.text_queue = Queue()
-        self.cache_time = 0
-        self.text_until = ""
-        self.token_until = []
-        self.stop_signal = None
-        self.next_tokens_are_prompt = True
-
-        self.history.append({"role": "bot", "content": self.text_until})
-
-    def put(self, value):
-        """
-        put printable text into queue
-        """
-        if len(value.shape) > 1 and value.shape[0] > 1:
-            raise ValueError("TextStreamer only supports batch size 1")
-        elif len(value.shape) > 1:
-            value = value[0]
-
-        if self.skip_prompt and self.next_tokens_are_prompt:
-            self.next_tokens_are_prompt = False
-            return
-
-        if value[-1] == self.tokenizer.eos_token_id:
-            return
-
-        # there may be some smart way to decode.
-        self.token_until.extend(value.tolist())
-        text = self.tokenizer.decode(self.token_until, **self.decode_kwargs)
-
-
-        if self._is_printable(text) or self.cache_time >= 6:
-            output_text = text[len(self.text_until):]
-            self.text_until = text
-
-        else:
-            self.cache_time+=1
-            return
-
-        self.on_finalized_text(output_text)
-
-    def end(self):
-        """Flushes any remaining cache and prints a newline to stdout."""
-        # Flush the cache, if it exists
-        text = self.tokenizer.decode(self.token_until, **self.decode_kwargs)
-        output_text = text[len(self.text_until):]
-        self.text_until = text
-        self.on_finalized_text(output_text, stream_end=True)
-        self.clear_cache()
-
-    def clear_cache(self):
-        self.cache_time = 0
-        self.token_until = []
-        self.text_until = ""
-        self.history = None
-        self.next_tokens_are_prompt = True
-
-    def on_finalized_text(self, text: str, stream_end: bool = False):
-        """Put the text tuple in the queue."""
-        self.history.update({"role": "bot", "content": self.text_until, "input_ids": self.token_until,
-                             "attention_mask": [1] * len(self.token_until)})
-        self.text_queue.put((text, self.history), timeout=self.timeout)
-        if stream_end:
-            self.text_queue.put((self.stop_signal, self.history), timeout=self.timeout)
-
-    @staticmethod
-    def _is_printable(cp):
-        """Checks whether tokens can be decoded or not"""
-        if "�" in cp:
-            return False
-        return True
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        value_now, history_until = self.text_queue.get(timeout=self.timeout)
-        if value_now == self.stop_signal:
-            raise StopIteration()
-        else:
-            return value_now, history_until
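The deleted generation_utils.py provides a token-aware chat History plus an iterator-style streamer. A minimal sketch of the intended wiring, assuming a tokenizer and model are already loaded through the usual transformers API and that generation_utils.py is importable; generation runs in a thread so the streamer can be consumed concurrently:

    # Sketch: consume TelechatIterTextStreamer like transformers' TextIteratorStreamer.
    from threading import Thread
    from generation_utils import History, TelechatIterTextStreamer

    history = History(tokenizer, [{"role": "user", "content": "Hello"}])
    streamer = TelechatIterTextStreamer(tokenizer, history, skip_prompt=True)

    inputs = tokenizer("Hello", return_tensors="pt")
    thread = Thread(target=model.generate,
                    kwargs=dict(**inputs, streamer=streamer, max_new_tokens=64))
    thread.start()

    for text_chunk, history_so_far in streamer:  # __next__ yields (text, History) pairs
        print(text_chunk, end="", flush=True)
    thread.join()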
model-00001-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:50a4549e91bf98dcb213f6455f92440454fdd012cd0369e0fdb2f019c83fe4c7
-size 8925611272
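Each deleted *.safetensors entry in this diff is a Git LFS pointer (spec version, sha256 oid, byte size) rather than the weights themselves. A sketch of verifying a locally fetched shard against the pointer above, using the oid and size from this first shard:

    # Sketch: check a downloaded shard against its Git LFS pointer fields.
    import hashlib

    EXPECTED_OID = "50a4549e91bf98dcb213f6455f92440454fdd012cd0369e0fdb2f019c83fe4c7"
    EXPECTED_SIZE = 8925611272

    sha256 = hashlib.sha256()
    size = 0
    with open("model-00001-of-00050.safetensors", "rb") as f:  # assumes the real shard was fetched via LFS
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha256.update(chunk)
            size += len(chunk)

    assert size == EXPECTED_SIZE and sha256.hexdigest() == EXPECTED_OID, "shard does not match its LFS pointer"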
model-00002-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:548f67873b1f538bf325590f30dbe267600356ccd1bef73c1f1cc7a2902c2aa7
-size 9261287712
model-00003-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e855d3528914e522dcad5348c2229dc2e88fa55e87416f7d5106437d3679a8d3
-size 9261287712
model-00004-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:509b7084b8dcb308c7c7d22fa988e805b6f6947e356526f60cccdd40ceb2a11d
-size 9261287712
model-00005-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:bb044a2e88e6e7a0817459f7f409eeb5abd00836346f69f8ebfd9d0c47028ebf
-size 9261287712
model-00006-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a3e410d45493b53ba09cdb8d368ecad07df8c1db96a58d928cd7045c8ed7a48f
-size 9261287728
model-00007-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b9cdcab0486ef69e5eedbb99b08f0b55c918a83f227194cb8b8937f010c950a7
-size 9261287736
model-00008-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f2a7cf122475960568ea086f9b98b2283dbf9344dda2720726a52bff7f31edbc
-size 9261287736
model-00009-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:cc5d19f0add344ac71fb3855ca8f04e588fd4d5263e05709ec4dd668ebcf8ee4
-size 9261287736
model-00010-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5d9763dc2004ea25ca751d271f92a1cce636f92c6991077ea391788428cfc6c1
-size 9261287736
model-00011-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ca7e26c5ed4c0fedec09a4cceef1ac7e03d037c921c18d1df0b782ca4fe7b71b
-size 9261287736
model-00012-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e0390b0e55acc47cebac2bc81e57e1e7d3f02f72a0cad2b01b5165caf976ab0b
-size 9261287736
model-00013-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:bbb3ccd22fc048c59e67dab8df54264acf53d7d62fc98fb49ffeafe7ea181739
-size 9261287736
model-00014-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1e2c53dae773a55d42d3265d0d85ae02c8f6f70543a90d923823cab68d44d5ce
-size 9261287736
model-00015-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b813a2adab5ff776a06986e60d22e917c596122c417a7d6ae1930b8ed5d61a4f
-size 9261287736
model-00016-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:33976aac9f40e0ff430435fe0295ba1f4443dc4b78233c077de95453eff51d01
-size 9261287736
model-00017-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:096850a1c187e804c370956830ef94d20731f79ee1078fa0613604c93015d305
-size 9261287736
model-00018-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b7ed47b73f1171ee30de651924ab58aaa7088037b9f96203be23b4b2514dfb27
-size 9261287736
model-00019-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:fe1e87f96e41804e06c966605e6b00e0a9c6b51c504ee5e22dfdb28e90d1308f
-size 9261287736
model-00020-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:93bc200e651f67d4910170de85235af15b3228b23192afb5c32d5126daaaf996
-size 9261287736
model-00021-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:645df22333bb21b8cef574bdadcaecaf0efcf43d3e747a0b0c0d462740d0b7c4
-size 9261287736
model-00022-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:3a288d0dafad8ca11e21c869a31d62057f6bc13036934e306cb68cd7fdd5861f
-size 9261287736
model-00023-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:94e110e13a08711bfd02cbb3d20e32baecd675f2593a8ae66ba1fcf039e34073
-size 9261287736
model-00024-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:56894143f2c2a3ff2af49e7f3ec2af9929ff8d95fe7e8e63b709280ba4babe52
-size 9261287736
model-00025-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:72d4aee125d0a41ceb2afc164c78d25b9edc89cf845da5f91e9c60b743b2f3a7
-size 9261287736
model-00026-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:72fdd61ca5c065b386092a0701f6fe8ed7ae8d99b0eaba87e50e60acff058876
-size 9261287736
model-00027-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:764f995de7385ab9c14e7393ee6adc73b557c1a182efdcbbf143c3fe22f353c3
-size 9261287736
model-00028-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ba9bd63f6e97a17e03b467629d9a4bafd581da1b9445b134423c75d5aa409c08
-size 9261287736
model-00029-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:21c3f8b212fef1678419b95693121290430930dd2930bd531aa7a3faa40fac41
-size 9261287736
model-00030-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b861e871ac1d1ae14b6fcadaefe876c45d694f715eb91d68bacf114c2c426adf
-size 9261287736
model-00031-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:495ecf892bce401191ce699032b3d95b957d935987ee462df76de2b5928c1127
-size 9261287736
model-00032-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:2613dc2c31320d4c300e260aac5a313dc183b8fdd140f6b3aea338667126ffae
-size 9261287736
model-00033-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:67348068c452d63d6ea33f468f810157993b1431c823fc4fc68d1e4c7cf1b653
-size 9261287736
model-00034-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e28056f9391f029ddea9743445dbf763c265feb31a184ba73a7e8de91280d981
-size 9261287736
model-00035-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7c966308e58af945da780ddf143e6f436f21a8019ddb1824200822ac8ecf72c8
-size 9261287736
model-00036-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:998076e5d150bea2630b24af7bf1da793a951f5c5b9ddfeb7b9400eb1188eea6
-size 9261287736
model-00037-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1facd7663e7db14ec43876cc727f3d8e75b40bb025cb7a25fb7d2f9a60de68f9
-size 9261287736
model-00038-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:c06aafaf597e562ebce7672650df8ae3fcabf22977c4d47004f817ca4f8fe536
-size 9261287736
model-00039-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d3bda7f14ffa81266bd9ec07e05e9c3bc16726f8fcd75281d4af4002508bfceb
-size 9261287736
model-00040-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:bf19d0fbf517593390ccc10b1c00c1410b54286ca5c651c1fe3ee8de7b02c4d1
-size 9261287736
model-00041-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f98f5ea063046592be25906b7dc84f0096e67f104f1bcd847858ee778ba4e1eb
-size 9261287736
model-00042-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:033ba66aecb88ed950b1c49bd2c6f6d5e5173ed4882b22ff189f4caef542ec9f
-size 9261287736
model-00043-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5a46195067f9f791eee221c2195d30136f0f8eb723dd0c1674d68c6c2409fa68
-size 9261287736
model-00044-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d5becd94d0a4e65b2b0496f21ca679ba4b171ffb32851eabd15af17004bda901
-size 9261287736
model-00045-of-00050.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:460bd2d228da729c1ef006c65582a8f8d709c748a731f56ff6129ec8beb7cd57
-size 9261287736