ariG23498 (HF Staff) committed
Commit 89c04d7 · verified · 1 Parent(s): 9694807

Upload vandijklab_C2S-Scale-Gemma-2-27B_1.txt with huggingface_hub

vandijklab_C2S-Scale-Gemma-2-27B_1.txt ADDED
@@ -0,0 +1,82 @@
+ ```CODE:
+ # Load model directly
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ tokenizer = AutoTokenizer.from_pretrained("vandijklab/C2S-Scale-Gemma-2-27B")
+ model = AutoModelForCausalLM.from_pretrained("vandijklab/C2S-Scale-Gemma-2-27B")
+ ```
+
+ ERROR:
+ Traceback (most recent call last):
+ File "/tmp/.cache/uv/environments-v2/50e098834e83db53/lib/python3.13/site-packages/transformers/convert_slow_tokenizer.py", line 1640, in extract_vocab_merges_from_model
+ from tiktoken.load import load_tiktoken_bpe
+ ModuleNotFoundError: No module named 'tiktoken'
+
+ During handling of the above exception, another exception occurred:
+
+ Traceback (most recent call last):
+ File "/tmp/.cache/uv/environments-v2/50e098834e83db53/lib/python3.13/site-packages/transformers/convert_slow_tokenizer.py", line 1783, in convert_slow_tokenizer
+ ).converted()
+ ~~~~~~~~~^^
+ File "/tmp/.cache/uv/environments-v2/50e098834e83db53/lib/python3.13/site-packages/transformers/convert_slow_tokenizer.py", line 1677, in converted
+ tokenizer = self.tokenizer()
+ File "/tmp/.cache/uv/environments-v2/50e098834e83db53/lib/python3.13/site-packages/transformers/convert_slow_tokenizer.py", line 1670, in tokenizer
+ vocab_scores, merges = self.extract_vocab_merges_from_model(self.vocab_file)
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
+ File "/tmp/.cache/uv/environments-v2/50e098834e83db53/lib/python3.13/site-packages/transformers/convert_slow_tokenizer.py", line 1642, in extract_vocab_merges_from_model
+ raise ValueError(
+ "`tiktoken` is required to read a `tiktoken` file. Install it with `pip install tiktoken`."
+ )
+ ValueError: `tiktoken` is required to read a `tiktoken` file. Install it with `pip install tiktoken`.
+
+ During handling of the above exception, another exception occurred:
+
+ Traceback (most recent call last):
+ File "/tmp/.cache/uv/environments-v2/50e098834e83db53/lib/python3.13/site-packages/transformers/tokenization_utils_base.py", line 2343, in _from_pretrained
+ tokenizer = cls(*init_inputs, **init_kwargs)
+ File "/tmp/.cache/uv/environments-v2/50e098834e83db53/lib/python3.13/site-packages/transformers/models/gemma/tokenization_gemma_fast.py", line 100, in __init__
+ super().__init__(
+ ~~~~~~~~~~~~~~~~^
+ vocab_file=vocab_file,
+ ^^^^^^^^^^^^^^^^^^^^^^
+ ...<8 lines>...
+ **kwargs,
+ ^^^^^^^^^
+ )
+ ^
+ File "/tmp/.cache/uv/environments-v2/50e098834e83db53/lib/python3.13/site-packages/transformers/tokenization_utils_fast.py", line 139, in __init__
+ fast_tokenizer = convert_slow_tokenizer(self, from_tiktoken=True)
+ File "/tmp/.cache/uv/environments-v2/50e098834e83db53/lib/python3.13/site-packages/transformers/convert_slow_tokenizer.py", line 1785, in convert_slow_tokenizer
+ raise ValueError(
+ ...<3 lines>...
+ )
+ ValueError: Converting from SentencePiece and Tiktoken failed, if a converter for SentencePiece is available, provide a model path with a SentencePiece tokenizer.model file.Currently available slow->fast converters: ['AlbertTokenizer', 'BartTokenizer', 'BarthezTokenizer', 'BertTokenizer', 'BigBirdTokenizer', 'BlenderbotTokenizer', 'CamembertTokenizer', 'CLIPTokenizer', 'CodeGenTokenizer', 'ConvBertTokenizer', 'DebertaTokenizer', 'DebertaV2Tokenizer', 'DistilBertTokenizer', 'DPRReaderTokenizer', 'DPRQuestionEncoderTokenizer', 'DPRContextEncoderTokenizer', 'ElectraTokenizer', 'FNetTokenizer', 'FunnelTokenizer', 'GPT2Tokenizer', 'HerbertTokenizer', 'LayoutLMTokenizer', 'LayoutLMv2Tokenizer', 'LayoutLMv3Tokenizer', 'LayoutXLMTokenizer', 'LongformerTokenizer', 'LEDTokenizer', 'LxmertTokenizer', 'MarkupLMTokenizer', 'MBartTokenizer', 'MBart50Tokenizer', 'MPNetTokenizer', 'MobileBertTokenizer', 'MvpTokenizer', 'NllbTokenizer', 'OpenAIGPTTokenizer', 'PegasusTokenizer', 'Qwen2Tokenizer', 'RealmTokenizer', 'ReformerTokenizer', 'RemBertTokenizer', 'RetriBertTokenizer', 'RobertaTokenizer', 'RoFormerTokenizer', 'SeamlessM4TTokenizer', 'SqueezeBertTokenizer', 'T5Tokenizer', 'UdopTokenizer', 'WhisperTokenizer', 'XLMRobertaTokenizer', 'XLNetTokenizer', 'SplinterTokenizer', 'XGLMTokenizer', 'LlamaTokenizer', 'CodeLlamaTokenizer', 'GemmaTokenizer', 'Phi3Tokenizer']
+
+ During handling of the above exception, another exception occurred:
+
+ Traceback (most recent call last):
+ File "/tmp/vandijklab_C2S-Scale-Gemma-2-27B_1wtRfhO.py", line 17, in <module>
+ tokenizer = AutoTokenizer.from_pretrained("vandijklab/C2S-Scale-Gemma-2-27B")
+ File "/tmp/.cache/uv/environments-v2/50e098834e83db53/lib/python3.13/site-packages/transformers/models/auto/tokenization_auto.py", line 1140, in from_pretrained
+ return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/tmp/.cache/uv/environments-v2/50e098834e83db53/lib/python3.13/site-packages/transformers/tokenization_utils_base.py", line 2097, in from_pretrained
+ return cls._from_pretrained(
+ ~~~~~~~~~~~~~~~~~~~~^
+ resolved_vocab_files,
+ ^^^^^^^^^^^^^^^^^^^^^
+ ...<9 lines>...
+ **kwargs,
+ ^^^^^^^^^
+ )
+ ^
+ File "/tmp/.cache/uv/environments-v2/50e098834e83db53/lib/python3.13/site-packages/transformers/tokenization_utils_base.py", line 2344, in _from_pretrained
+ except import_protobuf_decode_error():
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~^^
+ File "/tmp/.cache/uv/environments-v2/50e098834e83db53/lib/python3.13/site-packages/transformers/tokenization_utils_base.py", line 87, in import_protobuf_decode_error
+ raise ImportError(PROTOBUF_IMPORT_ERROR.format(error_message))
+ ImportError:
+ requires the protobuf library but it was not found in your environment. Check out the instructions on the
+ installation page of its repo: https://github.com/protocolbuffers/protobuf/tree/master/python#installation and follow the ones
+ that match your environment. Please note that you may need to restart your runtime after installation.
+
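
The traceback above comes down to missing optional tokenizer dependencies: the fast-tokenizer conversion first tries the `tiktoken` path (not installed), then falls back to the SentencePiece route, which needs `protobuf` (also not installed). Below is a minimal sketch of the same load in an environment where those packages are available; the `pip install` line and the extra `torch_dtype`/`device_map` arguments are assumptions for convenience (a 27B checkpoint usually wants `accelerate` for `device_map="auto"`) and are not part of the original snippet.

```
# Assumed environment fix (not from the original file):
#   pip install tiktoken protobuf sentencepiece accelerate
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "vandijklab/C2S-Scale-Gemma-2-27B"

# With tiktoken / protobuf / sentencepiece installed, the slow->fast
# tokenizer conversion that fails in the traceback can complete.
tokenizer = AutoTokenizer.from_pretrained(model_id)

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype="auto",   # let transformers pick the checkpoint's dtype
    device_map="auto",    # shard the 27B model across available devices
)
```

Installing only `protobuf` and `sentencepiece` should typically be enough for the Gemma SentencePiece converter to succeed; adding `tiktoken` simply lets the first conversion path work, as the error message itself suggests.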