Tokenizer loading is extremely slow

#1
by Minami-su - opened

╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮
│ /root/autodl-fs/llama7bzh/1.py:7 in <module>                                                      │
│ │
│ 4 ckpt = './llama' │
│ 5 device = torch.device('cuda') │
│ 6 model = LlamaForCausalLM.from_pretrained(ckpt, device_map='auto', low_cpu_mem_usage=True │
│ ❱ 7 tokenizer = AutoTokenizer.from_pretrained(ckpt) │
│ 8 history = [] │
│ 9 max_history_len=6 │
│ 10 while True: │
│ │
│ /root/miniconda3/lib/python3.8/site-packages/transformers/models/auto/tokenization_auto.py:702 │
│ in from_pretrained │
│ │
│ 699 │ │ │ │ raise ValueError( │
│ 700 │ │ │ │ │ f"Tokenizer class {tokenizer_class_candidate} does not exist or is n │
│ 701 │ │ │ │ ) │
│ ❱ 702 │ │ │ return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *input │
│ 703 │ │ │
│ 704 │ │ # Otherwise we have to be creative. │
│ 705 │ │ # if model is an encoder decoder, the encoder tokenizer class is used by default │
│ │
│ /root/miniconda3/lib/python3.8/site-packages/transformers/tokenization_utils_base.py:1811 in │
│ from_pretrained │
│ │
│ 1808 │ │ │ else: │
│ 1809 │ │ │ │ logger.info(f"loading file {file_path} from cache at {resolved_vocab_fil │
│ 1810 │ │ │
│ ❱ 1811 │ │ return cls._from_pretrained( │
│ 1812 │ │ │ resolved_vocab_files, │
│ 1813 │ │ │ pretrained_model_name_or_path, │
│ 1814 │ │ │ init_configuration, │
│ │
│ /root/miniconda3/lib/python3.8/site-packages/transformers/tokenization_utils_base.py:1965 in │
│ _from_pretrained │
│ │
│ 1962 │ │ │
│ 1963 │ │ # Instantiate tokenizer. │
│ 1964 │ │ try: │
│ ❱ 1965 │ │ │ tokenizer = cls(*init_inputs, **init_kwargs) │
│ 1966 │ │ except OSError: │
│ 1967 │ │ │ raise OSError( │
│ 1968 │ │ │ │ "Unable to load vocabulary from file. " │
│ │
│ /root/miniconda3/lib/python3.8/site-packages/transformers/models/llama/tokenization_llama_fast.p │
│ y:89 in __init__                                                                                  │
│ │
│ 86 │ │ eos_token="", │
│ 87 │ │ **kwargs, │
│ 88 │ ): │
│ ❱ 89 │ │ super().__init__( │
│ 90 │ │ │ vocab_file=vocab_file, │
│ 91 │ │ │ tokenizer_file=tokenizer_file, │
│ 92 │ │ │ clean_up_tokenization_spaces=clean_up_tokenization_spaces, │
│ │
│ /root/miniconda3/lib/python3.8/site-packages/transformers/tokenization_utils_fast.py:114 in │
│ __init__                                                                                          │
│ │
│ 111 │ │ │ fast_tokenizer = TokenizerFast.from_file(fast_tokenizer_file) │
│ 112 │ │ elif slow_tokenizer is not None: │
│ 113 │ │ │ # We need to convert a slow tokenizer to build the backend │
│ ❱ 114 │ │ │ fast_tokenizer = convert_slow_tokenizer(slow_tokenizer) │
│ 115 │ │ elif self.slow_tokenizer_class is not None: │
│ 116 │ │ │ # We need to create and convert a slow tokenizer to build the backend │
│ 117 │ │ │ slow_tokenizer = self.slow_tokenizer_class(*args, **kwargs) │
│ │
│ /root/miniconda3/lib/python3.8/site-packages/transformers/convert_slow_tokenizer.py:1288 in │
│ convert_slow_tokenizer │
│ │
│ 1285 │ │
│ 1286 │ converter_class = SLOW_TO_FAST_CONVERTERS[tokenizer_class_name] │
│ 1287 │ │
│ ❱ 1288 │ return converter_class(transformer_tokenizer).converted() │
│ 1289 │
│ │
│ /root/miniconda3/lib/python3.8/site-packages/transformers/convert_slow_tokenizer.py:511 in │
│ converted │
│ │
│ 508 │ │ return decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_s │
│ 509 │ │
│ 510 │ def converted(self) -> Tokenizer: │
│ ❱ 511 │ │ tokenizer = self.tokenizer(self.proto) │
│ 512 │ │ │
│ 513 │ │ # Tokenizer assemble │
│ 514 │ │ normalizer = self.normalizer(self.proto) │
│ │
│ /root/miniconda3/lib/python3.8/site-packages/transformers/convert_slow_tokenizer.py:1130 in │
│ tokenizer │
│ │
│ 1127 │ │ if model_type == 1: │
│ 1128 │ │ │ raise RuntimeError("Llama is supposed to be a BPE model!") │
│ 1129 │ │ elif model_type == 2: │
│ ❱ 1130 │ │ │ _, merges = SentencePieceExtractor(self.original_tokenizer.vocab_file).extra │
│ 1131 │ │ │ bpe_vocab = {word: i for i, (word, _score) in enumerate(vocab_scores)} │
│ 1132 │ │ │ tokenizer = Tokenizer( │
│ 1133 │ │ │ │ BPE(bpe_vocab, merges, unk_token=proto.trainer_spec.unk_piece, fuse_unk= │
│ │
│ /root/miniconda3/lib/python3.8/site-packages/transformers/convert_slow_tokenizer.py:60 in │
│ extract │
│ │
│ 57 │ │ for piece_l in vocab.keys(): │
│ 58 │ │ │ for piece_r in vocab.keys(): │
│ 59 │ │ │ │ merge = f"{piece_l}{piece_r}" │
│ ❱ 60 │ │ │ │ piece_score = vocab_scores.get(merge, None) │
│ 61 │ │ │ │ if piece_score: │
│ 62 │ │ │ │ │ merges += [(piece_l, piece_r, piece_score)] │
│ 63 │ │ merges = sorted(merges, key=lambda val: val[2], reverse=reverse) │

It just hangs at the tokenizer loading step. Super slow.
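The last frame of the traceback is the bottleneck: `SentencePieceExtractor.extract` recovers the BPE merges by testing every (left, right) pair of vocabulary pieces, so the work grows quadratically with vocabulary size. A back-of-the-envelope sketch (32,000 is the standard LLaMA vocabulary size; the exact count for this checkpoint is an assumption):

```python
# The nested loop in SentencePieceExtractor.extract visits every pair of
# vocab pieces, doing one dict lookup per candidate merge in pure Python.
vocab_size = 32_000                # standard LLaMA vocab; assumed here
candidate_pairs = vocab_size ** 2  # ~1.02e9 lookups
print(f"{candidate_pairs:,} candidate merges to check")
# 1,024,000,000 -> many minutes of single-core pure-Python work
```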

Solved. Don't use this:

[screenshot: loading the tokenizer via AutoTokenizer, as in the traceback above]

Use this instead:

[screenshot: loading the tokenizer with LlamaTokenizer directly]

It loads in seconds.

This shows that AutoTokenizer, based on the repo's config files, automatically selected LlamaTokenizerFast, and building that fast tokenizer from the slow SentencePiece model is exactly the conversion that hangs in the traceback above.
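A minimal sketch of the workaround (the `./llama` path matches the script in the traceback; `use_fast=False` is a standard `from_pretrained` argument, and `LlamaTokenizer` is the slow class, so neither path triggers the conversion):

```python
from transformers import AutoTokenizer, LlamaTokenizer

ckpt = './llama'

# Option 1: instantiate the slow SentencePiece-based tokenizer directly,
# so the slow -> fast conversion never runs.
tokenizer = LlamaTokenizer.from_pretrained(ckpt)

# Option 2: keep AutoTokenizer but explicitly request the slow implementation.
tokenizer = AutoTokenizer.from_pretrained(ckpt, use_fast=False)
```

If the fast tokenizer is actually wanted, one option is to let the conversion finish once and then call `tokenizer.save_pretrained(ckpt)`: that writes a `tokenizer.json`, and later loads read it directly instead of redoing the conversion.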

Minami-su changed discussion status to closed
