Inference endpoint error

#2
by giuliogalvan - opened

The inference endpoint does not work; I am getting a sharding error.

I updated to the newest revision but still cannot use the endpoint. I am attaching the log:

cj6v6 2023-06-30T08:19:33.903Z {"timestamp":"2023-06-30T08:19:33.903685Z","level":"ERROR","fields":{"message":"Error when initializing model\nTraceback (most recent call last):\n File "/opt/conda/bin/text-generation-server", line 8, in \n sys.exit(app())\n File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 311, in __call__\n return get_command(self)(*args, **kwargs)\n File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1130, in __call__\n return self.main(*args, **kwargs)\n File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 778, in main\n return _main(\n File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 216, in _main\n rv = self.invoke(ctx)\n File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1657, in invoke\n return _process_result(sub_ctx.command.invoke(sub_ctx))\n File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1404, in invoke\n return ctx.invoke(self.callback, **ctx.params)\n File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 760, in invoke\n return __callback(*args, **kwargs)\n File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 683, in wrapper\n return callback(**use_params) # type: ignore\n File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py", line 67, in serve\n server.serve(model_id, revision, sharded, quantize, trust_remote_code, uds_path)\n File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 155, in serve\n asyncio.run(serve_inner(model_id, revision, sharded, quantize, trust_remote_code))\n File "/opt/conda/lib/python3.9/asyncio/runners.py", line 44, in run\n return loop.run_until_complete(main)\n File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 634, in run_until_complete\n self.run_forever()\n File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 601, in run_forever\n self._run_once()\n File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 1905, in 
_run_once\n handle._run()\n File "/opt/conda/lib/python3.9/asyncio/events.py", line 80, in _run\n self._context.run(self._callback, *self._args)\n> File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 124, in serve_inner\n model = get_model(model_id, revision, sharded, quantize, trust_remote_code)\n File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/init.py", line 246, in get_model\n return llama_cls(\n File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/flash_llama.py", line 44, in __init__\n tokenizer = LlamaTokenizer.from_pretrained(\n File "/usr/src/transformers/src/transformers/tokenization_utils_base.py", line 1812, in from_pretrained\n return cls._from_pretrained(\n File "/usr/src/transformers/src/transformers/tokenization_utils_base.py", line 1975, in _from_pretrained\n tokenizer = cls(*init_inputs, **init_kwargs)\n File "/usr/src/transformers/src/transformers/models/llama/tokenization_llama.py", line 96, in __init__\n self.sp_model.Load(vocab_file)\n File "/opt/conda/lib/python3.9/site-packages/sentencepiece/init.py", line 905, in Load\n return self.LoadFromFile(model_file)\n File "/opt/conda/lib/python3.9/site-packages/sentencepiece/init.py", line 310, in LoadFromFile\n return _sentencepiece.SentencePieceProcessor_LoadFromFile(self, arg)\nTypeError: not a string\n"},"target":"text_generation_launcher","span":{"rank":0,"name":"shard-manager"},"spans":[{"rank":0,"name":"shard-manager"}]}
cj6v6 2023-06-30T08:19:34.351Z Error: ShardCannotStart
cj6v6 2023-06-30T08:19:34.351Z {"timestamp":"2023-06-30T08:19:34.351030Z","level":"ERROR","fields":{"message":"Shard 0 failed to start:\nThe tokenizer class you load from this checkpoint is not the same type as the class this function is called from. It may result in unexpected tokenization. \nThe tokenizer class you load from this checkpoint is 'XgenTokenizer'. \nThe class this function is called from is 'LlamaTokenizer'.\nTraceback (most recent call last):\n\n File "/opt/conda/bin/text-generation-server", line 8, in \n sys.exit(app())\n\n File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py", line 67, in serve\n server.serve(model_id, revision, sharded, quantize, trust_remote_code, uds_path)\n\n File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 155, in serve\n asyncio.run(serve_inner(model_id, revision, sharded, quantize, trust_remote_code))\n\n File "/opt/conda/lib/python3.9/asyncio/runners.py", line 44, in run\n return loop.run_until_complete(main)\n\n File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 647, in run_until_complete\n return future.result()\n\n File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 124, in serve_inner\n model = get_model(model_id, revision, sharded, quantize, trust_remote_code)\n\n File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/init.py", line 246, in get_model\n return llama_cls(\n\n File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/flash_llama.py", line 44, in __init__\n tokenizer = LlamaTokenizer.from_pretrained(\n\n File "/usr/src/transformers/src/transformers/tokenization_utils_base.py", line 1812, in from_pretrained\n return cls._from_pretrained(\n\n File "/usr/src/transformers/src/transformers/tokenization_utils_base.py", line 1975, in _from_pretrained\n tokenizer = cls(*init_inputs, **init_kwargs)\n\n File 
"/usr/src/transformers/src/transformers/models/llama/tokenization_llama.py", line 96, in __init__\n self.sp_model.Load(vocab_file)\n\n File "/opt/conda/lib/python3.9/site-packages/sentencepiece/init.py", line 905, in Load\n return self.LoadFromFile(model_file)\n\n File "/opt/conda/lib/python3.9/site-packages/sentencepiece/init.py", line 310, in LoadFromFile\n return _sentencepiece.SentencePieceProcessor_LoadFromFile(self, arg)\n\nTypeError: not a string\n\n"},"target":"text_generation_launcher"}
cj6v6 2023-06-30T08:19:34.351Z {"timestamp":"2023-06-30T08:19:34.351077Z","level":"INFO","fields":{"message":"Shutting down shards"},"target":"text_generation_launcher"}

Sign up or log in to comment