Failed. Reason: The primary container for production variant AllTraffic did not pass the ping health check

---------------------------------------------------------------------------
UnexpectedStatusException                 Traceback (most recent call last)
Cell In[6], line 2
      1 # deploy model to SageMaker Inference
----> 2 predictor = huggingface_model.deploy(
      3     initial_instance_count=1,
      4     instance_type="ml.g5.2xlarge",
      5     container_startup_health_check_timeout=300,
      6 )
File ~/anaconda3/envs/pytorch_p310/lib/python3.10/site-packages/sagemaker/huggingface/model.py:313, in HuggingFaceModel.deploy(self, initial_instance_count, instance_type, serializer, deserializer, accelerator_type, endpoint_name, tags, kms_key, wait, data_capture_config, async_inference_config, serverless_inference_config, volume_size, model_data_download_timeout, container_startup_health_check_timeout, inference_recommendation_id, explainer_config, **kwargs)
    306     inference_tool = "neuron" if instance_type.startswith("ml.inf1") else "neuronx"
    307     self.image_uri = self.serving_image_uri(
    308         region_name=self.sagemaker_session.boto_session.region_name,
    309         instance_type=instance_type,
    310         inference_tool=inference_tool,
    311     )
--> 313 return super(HuggingFaceModel, self).deploy(
    314     initial_instance_count,
    315     instance_type,
    316     serializer,
    317     deserializer,
    318     accelerator_type,
    319     endpoint_name,
    320     tags,
    321     kms_key,
    322     wait,
    323     data_capture_config,
    324     async_inference_config,
    325     serverless_inference_config,
    326     volume_size=volume_size,
    327     model_data_download_timeout=model_data_download_timeout,
    328     container_startup_health_check_timeout=container_startup_health_check_timeout,
    329     inference_recommendation_id=inference_recommendation_id,
    330     explainer_config=explainer_config,
    331 )
File ~/anaconda3/envs/pytorch_p310/lib/python3.10/site-packages/sagemaker/model.py:1430, in Model.deploy(self, initial_instance_count, instance_type, serializer, deserializer, accelerator_type, endpoint_name, tags, kms_key, wait, data_capture_config, async_inference_config, serverless_inference_config, volume_size, model_data_download_timeout, container_startup_health_check_timeout, inference_recommendation_id, explainer_config, **kwargs)
   1427 if is_explainer_enabled:
   1428     explainer_config_dict = explainer_config._to_request_dict()
-> 1430 self.sagemaker_session.endpoint_from_production_variants(
   1431     name=self.endpoint_name,
   1432     production_variants=[production_variant],
   1433     tags=tags,
   1434     kms_key=kms_key,
   1435     wait=wait,
   1436     data_capture_config_dict=data_capture_config_dict,
   1437     explainer_config_dict=explainer_config_dict,
   1438     async_inference_config_dict=async_inference_config_dict,
   1439 )
   1441 if self.predictor_cls:
   1442     predictor = self.predictor_cls(self.endpoint_name, self.sagemaker_session)
File ~/anaconda3/envs/pytorch_p310/lib/python3.10/site-packages/sagemaker/session.py:4727, in Session.endpoint_from_production_variants(self, name, production_variants, tags, kms_key, wait, data_capture_config_dict, async_inference_config_dict, explainer_config_dict)
   4724 LOGGER.info("Creating endpoint-config with name %s", name)
   4725 self.sagemaker_client.create_endpoint_config(**config_options)
-> 4727 return self.create_endpoint(
   4728     endpoint_name=name, config_name=name, tags=endpoint_tags, wait=wait
   4729 )
File ~/anaconda3/envs/pytorch_p310/lib/python3.10/site-packages/sagemaker/session.py:4072, in Session.create_endpoint(self, endpoint_name, config_name, tags, wait)
   4068 self.sagemaker_client.create_endpoint(
   4069     EndpointName=endpoint_name, EndpointConfigName=config_name, Tags=tags
   4070 )
   4071 if wait:
-> 4072     self.wait_for_endpoint(endpoint_name)
   4073 return endpoint_name
File ~/anaconda3/envs/pytorch_p310/lib/python3.10/site-packages/sagemaker/session.py:4424, in Session.wait_for_endpoint(self, endpoint, poll)
   4418     if "CapacityError" in str(reason):
   4419         raise exceptions.CapacityError(
   4420             message=message,
   4421             allowed_statuses=["InService"],
   4422             actual_status=status,
   4423         )
-> 4424     raise exceptions.UnexpectedStatusException(
   4425         message=message,
   4426         allowed_statuses=["InService"],
   4427         actual_status=status,
   4428     )
   4429 return desc
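
The ping failure means the model server inside the container never answered SageMaker's /ping health check before the wait gave up, so the root cause is in the container log, not in this trace. A minimal diagnostic sketch with boto3, assuming a hypothetical endpoint name (SageMaker writes endpoint container logs to the /aws/sagemaker/Endpoints/<EndpointName> log group):

import boto3

# Hypothetical name; substitute the endpoint name from the failed deploy.
endpoint_name = "huggingface-pytorch-tgi-inference-example"
log_group = f"/aws/sagemaker/Endpoints/{endpoint_name}"

logs = boto3.client("logs")

# Find the most recently active log stream for the variant's container.
streams = logs.describe_log_streams(
    logGroupName=log_group, orderBy="LastEventTime", descending=True
)["logStreams"]

# Print the container's startup output to see why it never went healthy
# (typical culprits: OOM while loading weights, a bad model configuration,
# or a weight download that outlived the startup health-check window).
for event in logs.get_log_events(
    logGroupName=log_group, logStreamName=streams[0]["logStreamName"]
)["events"]:
    print(event["message"])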
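If the log shows weights still downloading or loading when the check timed out, the usual fix is a larger container_startup_health_check_timeout (the failing cell used 300 seconds). A hedged retry sketch, reusing the huggingface_model object from the failing cell; 900 is an illustrative value, not one taken from the trace:

# deploy() raised after the endpoint was created, so the failed endpoint
# may need deleting first (e.g. boto3's sagemaker client delete_endpoint)
# before reusing the same name.
predictor = huggingface_model.deploy(
    initial_instance_count=1,
    instance_type="ml.g5.2xlarge",
    container_startup_health_check_timeout=900,  # was 300; large models need longer to load
)

If the log instead shows the container crashing (for example an out-of-memory error while loading the model), a longer timeout will not help; the model needs a larger instance type or a reduced memory footprint.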