Dataset columns: reponame (string, 5 distinct values), filepath (string, 15-64 chars), content (string, 63-24.6k chars)
deep-diver/Continuous-Adaptation-for-Machine-Learning-System-to-Data-Changes
custom_components/batch_pred_evaluator.py
""" This component evaluates the performance of a currently deployed model, and the evaluation is based on the result of batch prediction on Vertex AI from the previous component. At the end, this component will output true or false to indicate if retraining is needed. Reference: https://bit.ly/vertex-batch """ from tfx.dsl.component.experimental.annotations import Parameter, OutputArtifact from tfx.dsl.component.experimental.decorators import component from tfx.types.experimental.simple_artifacts import Dataset from absl import logging import os import json @component def PerformanceEvaluator( gcs_destination: Parameter[str], local_directory: Parameter[str], threshold: Parameter[float], trigger_pipeline: OutputArtifact[Dataset], ): """ gcs_destination: GCS location where the files containing the result of batch prediction is local_directory: Temporary directory to hold files copied from the gcs_destination threshold: threshold to decide if retraining is needed or not it is based on the measured accuracy trigger_pipeline: an output artifact which hold true or false to indicate if retraining is needed or not """ full_gcs_results_dir = f"{gcs_destination}/{local_directory}" # Create missing directories. os.makedirs(local_directory, exist_ok=True) # Get the Cloud Storage paths for each result. os.system(f"gsutil -m cp -r {full_gcs_results_dir} {local_directory}") # Get most recently modified directory. latest_directory = max( [os.path.join(local_directory, d) for d in os.listdir(local_directory)], key=os.path.getmtime, ) # Get downloaded results in directory. results_files = [] for dirpath, subdirs, files in os.walk(latest_directory): for file in files: if file.startswith("prediction.results"): results_files.append(os.path.join(dirpath, file)) # Consolidate all the results into a list. results = [] for results_file in results_files: # Download each result. with open(results_file, "r") as file: results.extend([json.loads(line) for line in file.readlines()]) # Calculate performance. num_correct = 0 for result in results: label = os.path.basename(result["instance"]).split("_")[0] prediction = result["prediction"]["label"] if label == prediction: num_correct = num_correct + 1 accuracy = num_correct / len(results) logging.info(f"Accuracy: {accuracy*100}%") # Store the boolean result. trigger_pipeline.set_string_custom_property("result", str(accuracy >= threshold))
deep-diver/Continuous-Adaptation-for-Machine-Learning-System-to-Data-Changes
custom_components/batch_prediction_vertex.py
""" This component launches a Batch Prediction job on Vertex AI. Know more about Vertex AI Batch Predictions jobs, go here: https://cloud.google.com/vertex-ai/docs/predictions/batch-predictions. """ from google.cloud import storage from tfx.dsl.component.experimental.annotations import Parameter, InputArtifact from tfx.dsl.component.experimental.decorators import component from tfx.types.standard_artifacts import String import google.cloud.aiplatform as vertex_ai from absl import logging @component def BatchPredictionGen( gcs_source: InputArtifact[String], project: Parameter[str], location: Parameter[str], model_resource_name: Parameter[str], job_display_name: Parameter[str], gcs_destination: Parameter[str], instances_format: Parameter[str] = "file-list", machine_type: Parameter[str] = "n1-standard-2", accelerator_count: Parameter[int] = 0, accelerator_type: Parameter[str] = None, starting_replica_count: Parameter[int] = 1, max_replica_count: Parameter[int] = 1, ): """ gcs_source: A location inside GCS to be used by the Batch Prediction job to get its inputs. Rest of the parameters are explained here: https://git.io/JiUyU. """ storage_client = storage.Client() # Read GCS Source (gcs_source contains the full path of GCS object). # 1-1. get bucketname from gcs_source gcs_source_uri = gcs_source.uri.split("//")[1:][0].split("/") bucketname = gcs_source_uri[0] bucket = storage_client.get_bucket(bucketname) logging.info(f"bucketname: {bucketname}") # 1-2. get object path without the bucket name. objectpath = "/".join(gcs_source_uri[1:]) # 1-3. read the object to get value set by OutputArtifact from FileListGen. blob = bucket.blob(objectpath) logging.info(f"objectpath: {objectpath}") gcs_source = f"gs://{blob.download_as_text()}" # Get Model. vertex_ai.init(project=project, location=location) model = vertex_ai.Model.list( filter=f"display_name={model_resource_name}", order_by="update_time" )[-1] # Launch a Batch Prediction job. logging.info("Starting batch prediction job.") logging.info(f"GCS path where file list is: {gcs_source}") batch_prediction_job = model.batch_predict( job_display_name=job_display_name, instances_format=instances_format, gcs_source=gcs_source, gcs_destination_prefix=gcs_destination, machine_type=machine_type, accelerator_count=accelerator_count, accelerator_type=accelerator_type, starting_replica_count=starting_replica_count, max_replica_count=max_replica_count, sync=True, ) logging.info(batch_prediction_job.display_name) logging.info(batch_prediction_job.resource_name) logging.info(batch_prediction_job.state)
deep-diver/Continuous-Adaptation-for-Machine-Learning-System-to-Data-Changes
custom_components/file_list_gen.py
""" Generate a txt file formatted required by Vertex AI's Batch Prediction There are few options, and this component generate "file list" formatted txt. (https://cloud.google.com/vertex-ai/docs/predictions/batch-predictions) """ import tensorflow as tf from absl import logging from tfx.dsl.component.experimental.decorators import component from tfx.dsl.component.experimental.annotations import Parameter, OutputArtifact from tfx.types.standard_artifacts import String @component def FileListGen( outpath: OutputArtifact[String], gcs_src_bucket: Parameter[str], gcs_src_prefix: Parameter[str] = "", output_filename: Parameter[str] = "test-images.txt", ): """ : param outpath: OutputArtifact to hold where output_filename will be located This will be used in the downstream component, BatchPredictionGen : param gcs_src_bucket: GCS bucket name where the list of raw data is : param gcs_src_prefix: prefix to be added to gcs_src_bucket : param output_filename: output filename whose content is a list of file paths of raw data """ logging.info("FileListGen started") # 1. get the list of data gcs_src_prefix = ( f"{gcs_src_prefix}/" if len(gcs_src_prefix) != 0 else gcs_src_prefix ) img_paths = tf.io.gfile.glob(f"gs://{gcs_src_bucket}/{gcs_src_prefix}*.jpg") logging.info("Successfully retrieve the file(jpg) list from GCS path") # 2. write the list of data in the expected format in Vertex AI Batch Prediction to a local file with open(output_filename, "w", encoding="utf-8") as f: f.writelines("%s\n" % img_path for img_path in img_paths) logging.info( f"Successfully created the file list file({output_filename}) in local storage" ) # 3. upload the local file to GCS location gcs_dst = f"{gcs_src_bucket}/{gcs_src_prefix}{output_filename}" tf.io.gfile.copy(output_filename, f"gs://{gcs_dst}", overwrite=True) logging.info(f"Successfully uploaded the file list ({gcs_dst})") # 4. store the GCS location where the local file is outpath.value = gcs_dst
deep-diver/Continuous-Adaptation-for-Machine-Learning-System-to-Data-Changes
custom_components/span_preparator.py
""" This component is responsible for separating provided samples into training and validation splits. It then converts them to TFRecords and stores those inside a GCS location. Finally, it returns the latest span id calculated from the current samples in `gcs_source_bucket`. """ from tfx.dsl.component.experimental.decorators import component from tfx.dsl.component.experimental.annotations import Parameter from tfx.dsl.component.experimental.annotations import OutputArtifact, InputArtifact from tfx.types.experimental.simple_artifacts import Dataset from absl import logging from datetime import datetime import tensorflow as tf import random import os # Label-mapping. LABEL_DICT = { "airplane": 0, "automobile": 1, "bird": 2, "cat": 3, "deer": 4, "dog": 5, "frog": 6, "horse": 7, "ship": 8, "truck": 9, } # Images are byte-strings. def _bytestring_feature(list_of_bytestrings): return tf.train.Feature(bytes_list=tf.train.BytesList(value=list_of_bytestrings)) # Classes would be integers. def _int_feature(list_of_ints): return tf.train.Feature(int64_list=tf.train.Int64List(value=list_of_ints)) # Function that prepares a record for the tfrecord file # a record contains the image and its label. def to_tfrecord(img_bytes, label): feature = { "image": _bytestring_feature([img_bytes]), "label": _int_feature([label]), } return tf.train.Example(features=tf.train.Features(feature=feature)) def write_tfrecords(filepaths, dest_gcs, tfrecord_filename, new_span, is_train): # For this project, we are serializing the images in one TFRecord only. # For more realistic purposes, this should be sharded. folder = "train" if is_train else "test" with tf.io.TFRecordWriter(tfrecord_filename) as writer: for path in filepaths: image_string = tf.io.read_file(path).numpy() class_name = path.split("/")[-1].split("_")[0] label = LABEL_DICT[class_name] example = to_tfrecord(image_string, label) writer.write(example.SerializeToString()) # Copy over the zipped TFRecord file to the GCS Bucket and # remove the temporary files. logging.info(f"gsutil cp {tfrecord_filename} {dest_gcs}/span-{new_span}/{folder}/") os.system(f"gsutil cp {tfrecord_filename} {dest_gcs}/span-{new_span}/{folder}/") os.remove(tfrecord_filename) @component def SpanPreparator( is_retrain: InputArtifact[Dataset], gcs_source_bucket: Parameter[str], gcs_destination_bucket: Parameter[str], latest_span_id: OutputArtifact[Dataset], gcs_source_prefix: Parameter[str] = "", ): """ :param is_retrain: Boolean to indicate if we are retraining. :param gcs_source_bucket: GCS location where the entry samples are residing. :param gcs_destination_bucket: GCS location where the converted TFRecords will be serialized. :param latest_span_id: Data span. :param gcs_source_prefix: Location prefix. """ if is_retrain.get_string_custom_property("result") == "False": # Get the latest span and determine the new span. last_span_str = tf.io.gfile.glob(f"{gcs_destination_bucket}/span-*")[-1] last_span = int(last_span_str.split("-")[-1]) new_span = last_span + 1 timestamp = datetime.utcnow().strftime("%y%m%d-%H%M%S") # Get images from the provided GCS source. image_paths = tf.io.gfile.glob(f"gs://{gcs_source_bucket}/*.jpg") logging.info(image_paths) random.shuffle(image_paths) # Create train and validation splits. val_split = 0.2 split_index = int(len(image_paths) * (1 - val_split)) training_paths = image_paths[:split_index] validation_paths = image_paths[split_index:] # Write as TFRecords. 
write_tfrecords( training_paths, gcs_destination_bucket, tfrecord_filename=f"new_training_data_{timestamp}.tfrecord", new_span=new_span, is_train=True, ) write_tfrecords( validation_paths, gcs_destination_bucket, tfrecord_filename=f"new_validation_data_{timestamp}.tfrecord", new_span=new_span, is_train=False, ) logging.info("Removing images from batch prediction bucket.") os.system( f"gsutil mv gs://{gcs_source_bucket}/{gcs_source_prefix} gs://{gcs_source_bucket}/{gcs_source_prefix}_old" ) latest_span_id.set_string_custom_property("latest_span", str(new_span))
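A quick way to sanity-check a span written by this component is to parse one record back with a feature spec mirroring to_tfrecord; a minimal sketch (the GCS path is illustrative):

import tensorflow as tf

feature_spec = {
    "image": tf.io.FixedLenFeature([], tf.string),
    "label": tf.io.FixedLenFeature([], tf.int64),
}

ds = tf.data.TFRecordDataset(
    "gs://my-bucket/span-2/train/new_training_data_211005-101010.tfrecord"
)
for record in ds.take(1):
    example = tf.io.parse_single_example(record, feature_spec)
    image = tf.image.decode_jpeg(example["image"], channels=3)
    print(image.shape, int(example["label"]))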
deep-diver/Continuous-Adaptation-for-Machine-Learning-System-to-Data-Changes
custom_components/training_pipeline_trigger.py
""" Component responsible for triggering a training job given a pipeline specification. """ import json from google.cloud import storage from kfp.v2.google.client import AIPlatformClient from tfx.dsl.component.experimental.annotations import Parameter, InputArtifact from tfx.dsl.component.experimental.decorators import component from tfx.types.experimental.simple_artifacts import Dataset from absl import logging @component def PipelineTrigger( is_retrain: InputArtifact[Dataset], latest_span_id: InputArtifact[Dataset], pipeline_spec_path: Parameter[str], project_id: Parameter[str], region: Parameter[str], ): """ :param is_retrain: Boolean to indicate if we are retraining. :param latest_span_id: Latest span id to craft training data for the model. :param pipeline_spec_path: Training pipeline specification path. :param project_id: GCP project id. :param region: GCP region. """ if is_retrain.get_string_custom_property("result") == "False": # Check if the pipeline spec exists. storage_client = storage.Client() path_parts = pipeline_spec_path.replace("gs://", "").split("/") bucket_name = path_parts[0] blob_name = "/".join(path_parts[1:]) bucket = storage_client.bucket(bucket_name) blob = storage.Blob(bucket=bucket, name=blob_name) if not blob.exists(storage_client): raise ValueError(f"{pipeline_spec_path} does not exist.") # Initialize Vertex AI API client and submit for pipeline execution. api_client = AIPlatformClient(project_id=project_id, region=region) # Fetch the latest span. latest_span = latest_span_id.get_string_custom_property("latest_span") # Create a training job from pipeline spec. response = api_client.create_run_from_job_spec( pipeline_spec_path, enable_caching=False, parameter_values={ "input-config": json.dumps( { "splits": [ { "name": "train", "pattern": f"span-[{int(latest_span)-1}{latest_span}]/train/*.tfrecord", }, { "name": "val", "pattern": f"span-[{int(latest_span)-1}{latest_span}]/test/*.tfrecord", }, ] } ), "output-config": json.dumps({}), }, ) logging.info(response)
deep-diver/Continuous-Adaptation-for-Machine-Learning-System-to-Data-Changes
notebooks/01_Dataset_Prep.ipynb
from google.colab import auth

auth.authenticate_user()

TARGET_ROOT_DIR = "cifar10"
TARGET_TRAIN_DIR = TARGET_ROOT_DIR + "/span-1/train"
TARGET_TEST_DIR = TARGET_ROOT_DIR + "/span-1/test"

!mkdir -p {TARGET_TRAIN_DIR}
!mkdir -p {TARGET_TEST_DIR}

import tensorflow_datasets as tfds

# Generate TFRecords with TFDS.
builder = tfds.builder("cifar10")
builder.download_and_prepare()

#@title GCS
#@markdown You should change these values as per your preferences. The copy operation can take ~5 minutes.
BUCKET_PATH = "gs://cifar10-csp-public2"  #@param {type:"string"}
REGION = "us-central1"  #@param {type:"string"}

!gsutil mb -l {REGION} {BUCKET_PATH}
!gsutil -m cp -r {TARGET_ROOT_DIR}/* {BUCKET_PATH}

from tfx import v1 as tfx
from tfx.components.example_gen import utils
from tfx.proto import example_gen_pb2

_DATA_PATH = "gs://cifar10-csp-public"

splits = [
    example_gen_pb2.Input.Split(name="train", pattern="span-{SPAN}/train/*"),
    example_gen_pb2.Input.Split(name="val", pattern="span-{SPAN}/test/*"),
]

_, span, version = utils.calculate_splits_fingerprint_span_and_version(
    _DATA_PATH, splits
)

span, version
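The split patterns above assume the bucket is organized into span directories; a sketch of the layout that makes {SPAN} resolve (file names are illustrative):

gs://cifar10-csp-public/
  span-1/train/cifar10-train.tfrecord-00000-of-00001
  span-1/test/cifar10-test.tfrecord-00000-of-00001
  span-2/train/...
  span-2/test/...

With this layout, calculate_splits_fingerprint_span_and_version should resolve {SPAN} to the highest span-N present, so span here would come back as the latest span in the bucket.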
deep-diver/Continuous-Adaptation-for-Machine-Learning-System-to-Data-Changes
notebooks/02_TFX_Training_Pipeline.ipynb
from google.colab import auth

auth.authenticate_user()

import tensorflow as tf
print("TensorFlow version: {}".format(tf.__version__))
from tfx import v1 as tfx
print("TFX version: {}".format(tfx.__version__))
import kfp
print("KFP version: {}".format(kfp.__version__))

from google.cloud import aiplatform as vertex_ai
import os

GOOGLE_CLOUD_PROJECT = "gcp-ml-172005"  # @param {type:"string"}
GOOGLE_CLOUD_REGION = "us-central1"  # @param {type:"string"}
GCS_BUCKET_NAME = "cifar10-experimental-csp2"  # @param {type:"string"}
DATA_ROOT = "gs://cifar10-csp-public2"  # @param {type:"string"}

if not (GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_REGION and GCS_BUCKET_NAME):
    from absl import logging

    logging.error("Please set all required parameters.")

PIPELINE_NAME = "continuous-adaptation-for-data-changes"

# Path to various pipeline artifacts.
PIPELINE_ROOT = "gs://{}/pipeline_root/{}".format(GCS_BUCKET_NAME, PIPELINE_NAME)

# Paths for users' Python module.
MODULE_ROOT = "gs://{}/pipeline_module/{}".format(GCS_BUCKET_NAME, PIPELINE_NAME)

# This is the path where your model will be pushed for serving.
SERVING_MODEL_DIR = "gs://{}/serving_model/{}".format(GCS_BUCKET_NAME, PIPELINE_NAME)

print("PIPELINE_ROOT: {}".format(PIPELINE_ROOT))

_trainer_module_file = 'trainer.py'

%%writefile {_trainer_module_file}

from typing import List
from absl import logging
from tensorflow import keras
from tfx import v1 as tfx
import tensorflow as tf

_IMAGE_FEATURES = {
    "image": tf.io.FixedLenFeature([], tf.string),
    "label": tf.io.FixedLenFeature([], tf.int64),
}

_CONCRETE_INPUT = "numpy_inputs"

_TRAIN_BATCH_SIZE = 64
_EVAL_BATCH_SIZE = 64
_INPUT_SHAPE = (32, 32, 3)
_EPOCHS = 2


def _parse_fn(example):
    example = tf.io.parse_single_example(example, _IMAGE_FEATURES)
    image = tf.image.decode_jpeg(example["image"], channels=3)
    class_label = tf.cast(example["label"], tf.int32)
    return image, class_label


def _input_fn(file_pattern: List[str], batch_size: int) -> tf.data.Dataset:
    print(f"Reading data from: {file_pattern}")
    tfrecord_filenames = tf.io.gfile.glob(file_pattern[0] + ".gz")
    print(tfrecord_filenames)
    dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
    dataset = dataset.map(_parse_fn).batch(batch_size)
    return dataset.repeat()


def _make_keras_model() -> tf.keras.Model:
    """Creates a ResNet50-based model for classifying the CIFAR-10 data.

    Returns:
      A Keras Model.
    """
    inputs = keras.Input(shape=_INPUT_SHAPE)
    base_model = keras.applications.ResNet50(
        include_top=False, input_shape=_INPUT_SHAPE, pooling="avg"
    )
    base_model.trainable = False
    x = tf.keras.applications.resnet.preprocess_input(inputs)
    x = base_model(
        x, training=False
    )  # Ensures BatchNorm runs in inference mode in this model.
    outputs = keras.layers.Dense(10, activation="softmax")(x)
    model = keras.Model(inputs, outputs)

    model.compile(
        optimizer=keras.optimizers.Adam(),
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[keras.metrics.SparseCategoricalAccuracy()],
    )

    model.summary(print_fn=logging.info)
    return model


def _preprocess(bytes_input):
    decoded = tf.io.decode_jpeg(bytes_input, channels=3)
    resized = tf.image.resize(decoded, size=(32, 32))
    return resized


@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def preprocess_fn(bytes_inputs):
    decoded_images = tf.map_fn(
        _preprocess, bytes_inputs, dtype=tf.float32, back_prop=False
    )
    return {_CONCRETE_INPUT: decoded_images}


def _model_exporter(model: tf.keras.Model):
    m_call = tf.function(model.call).get_concrete_function(
        [tf.TensorSpec(shape=[None, 32, 32, 3], dtype=tf.float32, name=_CONCRETE_INPUT)]
    )

    @tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
    def serving_fn(bytes_inputs):
        # This function comes from the Computer Vision book from O'Reilly.
        labels = tf.constant(
            [
                "airplane",
                "automobile",
                "bird",
                "cat",
                "deer",
                "dog",
                "frog",
                "horse",
                "ship",
                "truck",
            ],
            dtype=tf.string,
        )

        images = preprocess_fn(bytes_inputs)

        probs = m_call(**images)
        indices = tf.argmax(probs, axis=1)
        pred_source = tf.gather(params=labels, indices=indices)
        pred_confidence = tf.reduce_max(probs, axis=1)
        return {"label": pred_source, "confidence": pred_confidence}

    return serving_fn


def run_fn(fn_args: tfx.components.FnArgs):
    print(fn_args)

    train_dataset = _input_fn(fn_args.train_files, batch_size=_TRAIN_BATCH_SIZE)
    eval_dataset = _input_fn(fn_args.eval_files, batch_size=_EVAL_BATCH_SIZE)

    model = _make_keras_model()
    model.fit(
        train_dataset,
        steps_per_epoch=fn_args.train_steps,
        validation_data=eval_dataset,
        validation_steps=fn_args.eval_steps,
        epochs=_EPOCHS,
    )

    _, acc = model.evaluate(eval_dataset, steps=fn_args.eval_steps)
    logging.info(f"Validation accuracy: {round(acc * 100, 2)}%")

    # The result of the training should be saved in the
    # `fn_args.serving_model_dir` directory.
    tf.saved_model.save(
        model,
        fn_args.serving_model_dir,
        signatures={"serving_default": _model_exporter(model)},
    )

os.path.join(MODULE_ROOT, _trainer_module_file)

_vertex_uploader_module_file = "vertex_uploader.py"
_vertex_deployer_module_file = "vertex_deployer.py"

%%writefile {_vertex_uploader_module_file}

import os
import tensorflow as tf
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.component.experimental.annotations import Parameter
from tfx.types.standard_artifacts import String
from google.cloud import aiplatform as vertex_ai
from tfx import v1 as tfx
from absl import logging


@component
def VertexUploader(
    project: Parameter[str],
    region: Parameter[str],
    model_display_name: Parameter[str],
    pushed_model_location: Parameter[str],
    serving_image_uri: Parameter[str],
    uploaded_model: tfx.dsl.components.OutputArtifact[String],
):
    vertex_ai.init(project=project, location=region)

    pushed_model_dir = os.path.join(
        pushed_model_location, tf.io.gfile.listdir(pushed_model_location)[-1]
    )
    logging.info(f"Model registry location: {pushed_model_dir}")

    vertex_model = vertex_ai.Model.upload(
        display_name=model_display_name,
        artifact_uri=pushed_model_dir,
        serving_container_image_uri=serving_image_uri,
        parameters_schema_uri=None,
        instance_schema_uri=None,
        explanation_metadata=None,
        explanation_parameters=None,
    )

    uploaded_model.set_string_custom_property(
        "model_resource_name", str(vertex_model.resource_name)
    )
    logging.info(f"Model resource: {str(vertex_model.resource_name)}")

%%writefile {_vertex_deployer_module_file}

from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.component.experimental.annotations import Parameter
from tfx.types.standard_artifacts import String
from google.cloud import aiplatform as vertex_ai
from tfx import v1 as tfx
from absl import logging


@component
def VertexDeployer(
    project: Parameter[str],
    region: Parameter[str],
    model_display_name: Parameter[str],
    deployed_model_display_name: Parameter[str],
):
    logging.info(f"Endpoint display: {deployed_model_display_name}")
    vertex_ai.init(project=project, location=region)

    endpoints = vertex_ai.Endpoint.list(
        filter=f"display_name={deployed_model_display_name}", order_by="update_time"
    )
    if len(endpoints) > 0:
        logging.info(f"Endpoint {deployed_model_display_name} already exists.")
        endpoint = endpoints[-1]
    else:
        endpoint = vertex_ai.Endpoint.create(deployed_model_display_name)

    model = vertex_ai.Model.list(
        filter=f"display_name={model_display_name}", order_by="update_time"
    )[-1]

    endpoint = vertex_ai.Endpoint.list(
        filter=f"display_name={deployed_model_display_name}", order_by="update_time"
    )[-1]

    deployed_model = endpoint.deploy(
        model=model,
        # Syntax from here: https://git.io/JBQDP
        traffic_split={"0": 100},
        machine_type="n1-standard-4",
        min_replica_count=1,
        max_replica_count=1,
    )

    logging.info(f"Model deployed to: {deployed_model}")

DATASET_DISPLAY_NAME = "cifar10"
VERSION = "tfx-1-2-0"
TFX_IMAGE_URI = f"gcr.io/{GOOGLE_CLOUD_PROJECT}/{DATASET_DISPLAY_NAME}:{VERSION}"

print(f"URI of the custom image: {TFX_IMAGE_URI}")

%%writefile Dockerfile
FROM gcr.io/tfx-oss-public/tfx:1.2.0
RUN mkdir -p custom_components
COPY custom_components/* ./custom_components/
RUN pip install --upgrade google-cloud-aiplatform

# Specify training worker configurations. To minimize costs we can even specify two
# different configurations: a beefier machine for the Endpoint model and a slightly
# less powerful machine for the mobile model.
TRAINING_JOB_SPEC = {
    "project": GOOGLE_CLOUD_PROJECT,
    "worker_pool_specs": [
        {
            "machine_spec": {
                "machine_type": "n1-standard-4",
                "accelerator_type": "NVIDIA_TESLA_K80",
                "accelerator_count": 1,
            },
            "replica_count": 1,
            "container_spec": {
                "image_uri": "gcr.io/tfx-oss-public/tfx:{}".format(tfx.__version__),
            },
        }
    ],
}

SERVING_JOB_SPEC = {
    "endpoint_name": PIPELINE_NAME.replace("-", "_"),  # '-' is not allowed.
    "project_id": GOOGLE_CLOUD_PROJECT,
    "min_replica_count": 1,
    "max_replica_count": 1,
    "machine_type": "n1-standard-2",
}

from datetime import datetime

TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")

import tfx

from tfx.orchestration import data_types
from tfx import v1 as tfx
from tfx.proto import example_gen_pb2, range_config_pb2
from tfx.components.example_gen import utils

from custom_components.vertex_uploader import VertexUploader
from custom_components.vertex_deployer import VertexDeployer


def _create_pipeline(
    input_config: data_types.RuntimeParameter,
    output_config: data_types.RuntimeParameter,
    pipeline_name: str,
    pipeline_root: str,
    data_root: str,
    serving_model_dir: str,
    trainer_module: str,
    project_id: str,
    region: str,
) -> tfx.dsl.Pipeline:
    """Creates a five-component CIFAR-10 pipeline with TFX."""
    example_gen = tfx.components.ImportExampleGen(
        input_base=data_root, input_config=input_config, output_config=output_config
    )

    # Trainer.
    trainer = tfx.extensions.google_cloud_ai_platform.Trainer(
        module_file=trainer_module,
        examples=example_gen.outputs["examples"],
        train_args=tfx.proto.TrainArgs(splits=["train"], num_steps=50000 // 64),
        eval_args=tfx.proto.EvalArgs(splits=["val"], num_steps=10000 // 64),
        custom_config={
            tfx.extensions.google_cloud_ai_platform.ENABLE_VERTEX_KEY: True,
            tfx.extensions.google_cloud_ai_platform.VERTEX_REGION_KEY: region,
            tfx.extensions.google_cloud_ai_platform.TRAINING_ARGS_KEY: TRAINING_JOB_SPEC,
            "use_gpu": True,
        },
    ).with_id("trainer")

    # Pushes the model to a filesystem destination.
    pushed_model_location = os.path.join(serving_model_dir, "resnet50")
    resnet_pusher = tfx.components.Pusher(
        model=trainer.outputs["model"],
        push_destination=tfx.proto.PushDestination(
            filesystem=tfx.proto.PushDestination.Filesystem(
                base_directory=pushed_model_location
            )
        ),
    ).with_id("resnet_pusher")

    # Vertex AI upload.
    model_display_name = "resnet_cifar_latest"
    uploader = VertexUploader(
        project=project_id,
        region=region,
        model_display_name=model_display_name,
        pushed_model_location=pushed_model_location,
        serving_image_uri="us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-5:latest",
    ).with_id("vertex_uploader")
    uploader.add_upstream_node(resnet_pusher)

    # Create an endpoint.
    deployer = VertexDeployer(
        project=project_id,
        region=region,
        model_display_name=model_display_name,
        deployed_model_display_name=model_display_name + "_" + TIMESTAMP,
    ).with_id("vertex_deployer")
    deployer.add_upstream_node(uploader)

    components = [
        example_gen,
        trainer,
        resnet_pusher,
        uploader,
        deployer,
    ]

    return tfx.dsl.Pipeline(
        pipeline_name=pipeline_name,
        pipeline_root=pipeline_root,
        components=components,
        enable_cache=True,
    )

import os

PIPELINE_DEFINITION_FILE = PIPELINE_NAME + "_pipeline.json"

# Important: We need to pass the custom Docker image URI to the
# `KubeflowV2DagRunnerConfig` for it to take effect.
runner = tfx.orchestration.experimental.KubeflowV2DagRunner(
    config=tfx.orchestration.experimental.KubeflowV2DagRunnerConfig(
        default_image=TFX_IMAGE_URI
    ),
    output_filename=PIPELINE_DEFINITION_FILE,
)

_ = runner.run(
    _create_pipeline(
        input_config=tfx.dsl.experimental.RuntimeParameter(
            name="input-config",
            default='{"input_config": {"splits": [{"name":"train", "pattern":"span-1/train/tfrecord"}, {"name":"val", "pattern":"span-1/test/tfrecord"}]}}',
            ptype=str,
        ),
        output_config=tfx.dsl.experimental.RuntimeParameter(
            name="output-config",
            default="{}",
            ptype=str,
        ),
        pipeline_name=PIPELINE_NAME,
        pipeline_root=PIPELINE_ROOT,
        data_root=DATA_ROOT,
        serving_model_dir=SERVING_MODEL_DIR,
        trainer_module=os.path.join(MODULE_ROOT, _trainer_module_file),
        project_id=GOOGLE_CLOUD_PROJECT,
        region=GOOGLE_CLOUD_REGION,
    )
)

from kfp.v2.google import client

pipelines_client = client.AIPlatformClient(
    project_id=GOOGLE_CLOUD_PROJECT,
    region=GOOGLE_CLOUD_REGION,
)

import json
from tfx.orchestration import data_types

_ = pipelines_client.create_run_from_job_spec(
    PIPELINE_DEFINITION_FILE,
    enable_caching=False,
    parameter_values={
        "input-config": json.dumps(
            {
                "splits": [
                    {"name": "train", "pattern": "span-[12]/train/*.tfrecord"},
                    {"name": "val", "pattern": "span-[12]/test/*.tfrecord"},
                ]
            }
        ),
        "output-config": json.dumps({}),
    },
)
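One step the notebook keeps implicit: the custom image referenced by TFX_IMAGE_URI must be built from the Dockerfile above and pushed before the compiled pipeline can run, since KubeflowV2DagRunnerConfig only records the URI. A minimal sketch using Cloud Build (run from the directory containing the Dockerfile):

!gcloud builds submit --tag {TFX_IMAGE_URI} .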
deep-diver/Continuous-Adaptation-for-Machine-Learning-System-to-Data-Changes
notebooks/03_Batch_Prediction_Pipeline.ipynb
from google.colab import auth

auth.authenticate_user()

# @title
from fastdot.core import *

tfx_components = [
    "FileListGen",
    "BatchPredictionGen",
    "PerformanceEvaluator",
    "SpanPreparator",
    "PipelineTrigger",
]
block = "TFX Component Workflow"

g = graph_items(seq_cluster(tfx_components, block))
g

_file_list_gen_module_file = "file_list_gen.py"

%%writefile {_file_list_gen_module_file}

import tfx
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.component.experimental.annotations import Parameter
from tfx.dsl.component.experimental.annotations import OutputArtifact
from tfx.types.standard_artifacts import String
from google.cloud import storage
from absl import logging


@component
def FileListGen(
    outpath: OutputArtifact[String],
    project: Parameter[str],
    gcs_source_bucket: Parameter[str],
    gcs_source_prefix: Parameter[str] = "",
    output_filename: Parameter[str] = "test-images.txt",
):
    logging.info("FileListGen started")

    client = storage.Client(project=project)
    bucket = client.get_bucket(gcs_source_bucket)
    blobs = bucket.list_blobs(prefix=gcs_source_prefix)
    logging.info("Successfully retrieved the file (jpg) list from the GCS path")

    f = open(output_filename, "w")
    for blob in blobs:
        if blob.name.split(".")[-1] == "jpg":
            prefix = ""
            if gcs_source_prefix != "":
                prefix = f"/{gcs_source_prefix}"
            line = f"gs://{gcs_source_bucket}{prefix}/{blob.name}\n"
            f.write(line)
    f.close()
    logging.info(
        f"Successfully created the file list file ({output_filename}) in local storage"
    )

    prefix = ""
    if gcs_source_prefix != "":
        prefix = f"{gcs_source_prefix}/"
    blob = bucket.blob(f"{prefix}{output_filename}")
    blob.upload_from_filename(output_filename)
    logging.info(f"Successfully uploaded the file list ({prefix}{output_filename})")

    outpath.value = gcs_source_bucket + "/" + prefix + output_filename

_batch_pred_module_file = 'batch_prediction_vertex.py'

%%writefile {_batch_pred_module_file}

from google.cloud import storage
from tfx.dsl.component.experimental.annotations import Parameter, InputArtifact
from tfx.dsl.component.experimental.decorators import component
from tfx.types import artifact_utils
from tfx.types.standard_artifacts import String
import google.cloud.aiplatform as vertex_ai
from typing import Union, Sequence
from absl import logging


@component
def BatchPredictionGen(
    gcs_source: InputArtifact[String],
    project: Parameter[str],
    location: Parameter[str],
    model_resource_name: Parameter[str],
    job_display_name: Parameter[str],
    gcs_destination: Parameter[str],
    instances_format: Parameter[str] = "file-list",
    machine_type: Parameter[str] = "n1-standard-2",
    accelerator_count: Parameter[int] = 0,
    accelerator_type: Parameter[str] = None,
    starting_replica_count: Parameter[int] = 1,
    max_replica_count: Parameter[int] = 1,
):
    storage_client = storage.Client()

    # Read the GCS source (gcs_source contains the full path of the GCS object).
    # 1-1. Get the bucket name from gcs_source.
    gcs_source_uri = gcs_source.uri.split("//")[1:][0].split("/")
    bucketname = gcs_source_uri[0]
    bucket = storage_client.get_bucket(bucketname)
    logging.info(f"bucketname: {bucketname}")

    # 1-2. Get the object path without the bucket name.
    objectpath = "/".join(gcs_source_uri[1:])

    # 1-3. Read the object to get the value set by OutputArtifact from FileListGen.
    blob = bucket.blob(objectpath)
    logging.info(f"objectpath: {objectpath}")
    gcs_source = f"gs://{blob.download_as_text()}"

    # Get the model.
    vertex_ai.init(project=project, location=location)
    model = vertex_ai.Model.list(
        filter=f"display_name={model_resource_name}", order_by="update_time"
    )[-1]

    # Batch predictions.
    logging.info("Starting batch prediction job.")
    logging.info(f"GCS path where file list is: {gcs_source}")
    batch_prediction_job = model.batch_predict(
        job_display_name=job_display_name,
        instances_format=instances_format,
        gcs_source=gcs_source,
        gcs_destination_prefix=gcs_destination,
        machine_type=machine_type,
        accelerator_count=accelerator_count,
        accelerator_type=accelerator_type,
        starting_replica_count=starting_replica_count,
        max_replica_count=max_replica_count,
        sync=True,
    )
    logging.info(batch_prediction_job.display_name)
    logging.info(batch_prediction_job.resource_name)
    logging.info(batch_prediction_job.state)

_evaluator_module_file = 'batch_pred_evaluator.py'

%%writefile {_evaluator_module_file}

# Reference: https://bit.ly/vertex-batch
from tfx.dsl.component.experimental.annotations import Parameter
from tfx.dsl.component.experimental.annotations import OutputArtifact
from tfx.dsl.component.experimental.decorators import component
from tfx.types.experimental.simple_artifacts import Dataset
from absl import logging
import os
import json


@component
def PerformanceEvaluator(
    gcs_destination: Parameter[str],
    local_directory: Parameter[str],
    threshold: Parameter[float],
    trigger_pipeline: OutputArtifact[Dataset],
):
    full_gcs_results_dir = f"{gcs_destination}/{local_directory}"

    # Create missing directories.
    os.makedirs(local_directory, exist_ok=True)

    # Copy the batch prediction results from GCS.
    os.system(f"gsutil -m cp -r {full_gcs_results_dir} {local_directory}")

    # Get the most recently modified directory.
    latest_directory = max(
        [os.path.join(local_directory, d) for d in os.listdir(local_directory)],
        key=os.path.getmtime,
    )

    # Collect the downloaded result files.
    results_files = []
    for dirpath, subdirs, files in os.walk(latest_directory):
        for file in files:
            if file.startswith("prediction.results"):
                results_files.append(os.path.join(dirpath, file))

    # Consolidate all the results into a list.
    results = []
    for results_file in results_files:
        with open(results_file, "r") as file:
            results.extend([json.loads(line) for line in file.readlines()])

    # Calculate performance.
    num_correct = 0
    for result in results:
        label = os.path.basename(result["instance"]).split("_")[0]
        prediction = result["prediction"]["label"]
        if label == prediction:
            num_correct = num_correct + 1

    accuracy = num_correct / len(results)
    logging.info(f"Accuracy: {accuracy*100}%")

    trigger_pipeline.set_string_custom_property("result", str(accuracy >= threshold))

_span_preparator_module_file = 'span_preparator.py'

%%writefile {_span_preparator_module_file}

import tfx
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.component.experimental.annotations import Parameter
from tfx.dsl.component.experimental.annotations import OutputArtifact, InputArtifact
from tfx.types.experimental.simple_artifacts import Dataset
from google.cloud import storage
from absl import logging
from datetime import datetime
import tensorflow as tf
import random
import gzip
import os

# Label-mapping.
LABEL_DICT = {
    "airplane": 0,
    "automobile": 1,
    "bird": 2,
    "cat": 3,
    "deer": 4,
    "dog": 5,
    "frog": 6,
    "horse": 7,
    "ship": 8,
    "truck": 9,
}


# Images are byte-strings.
def _bytestring_feature(list_of_bytestrings):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=list_of_bytestrings))


# Classes are integers.
def _int_feature(list_of_ints):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=list_of_ints))


# Prepares a record for the TFRecord file; a record contains the image and its label.
def to_tfrecord(img_bytes, label):
    feature = {
        "image": _bytestring_feature([img_bytes]),
        "label": _int_feature([label]),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature))


def write_tfrecords(filepaths, dest_gcs, tfrecord_filename, new_span, is_train):
    # For this project, we are serializing the images in one TFRecord only.
    # For more realistic purposes, this should be sharded.
    folder = "train" if is_train else "test"

    with tf.io.TFRecordWriter(tfrecord_filename) as writer:
        for path in filepaths:
            image_string = tf.io.read_file(path).numpy()
            class_name = path.split("/")[-1].split("_")[0]
            label = LABEL_DICT[class_name]
            example = to_tfrecord(image_string, label)
            writer.write(example.SerializeToString())

    # Copy the TFRecord file over to the GCS bucket and remove the temporary file.
    logging.info(f"gsutil cp {tfrecord_filename} {dest_gcs}/span-{new_span}/{folder}/")
    os.system(f"gsutil cp {tfrecord_filename} {dest_gcs}/span-{new_span}/{folder}/")
    os.remove(tfrecord_filename)


@component
def SpanPreparator(
    is_retrain: InputArtifact[Dataset],
    gcs_source_bucket: Parameter[str],
    gcs_destination_bucket: Parameter[str],
    latest_span_id: OutputArtifact[Dataset],
    gcs_source_prefix: Parameter[str] = "",
):
    if is_retrain.get_string_custom_property("result") == "False":
        last_span_str = tf.io.gfile.glob(f"{gcs_destination_bucket}/span-*")[-1]
        last_span = int(last_span_str.split("-")[-1])
        new_span = last_span + 1

        timestamp = datetime.utcnow().strftime("%y%m%d-%H%M%S")

        image_paths = tf.io.gfile.glob(f"gs://{gcs_source_bucket}/*.jpg")
        logging.info(image_paths)
        random.shuffle(image_paths)

        val_split = 0.2
        split_index = int(len(image_paths) * (1 - val_split))
        training_paths = image_paths[:split_index]
        validation_paths = image_paths[split_index:]

        write_tfrecords(
            training_paths,
            gcs_destination_bucket,
            tfrecord_filename=f"new_training_data_{timestamp}.tfrecord",
            new_span=new_span,
            is_train=True,
        )
        write_tfrecords(
            validation_paths,
            gcs_destination_bucket,
            tfrecord_filename=f"new_validation_data_{timestamp}.tfrecord",
            new_span=new_span,
            is_train=False,
        )

        logging.info("Removing images from batch prediction bucket.")
        os.system(
            f"gsutil mv gs://{gcs_source_bucket}/{gcs_source_prefix} gs://{gcs_source_bucket}/{gcs_source_prefix}_old"
        )
        # os.system(f"gsutil rm -rf gs://{gcs_source_bucket}/*")

        latest_span_id.set_string_custom_property("latest_span", str(new_span))

_pipeline_trigger_module_file = 'training_pipeline_trigger.py'

%%writefile {_pipeline_trigger_module_file}

import json
from google.cloud import storage
from kfp.v2.google.client import AIPlatformClient
from tfx.dsl.component.experimental.annotations import Parameter, InputArtifact
from tfx.dsl.component.experimental.decorators import component
from tfx.types.experimental.simple_artifacts import Dataset
from absl import logging


@component
def PipelineTrigger(
    is_retrain: InputArtifact[Dataset],
    latest_span_id: InputArtifact[Dataset],
    pipeline_spec_path: Parameter[str],
    project_id: Parameter[str],
    region: Parameter[str],
):
    if is_retrain.get_string_custom_property('result') == 'False':
        # Check if the pipeline spec exists.
        storage_client = storage.Client()

        path_parts = pipeline_spec_path.replace("gs://", "").split("/")
        bucket_name = path_parts[0]
        blob_name = "/".join(path_parts[1:])

        bucket = storage_client.bucket(bucket_name)
        blob = storage.Blob(bucket=bucket, name=blob_name)
        if not blob.exists(storage_client):
            raise ValueError(f"{pipeline_spec_path} does not exist.")

        # Initialize the Vertex AI API client and submit the pipeline for execution.
        api_client = AIPlatformClient(project_id=project_id, region=region)

        # Fetch the latest span.
        latest_span = latest_span_id.get_string_custom_property('latest_span')

        # Create a training job from the pipeline spec.
        response = api_client.create_run_from_job_spec(
            pipeline_spec_path,
            enable_caching=False,
            parameter_values={
                'input-config': json.dumps({
                    'splits': [
                        {'name': 'train', 'pattern': f'span-[{int(latest_span)-1}{latest_span}]/train/*.tfrecord'},
                        {'name': 'val', 'pattern': f'span-[{int(latest_span)-1}{latest_span}]/test/*.tfrecord'}
                    ]
                }),
                'output-config': json.dumps({})
            })
        logging.info(response)

# This bucket will be responsible for storing the pipeline-related artifacts.
GOOGLE_CLOUD_PROJECT = "gcp-ml-172005"  # @param {type:"string"}
GOOGLE_CLOUD_REGION = "us-central1"
GCS_BUCKET_NAME = "cifar10-experimental-csp2"  # @param {type:"string"}

MODEL_RESOURCE_NAME = "resnet_cifar_latest"  # @param {type: "string"}
TEST_FILENAME = "test-images.txt"  # @param {type:"string"}
TEST_GCS_BUCKET = "batch-prediction-collection-3"  # @param {type:"string"}
TEST_GCS_PREFIX = ""  # @param {type: "string"}
TRAINING_PIPELINE_SPEC = "gs://cifar10-experimental-csp2/pipeline_root/continuous-adaptation-for-data-changes/continuous-adaptation-for-data-changes_pipeline.json"  # @param {type: "string"}
TRAINING_DATA_PATH = "gs://cifar10-csp-public2"  # @param {type: "string"}

if not (GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_REGION and GCS_BUCKET_NAME):
    from absl import logging

    logging.error("Please set all required parameters.")

PIPELINE_NAME = 'continuous-adaptation-for-data-changes-batch'

# Path to various pipeline artifacts.
PIPELINE_ROOT = 'gs://{}/pipeline_root/{}'.format(GCS_BUCKET_NAME, PIPELINE_NAME)

print('PIPELINE_ROOT: {}'.format(PIPELINE_ROOT))

DISPLAY_NAME = "batch-predictions-pipeline"
VERSION = "tfx-1-2-0-34"
TFX_IMAGE_URI = f"gcr.io/{GOOGLE_CLOUD_PROJECT}/{DISPLAY_NAME}:{VERSION}"

print(f"URI of the custom image: {TFX_IMAGE_URI}")

%%writefile Dockerfile
FROM gcr.io/tfx-oss-public/tfx:1.2.0
RUN mkdir -p custom_components
COPY custom_components/* ./custom_components/
RUN pip install --upgrade google-cloud-aiplatform google-cloud-storage kfp==1.6.1

from datetime import datetime

TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")

from tfx.orchestration import data_types
from tfx import v1 as tfx
from tfx.orchestration.pipeline import Pipeline

from custom_components.file_list_gen import FileListGen
from custom_components.batch_prediction_vertex import BatchPredictionGen
from custom_components.batch_pred_evaluator import PerformanceEvaluator
from custom_components.span_preparator import SpanPreparator
from custom_components.training_pipeline_trigger import PipelineTrigger


def _create_pipeline(
    pipeline_name: str,
    pipeline_root: str,
    data_gcs_bucket: str,
    data_gcs_prefix: data_types.RuntimeParameter,
    batch_job_gcs: str,
    job_display_name: str,
    model_resource_name: str,
    project_id: str,
    region: str,
    threshold: float,
    data_gcs_destination: str,
    training_pipeline_spec: str,
) -> Pipeline:
    # Generate a file list for batch predictions.
    # More details on the structure of this file here:
    # https://bit.ly/3BzfHVu.
    filelist_gen = FileListGen(
        project=project_id,
        gcs_source_bucket=data_gcs_bucket,
        gcs_source_prefix=data_gcs_prefix,
    ).with_id("filelist_gen")

    # Submit a batch prediction job.
    batch_pred_component = BatchPredictionGen(
        project=project_id,
        location=region,
        job_display_name=job_display_name,
        model_resource_name=model_resource_name,
        gcs_source=filelist_gen.outputs["outpath"],
        gcs_destination=f"gs://{batch_job_gcs}/results/",
        accelerator_count=0,
        accelerator_type=None,
    ).with_id("bulk_inferer_vertex")
    batch_pred_component.add_upstream_node(filelist_gen)

    # Evaluate the performance of the predictions.
    # In a real-world project, this evaluation takes place
    # separately, typically with the help of domain experts.
    final_gcs_destination = f"gs://{batch_job_gcs}/results/"
    evaluator = PerformanceEvaluator(
        gcs_destination=f'gs://{final_gcs_destination.split("/")[2]}',
        local_directory=final_gcs_destination.split("/")[-2],
        threshold=threshold,
    ).with_id("batch_prediction_evaluator")
    evaluator.add_upstream_node(batch_pred_component)

    span_preparator = SpanPreparator(
        is_retrain=evaluator.outputs["trigger_pipeline"],
        gcs_source_bucket=data_gcs_bucket,
        gcs_source_prefix=data_gcs_prefix,
        gcs_destination_bucket=data_gcs_destination,
    ).with_id("span_preparator")
    span_preparator.add_upstream_node(evaluator)

    trigger = PipelineTrigger(
        is_retrain=evaluator.outputs["trigger_pipeline"],
        latest_span_id=span_preparator.outputs["latest_span_id"],
        pipeline_spec_path=training_pipeline_spec,
        project_id=project_id,
        region=region,
    ).with_id("training_pipeline_trigger")
    trigger.add_upstream_node(span_preparator)

    components = [
        filelist_gen,
        batch_pred_component,
        evaluator,
        span_preparator,
        trigger,
    ]

    return Pipeline(
        pipeline_name=pipeline_name,
        pipeline_root=pipeline_root,
        components=components,
        enable_cache=True,
    )

import os
import tfx

from tfx.orchestration import data_types
from tfx.orchestration.kubeflow.v2.kubeflow_v2_dag_runner import KubeflowV2DagRunner
from tfx.orchestration.kubeflow.v2.kubeflow_v2_dag_runner import (
    KubeflowV2DagRunnerConfig,
)

PIPELINE_DEFINITION_FILE = PIPELINE_NAME + "_pipeline.json"
THRESHOLD = 0.9

# Important: We need to pass the custom Docker image URI to the
# `KubeflowV2DagRunnerConfig` for it to take effect.
runner = KubeflowV2DagRunner(
    config=KubeflowV2DagRunnerConfig(default_image=TFX_IMAGE_URI),
    output_filename=PIPELINE_DEFINITION_FILE,
)

_ = runner.run(
    _create_pipeline(
        pipeline_name=PIPELINE_NAME,
        pipeline_root=PIPELINE_ROOT,
        data_gcs_bucket=TEST_GCS_BUCKET,
        data_gcs_prefix=data_types.RuntimeParameter(
            name="data_gcs_prefix", default="", ptype=str
        ),
        batch_job_gcs=GCS_BUCKET_NAME,
        job_display_name=f"{MODEL_RESOURCE_NAME}_{TIMESTAMP}",
        project_id=GOOGLE_CLOUD_PROJECT,
        region=GOOGLE_CLOUD_REGION,
        model_resource_name=MODEL_RESOURCE_NAME,
        threshold=THRESHOLD,
        data_gcs_destination=TRAINING_DATA_PATH,
        training_pipeline_spec=TRAINING_PIPELINE_SPEC,
    )
)

from kfp.v2.google import client

pipelines_client = client.AIPlatformClient(
    project_id=GOOGLE_CLOUD_PROJECT,
    region=GOOGLE_CLOUD_REGION,
)

_ = pipelines_client.create_run_from_job_spec(
    PIPELINE_DEFINITION_FILE,
    enable_caching=False,
    parameter_values={"data_gcs_prefix": "2021-10"},
)
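The string slicing that feeds PerformanceEvaluator is easier to verify with the concrete values used above:

final_gcs_destination = "gs://cifar10-experimental-csp2/results/"
gcs_destination = f'gs://{final_gcs_destination.split("/")[2]}'  # "gs://cifar10-experimental-csp2"
local_directory = final_gcs_destination.split("/")[-2]           # "results"

So the evaluator copies gs://cifar10-experimental-csp2/results into a local "results" directory, matching how full_gcs_results_dir is rebuilt inside the component.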
deep-diver/Continuous-Adaptation-for-Machine-Learning-System-to-Data-Changes
notebooks/04_Cloud_Scheduler_Trigger.ipynb
from google.colab import auth

auth.authenticate_user()

GOOGLE_CLOUD_PROJECT = "gcp-ml-172005"  # @param {type:"string"}
GOOGLE_CLOUD_REGION = "us-central1"
GCS_BUCKET_NAME = "cifar10-experimental-csp2"  # @param {type:"string"}

PIPELINE_NAME = "continuous-adaptation-for-data-changes-batch"  # @param {type:"string"}
PIPELINE_ROOT = "gs://{}/pipeline_root/{}".format(GCS_BUCKET_NAME, PIPELINE_NAME)
PIPELINE_LOCATION = f"{PIPELINE_ROOT}/{PIPELINE_NAME}_pipeline.json"
PUBSUB_TOPIC = f"trigger-{PIPELINE_NAME}"
SCHEDULER_JOB_NAME = f"scheduler-job-{PUBSUB_TOPIC}"

IMAGE_LOCATION_BUCKET = "batch-prediction-collection-3"  # @param {type:"string"}

IMAGE_LOCATION_BUCKET

_cloud_function_dep = "cloud_function/requirements.txt"

%%writefile {_cloud_function_dep}
kfp==1.6.2
google-cloud-aiplatform
google-cloud-storage

_cloud_function_file = "cloud_function/main.py"

%%writefile {_cloud_function_file}

import os
import re
import json
import logging
import base64
from datetime import datetime

from kfp.v2.google.client import AIPlatformClient
from google.cloud import storage


def get_number_of_images(storage_client, bucket, latest_directory):
    blobs = storage_client.list_blobs(bucket, prefix=latest_directory)

    count = 0
    for blob in blobs:
        if blob.name.split(".")[-1] == "jpg":
            count = count + 1

    return count


def is_there_enough_images(storage_client, bucket, latest_directory, threshold):
    number_of_images = get_number_of_images(storage_client, bucket, latest_directory)
    print(f"number of images = {number_of_images}")
    return number_of_images >= threshold


def get_latest_directory(storage_client, bucket):
    blobs = storage_client.list_blobs(bucket)

    folders = list(
        set(
            [
                os.path.dirname(blob.name)
                for blob in blobs
                if bool(
                    re.match(
                        "[1-9][0-9][0-9][0-9]-[0-1][0-9]", os.path.dirname(blob.name)
                    )
                )
                is True
            ]
        )
    )

    folders.sort(key=lambda date: datetime.strptime(date, "%Y-%m"))
    # folders is sorted in ascending order, so the last entry is the
    # latest "YYYY-MM" directory.
    print(folders[-1])
    return folders[-1]


def trigger_pipeline(event, context):
    # Parse the environment variables.
    project = os.getenv("PROJECT")
    region = os.getenv("REGION")
    gcs_pipeline_file_location = os.getenv("GCS_PIPELINE_FILE_LOCATION")
    gcs_image_bucket = os.getenv("GCS_IMAGE_BUCKET")

    print(project)
    print(region)
    print(gcs_pipeline_file_location)
    print(gcs_image_bucket)

    threshold = 100

    # Check if the pipeline file exists in the provided GCS bucket.
    storage_client = storage.Client()
    latest_directory = get_latest_directory(storage_client, gcs_image_bucket)

    if is_there_enough_images(
        storage_client, gcs_image_bucket, latest_directory, threshold
    ):
        path_parts = gcs_pipeline_file_location.replace("gs://", "").split("/")
        pipeline_bucket = path_parts[0]
        pipeline_blob = "/".join(path_parts[1:])

        pipeline_bucket = storage_client.bucket(pipeline_bucket)
        blob = storage.Blob(bucket=pipeline_bucket, name=pipeline_blob)
        if not blob.exists(storage_client):
            raise ValueError(f"{gcs_pipeline_file_location} does not exist.")

        # Initialize the Vertex AI API client and submit the pipeline for execution.
        api_client = AIPlatformClient(project_id=project, region=region)

        response = api_client.create_run_from_job_spec(
            job_spec_path=gcs_pipeline_file_location,
            parameter_values={"data_gcs_prefix": latest_directory},
            enable_caching=True,
        )

        logging.info(response)

ENV_VARS=f"""\
PROJECT={GOOGLE_CLOUD_PROJECT},\
REGION={GOOGLE_CLOUD_REGION},\
GCS_PIPELINE_FILE_LOCATION={PIPELINE_LOCATION},\
GCS_IMAGE_BUCKET={IMAGE_LOCATION_BUCKET}
"""

!echo {ENV_VARS}

BUCKET = f'gs://{GCS_BUCKET_NAME}'
CLOUD_FUNCTION_NAME = f'trigger-{PIPELINE_NAME}-fn'

!gcloud functions deploy {CLOUD_FUNCTION_NAME} \
    --region={GOOGLE_CLOUD_REGION} \
    --trigger-topic={PUBSUB_TOPIC} \
    --runtime=python37 \
    --source=cloud_function \
    --entry-point=trigger_pipeline \
    --stage-bucket={BUCKET} \
    --update-env-vars={ENV_VARS}

import IPython

cloud_fn_url = f"https://console.cloud.google.com/functions/details/{GOOGLE_CLOUD_REGION}/{CLOUD_FUNCTION_NAME}"
html = (
    f'See the Cloud Function details <a href="{cloud_fn_url}" target="_blank">here</a>.'
)
IPython.display.display(IPython.display.HTML(html))
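The notebook defines PUBSUB_TOPIC and SCHEDULER_JOB_NAME, but the cells shown stop after deploying the function; the remaining wiring is the topic itself plus a Cloud Scheduler job that publishes to it on a cadence. A minimal sketch, assuming the every-three-hours cron expression and message body are illustrative:

!gcloud pubsub topics create {PUBSUB_TOPIC}
!gcloud scheduler jobs create pubsub {SCHEDULER_JOB_NAME} \
    --schedule="0 */3 * * *" \
    --topic={PUBSUB_TOPIC} \
    --message-body="trigger"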
deep-diver/Continuous-Adaptation-for-Machine-Learning-System-to-Data-Changes
notebooks/98_Batch_Prediction_Test.ipynb
from google.colab import auth

auth.authenticate_user()

GOOGLE_CLOUD_PROJECT = "central-hangar-321813"  # @param {type:"string"}
GOOGLE_CLOUD_REGION = "us-central1"  # @param {type:"string"}
MODEL_NAME = "resnet_cifar_latest"  # @param {type:"string"}
TEST_FILENAME = "test-images.txt"  # @param {type:"string"}
TEST_GCS_BUCKET = "gs://batch-prediction-collection"  # @param {type:"string"}
TEST_LOCAL_PATH = "Continuous-Adaptation-for-Machine-Learning-System-to-Data-Changes/notebooks/test-images"  # @param {type:"string"}

from os import listdir

test_files = listdir(TEST_LOCAL_PATH)
test_files

f = open(TEST_FILENAME, "w")
for filename in test_files:
    f.write(f"{TEST_GCS_BUCKET}/{filename}\n")
f.close()

import google.cloud.aiplatform as aiplatform
from typing import Union, Sequence


def create_batch_prediction_job_dedicated_resources_sample(
    project: str,
    location: str,
    model_resource_name: str,
    job_display_name: str,
    gcs_source: Union[str, Sequence[str]],
    gcs_destination: str,
    instances_format: str = "file-list",
    machine_type: str = "n1-standard-2",
    accelerator_count: int = 1,
    accelerator_type: str = "NVIDIA_TESLA_K80",
    starting_replica_count: int = 1,
    max_replica_count: int = 1,
    sync: bool = True,
):
    aiplatform.init(project=project, location=location)

    my_model = aiplatform.Model(model_resource_name)

    batch_prediction_job = my_model.batch_predict(
        job_display_name=job_display_name,
        instances_format=instances_format,
        gcs_source=gcs_source,
        gcs_destination_prefix=gcs_destination,
        machine_type=machine_type,
        accelerator_count=accelerator_count,
        accelerator_type=accelerator_type,
        starting_replica_count=starting_replica_count,
        max_replica_count=max_replica_count,
        sync=sync,
    )

    batch_prediction_job.wait()

    print(batch_prediction_job.display_name)
    print(batch_prediction_job.resource_name)
    print(batch_prediction_job.state)
    return batch_prediction_job

from datetime import datetime

TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")

create_batch_prediction_job_dedicated_resources_sample(
    project=GOOGLE_CLOUD_PROJECT,
    location=GOOGLE_CLOUD_REGION,
    model_resource_name="2008244793993330688",
    job_display_name=f"{MODEL_NAME}-{TIMESTAMP}",
    gcs_source=[f"{TEST_GCS_BUCKET}/{TEST_FILENAME}"],
    gcs_destination=f"{TEST_GCS_BUCKET}/results/",
    accelerator_type=None,
    accelerator_count=None,
)

import os
import json

RESULTS_DIRECTORY = "results"
RESULTS_DIRECTORY_FULL = f'{TEST_GCS_BUCKET}/{RESULTS_DIRECTORY}'

# Create missing directories.
os.makedirs(RESULTS_DIRECTORY, exist_ok=True)

# Get the Cloud Storage paths for each result.
!gsutil -m cp -r $RESULTS_DIRECTORY_FULL $RESULTS_DIRECTORY

# Get the most recently modified directory.
latest_directory = max(
    [os.path.join(RESULTS_DIRECTORY, d) for d in os.listdir(RESULTS_DIRECTORY)],
    key=os.path.getmtime,
)

# Get the downloaded results in the directory.
results_files = []
for dirpath, subdirs, files in os.walk(latest_directory):
    for file in files:
        if file.startswith("prediction.results"):
            results_files.append(os.path.join(dirpath, file))

# Consolidate all the results into a list.
results = []
for results_file in results_files:
    with open(results_file, "r") as file:
        results.extend([json.loads(line) for line in file.readlines()])

results

num_correct = 0
for result in results:
    label = os.path.basename(result["instance"]).split("_")[0]
    prediction = result["prediction"]["label"]
    print(f"label({label})/prediction({prediction})")

    if label == prediction:
        num_correct = num_correct + 1

print()
print(f"number of results: {len(results)}")
print(f"number of correct: {num_correct}")
print(f"Accuracy: {num_correct/len(results)}")
deep-diver/mlops-hf-tf-vision-models
advanced_part1/kubeflow_runner.py
from absl import logging

from tfx import v1 as tfx
from tfx.orchestration.kubeflow.v2 import kubeflow_v2_dag_runner as runner

from pipeline import configs
from pipeline import kubeflow_pipeline


def run():
    runner_config = runner.KubeflowV2DagRunnerConfig(
        default_image=configs.PIPELINE_IMAGE
    )

    runner.KubeflowV2DagRunner(
        config=runner_config,
        output_filename=configs.PIPELINE_NAME + "_pipeline.json",
    ).run(
        kubeflow_pipeline.create_pipeline(
            pipeline_name=configs.PIPELINE_NAME,
            pipeline_root=configs.PIPELINE_ROOT,
            data_path=configs.DATA_PATH,
            schema_path=configs.SCHEMA_PATH,
            modules={
                "training_fn": configs.TRAINING_FN,
                "preprocessing_fn": configs.PREPROCESSING_FN,
            },
            eval_configs=configs.EVAL_CONFIGS,
            ai_platform_training_args=configs.GCP_AI_PLATFORM_TRAINING_ARGS,
            ai_platform_serving_args=configs.GCP_AI_PLATFORM_SERVING_ARGS,
            example_gen_beam_args=configs.EXAMPLE_GEN_BEAM_ARGS,
            transform_beam_args=configs.TRANSFORM_BEAM_ARGS,
        )
    )


if __name__ == "__main__":
    logging.set_verbosity(logging.INFO)
    run()
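Running this module only compiles the pipeline into <PIPELINE_NAME>_pipeline.json; submitting it to Vertex AI Pipelines is a separate step. A minimal sketch with the aiplatform SDK, where the project, region, and file name are placeholders:

from google.cloud import aiplatform

aiplatform.init(project="my-project", location="us-central1")
job = aiplatform.PipelineJob(
    display_name="my-pipeline",
    template_path="my-pipeline_pipeline.json",
)
job.submit()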
deep-diver/mlops-hf-tf-vision-models
advanced_part1/local_runner.py
import os

from absl import logging

from tfx import v1 as tfx

from pipeline import configs
from pipeline import local_pipeline

OUTPUT_DIR = "."
PIPELINE_ROOT = os.path.join(OUTPUT_DIR, "tfx_pipeline_output", configs.PIPELINE_NAME)
METADATA_PATH = os.path.join(
    OUTPUT_DIR, "tfx_metadata", configs.PIPELINE_NAME, "metadata.db"
)
SERVING_MODEL_DIR = os.path.join(PIPELINE_ROOT, "serving_model")


def run():
    tfx.orchestration.LocalDagRunner().run(
        local_pipeline.create_pipeline(
            pipeline_name=configs.PIPELINE_NAME,
            pipeline_root=PIPELINE_ROOT,
            data_path=configs.DATA_PATH,
            schema_path=configs.SCHEMA_PATH,
            modules={
                "training_fn": configs.TRAINING_FN,
                "preprocessing_fn": configs.PREPROCESSING_FN,
            },
            eval_configs=configs.EVAL_CONFIGS,
            serving_model_dir=SERVING_MODEL_DIR,
            metadata_connection_config=tfx.orchestration.metadata.sqlite_metadata_connection_config(
                METADATA_PATH
            ),
        )
    )


if __name__ == "__main__":
    logging.set_verbosity(logging.INFO)
    run()
deep-diver/mlops-hf-tf-vision-models
advanced_part2/kubeflow_runner.py
from absl import logging

from tfx import v1 as tfx
from tfx.orchestration.kubeflow.v2 import kubeflow_v2_dag_runner as runner
from tfx.proto import tuner_pb2

from pipeline import configs
from pipeline import kubeflow_pipeline


def run():
    runner_config = runner.KubeflowV2DagRunnerConfig(
        default_image=configs.PIPELINE_IMAGE
    )

    runner.KubeflowV2DagRunner(
        config=runner_config,
        output_filename=configs.PIPELINE_NAME + "_pipeline.json",
    ).run(
        kubeflow_pipeline.create_pipeline(
            pipeline_name=configs.PIPELINE_NAME,
            pipeline_root=configs.PIPELINE_ROOT,
            data_path=configs.DATA_PATH,
            schema_path=configs.SCHEMA_PATH,
            modules={
                "training_fn": configs.TRAINING_FN,
                "preprocessing_fn": configs.PREPROCESSING_FN,
                "tuner_fn": configs.TUNER_FN,
            },
            eval_configs=configs.EVAL_CONFIGS,
            ai_platform_training_args=configs.GCP_AI_PLATFORM_TRAINING_ARGS,
            ai_platform_tuner_args=configs.GCP_AI_PLATFORM_TUNER_ARGS,
            tuner_args=tuner_pb2.TuneArgs(
                num_parallel_trials=configs.NUM_PARALLEL_TRIALS
            ),
            ai_platform_serving_args=configs.GCP_AI_PLATFORM_SERVING_ARGS,
            example_gen_beam_args=configs.EXAMPLE_GEN_BEAM_ARGS,
            transform_beam_args=configs.TRANSFORM_BEAM_ARGS,
        )
    )


if __name__ == "__main__":
    logging.set_verbosity(logging.INFO)
    run()
deep-diver/mlops-hf-tf-vision-models
advanced_part2/local_runner.py
import os

from absl import logging

from tfx import v1 as tfx

from pipeline import configs
from pipeline import local_pipeline

OUTPUT_DIR = "."
PIPELINE_ROOT = os.path.join(OUTPUT_DIR, "tfx_pipeline_output", configs.PIPELINE_NAME)
METADATA_PATH = os.path.join(
    OUTPUT_DIR, "tfx_metadata", configs.PIPELINE_NAME, "metadata.db"
)
SERVING_MODEL_DIR = os.path.join(PIPELINE_ROOT, "serving_model")


def run():
    tfx.orchestration.LocalDagRunner().run(
        local_pipeline.create_pipeline(
            pipeline_name=configs.PIPELINE_NAME,
            pipeline_root=PIPELINE_ROOT,
            data_path=configs.DATA_PATH,
            schema_path=configs.SCHEMA_PATH,
            modules={
                "training_fn": configs.TRAINING_FN,
                "preprocessing_fn": configs.PREPROCESSING_FN,
                "tuner_fn": configs.TUNER_FN,
            },
            hyperparameters=configs.HYPER_PARAMETERS,
            eval_configs=configs.EVAL_CONFIGS,
            serving_model_dir=SERVING_MODEL_DIR,
            metadata_connection_config=tfx.orchestration.metadata.sqlite_metadata_connection_config(
                METADATA_PATH
            ),
        )
    )


if __name__ == "__main__":
    logging.set_verbosity(logging.INFO)
    run()
deep-diver/mlops-hf-tf-vision-models
basic/kubeflow_runner.py
from absl import logging

from tfx import v1 as tfx
from tfx.orchestration.kubeflow.v2 import kubeflow_v2_dag_runner as runner
from pipeline import configs
from pipeline import kubeflow_pipeline


def run():
    runner_config = runner.KubeflowV2DagRunnerConfig(
        default_image=configs.PIPELINE_IMAGE
    )

    runner.KubeflowV2DagRunner(
        config=runner_config,
        output_filename=configs.PIPELINE_NAME + "_pipeline.json",
    ).run(
        kubeflow_pipeline.create_pipeline(
            pipeline_name=configs.PIPELINE_NAME,
            pipeline_root=configs.PIPELINE_ROOT,
            data_path=configs.DATA_PATH,
            modules={
                "training_fn": configs.TRAINING_FN,
            },
            ai_platform_training_args=configs.GCP_AI_PLATFORM_TRAINING_ARGS,
            ai_platform_serving_args=configs.GCP_AI_PLATFORM_SERVING_ARGS,
            example_gen_beam_args=configs.EXAMPLE_GEN_BEAM_ARGS,
        )
    )


if __name__ == "__main__":
    logging.set_verbosity(logging.INFO)
    run()
deep-diver/mlops-hf-tf-vision-models
basic/local_runner.py
import os

from absl import logging

from tfx import v1 as tfx
from pipeline import configs
from pipeline import local_pipeline

OUTPUT_DIR = "."
PIPELINE_ROOT = os.path.join(OUTPUT_DIR, "tfx_pipeline_output", configs.PIPELINE_NAME)
METADATA_PATH = os.path.join(
    OUTPUT_DIR, "tfx_metadata", configs.PIPELINE_NAME, "metadata.db"
)
SERVING_MODEL_DIR = os.path.join(PIPELINE_ROOT, "serving_model")


def run():
    tfx.orchestration.LocalDagRunner().run(
        local_pipeline.create_pipeline(
            pipeline_name=configs.PIPELINE_NAME,
            pipeline_root=PIPELINE_ROOT,
            data_path=configs.DATA_PATH,
            modules={
                "training_fn": configs.TRAINING_FN,
            },
            serving_model_dir=SERVING_MODEL_DIR,
            metadata_connection_config=tfx.orchestration.metadata.sqlite_metadata_connection_config(
                METADATA_PATH
            ),
        )
    )


if __name__ == "__main__":
    logging.set_verbosity(logging.INFO)
    run()
deep-diver/mlops-hf-tf-vision-models
dataset/create_tfrecords.py
""" Script to generate TFRecord shards from the Sidewalks dataset as shown in this blog post: https://huggingface.co/blog/fine-tune-segformer. The recommended way to obtain TFRecord shards is via an Apache Beam Pipeline with an execution runner like Dataflow. Example: https://github.com/GoogleCloudPlatform/practical-ml-vision-book/blob/master/05_create_dataset/jpeg_to_tfrecord.py. Usage: python create_tfrecords --batch_size 16 python create_tfrecords --resize 256 # without --resize flag, no resizing is applied References: * https://github.com/GoogleCloudPlatform/practical-ml-vision-book/blob/master/05_create_dataset/05_split_tfrecord.ipynb * https://www.tensorflow.org/tutorials/images/segmentation """ import argparse import math import os from typing import Tuple import datasets import numpy as np import tensorflow as tf import tqdm from PIL import Image RESOLUTION = 256 def load_beans_dataset(args): hf_dataset_identifier = "beans" ds = datasets.load_dataset(hf_dataset_identifier) ds = ds.shuffle(seed=1) ds = ds["train"].train_test_split(test_size=args.split, seed=args.seed) train_ds = ds["train"] val_ds = ds["test"] return train_ds, val_ds def resize_img( image: tf.Tensor, label: tf.Tensor, resize: int ) -> Tuple[tf.Tensor, tf.Tensor]: image = tf.image.resize(image, (resize, resize)) return image, label def process_image( image: Image, label: Image, resize: int ) -> Tuple[tf.Tensor, tf.Tensor]: image = np.array(image) label = np.array(label) image = tf.convert_to_tensor(image) label = tf.convert_to_tensor(label) if resize: image, label = resize_img(image, label, resize) return image, label def _int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=value)) def _float_feature(value): return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def create_tfrecord(image: Image, label: Image, resize: int): image, label = process_image(image, label, resize) image_dims = image.shape image = tf.reshape(image, [-1]) # flatten to 1D array label = tf.reshape(label, [-1]) # flatten to 1D array return tf.train.Example( features=tf.train.Features( feature={ "image": _float_feature(image.numpy()), "image_shape": _int64_feature( [image_dims[0], image_dims[1], image_dims[2]] ), "label": _int64_feature(label.numpy()), } ) ).SerializeToString() def write_tfrecords(root_dir, dataset, split, batch_size, resize): print(f"Preparing TFRecords for split: {split}.") for step in tqdm.tnrange(int(math.ceil(len(dataset) / batch_size))): temp_ds = dataset[step * batch_size : (step + 1) * batch_size] shard_size = len(temp_ds["image"]) filename = os.path.join( root_dir, "{}-{:02d}-{}.tfrec".format(split, step, shard_size) ) with tf.io.TFRecordWriter(filename) as out_file: for i in range(shard_size): image = temp_ds["image"][i] label = temp_ds["labels"][i] example = create_tfrecord(image, label, resize) out_file.write(example) print("Wrote file {} containing {} records".format(filename, shard_size)) def main(args): train_ds, val_ds = load_beans_dataset(args) print("Dataset loaded from HF.") if not os.path.exists(args.root_tfrecord_dir): os.makedirs(args.root_tfrecord_dir, exist_ok=True) print(args.resize) write_tfrecords( args.root_tfrecord_dir, train_ds, "train", args.batch_size, args.resize ) write_tfrecords(args.root_tfrecord_dir, val_ds, "val", args.batch_size, args.resize) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( "--split", help="Train and test split.", default=0.2, type=float ) parser.add_argument( "--seed", help="Seed to be used while performing 
train-test splits.", default=2022, type=int, ) parser.add_argument( "--root_tfrecord_dir", help="Root directory where the TFRecord shards will be serialized.", default="beans-tfrecords", type=str, ) parser.add_argument( "--batch_size", help="Number of samples to process in a batch before serializing a single TFRecord shard.", default=32, type=int, ) parser.add_argument( "--resize", help="Width and height size the image will be resized to. No resizing will be applied when this isn't set.", type=int, ) args = parser.parse_args() return args if __name__ == "__main__": args = parse_args() main(args)
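A minimal round-trip check of a written shard, reading the record back with the same feature layout the script serializes; the shard file name below is a placeholder for whatever was actually written:

import tensorflow as tf

feature_description = {
    "image": tf.io.VarLenFeature(tf.float32),
    "image_shape": tf.io.VarLenFeature(tf.int64),
    "label": tf.io.VarLenFeature(tf.int64),
}

raw = tf.data.TFRecordDataset(["beans-tfrecords/train-00-32.tfrec"])  # placeholder
for record in raw.take(1):
    example = tf.io.parse_single_example(record, feature_description)
    shape = tf.sparse.to_dense(example["image_shape"])
    image = tf.reshape(tf.sparse.to_dense(example["image"]), shape)
    label = tf.sparse.to_dense(example["label"])
    print(image.shape, label.numpy())  # e.g. (256, 256, 3) and the class id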
deep-diver/mlops-hf-tf-vision-models
hf_integration/kubeflow_runner.py
from absl import logging

from tfx import v1 as tfx
from tfx.orchestration.kubeflow.v2 import kubeflow_v2_dag_runner as runner
from tfx.proto import tuner_pb2
from pipeline import configs
from pipeline import kubeflow_pipeline


def run():
    runner_config = runner.KubeflowV2DagRunnerConfig(
        default_image=configs.PIPELINE_IMAGE
    )

    runner.KubeflowV2DagRunner(
        config=runner_config,
        output_filename=configs.PIPELINE_NAME + "_pipeline.json",
    ).run(
        kubeflow_pipeline.create_pipeline(
            pipeline_name=configs.PIPELINE_NAME,
            pipeline_root=configs.PIPELINE_ROOT,
            data_path=configs.DATA_PATH,
            schema_path=configs.SCHEMA_PATH,
            modules={
                "training_fn": configs.TRAINING_FN,
                "preprocessing_fn": configs.PREPROCESSING_FN,
                "tuner_fn": configs.TUNER_FN,
            },
            eval_configs=configs.EVAL_CONFIGS,
            ai_platform_training_args=configs.GCP_AI_PLATFORM_TRAINING_ARGS,
            ai_platform_tuner_args=configs.GCP_AI_PLATFORM_TUNER_ARGS,
            tuner_args=tuner_pb2.TuneArgs(
                num_parallel_trials=configs.NUM_PARALLEL_TRIALS
            ),
            ai_platform_serving_args=configs.GCP_AI_PLATFORM_SERVING_ARGS,
            example_gen_beam_args=configs.EXAMPLE_GEN_BEAM_ARGS,
            transform_beam_args=configs.TRANSFORM_BEAM_ARGS,
            hf_pusher_args=configs.HF_PUSHER_ARGS,
        )
    )


if __name__ == "__main__":
    logging.set_verbosity(logging.INFO)
    run()
deep-diver/mlops-hf-tf-vision-models
hf_integration/local_runner.py
import os

from absl import logging

from tfx import v1 as tfx
from pipeline import configs
from pipeline import local_pipeline

OUTPUT_DIR = "."
PIPELINE_ROOT = os.path.join(OUTPUT_DIR, "tfx_pipeline_output", configs.PIPELINE_NAME)
METADATA_PATH = os.path.join(
    OUTPUT_DIR, "tfx_metadata", configs.PIPELINE_NAME, "metadata.db"
)
SERVING_MODEL_DIR = os.path.join(PIPELINE_ROOT, "serving_model")


def run():
    tfx.orchestration.LocalDagRunner().run(
        local_pipeline.create_pipeline(
            pipeline_name=configs.PIPELINE_NAME,
            pipeline_root=PIPELINE_ROOT,
            data_path=configs.DATA_PATH,
            schema_path=configs.SCHEMA_PATH,
            modules={
                "training_fn": configs.TRAINING_FN,
                "preprocessing_fn": configs.PREPROCESSING_FN,
                "tuner_fn": configs.TUNER_FN,
            },
            hyperparameters=configs.HYPER_PARAMETERS,
            eval_configs=configs.EVAL_CONFIGS,
            serving_model_dir=SERVING_MODEL_DIR,
            metadata_connection_config=tfx.orchestration.metadata.sqlite_metadata_connection_config(
                METADATA_PATH
            ),
        )
    )


if __name__ == "__main__":
    logging.set_verbosity(logging.INFO)
    run()
deep-diver/mlops-hf-tf-vision-models
intermediate/kubeflow_runner.py
from absl import logging

from tfx import v1 as tfx
from tfx.orchestration.kubeflow.v2 import kubeflow_v2_dag_runner as runner
from pipeline import configs
from pipeline import kubeflow_pipeline


def run():
    runner_config = runner.KubeflowV2DagRunnerConfig(
        default_image=configs.PIPELINE_IMAGE
    )

    runner.KubeflowV2DagRunner(
        config=runner_config,
        output_filename=configs.PIPELINE_NAME + "_pipeline.json",
    ).run(
        kubeflow_pipeline.create_pipeline(
            pipeline_name=configs.PIPELINE_NAME,
            pipeline_root=configs.PIPELINE_ROOT,
            data_path=configs.DATA_PATH,
            schema_path=configs.SCHEMA_PATH,
            modules={
                "training_fn": configs.TRAINING_FN,
                "preprocessing_fn": configs.PREPROCESSING_FN,
            },
            ai_platform_training_args=configs.GCP_AI_PLATFORM_TRAINING_ARGS,
            ai_platform_serving_args=configs.GCP_AI_PLATFORM_SERVING_ARGS,
            example_gen_beam_args=configs.EXAMPLE_GEN_BEAM_ARGS,
            transform_beam_args=configs.TRANSFORM_BEAM_ARGS,
        )
    )


if __name__ == "__main__":
    logging.set_verbosity(logging.INFO)
    run()
deep-diver/mlops-hf-tf-vision-models
intermediate/local_runner.py
import os

from absl import logging

from tfx import v1 as tfx
from pipeline import configs
from pipeline import local_pipeline

OUTPUT_DIR = "."
PIPELINE_ROOT = os.path.join(OUTPUT_DIR, "tfx_pipeline_output", configs.PIPELINE_NAME)
METADATA_PATH = os.path.join(
    OUTPUT_DIR, "tfx_metadata", configs.PIPELINE_NAME, "metadata.db"
)
SERVING_MODEL_DIR = os.path.join(PIPELINE_ROOT, "serving_model")


def run():
    tfx.orchestration.LocalDagRunner().run(
        local_pipeline.create_pipeline(
            pipeline_name=configs.PIPELINE_NAME,
            pipeline_root=PIPELINE_ROOT,
            data_path=configs.DATA_PATH,
            schema_path=configs.SCHEMA_PATH,
            modules={
                "training_fn": configs.TRAINING_FN,
                "preprocessing_fn": configs.PREPROCESSING_FN,
            },
            serving_model_dir=SERVING_MODEL_DIR,
            metadata_connection_config=tfx.orchestration.metadata.sqlite_metadata_connection_config(
                METADATA_PATH
            ),
        )
    )


if __name__ == "__main__":
    logging.set_verbosity(logging.INFO)
    run()
deep-diver/mlops-hf-tf-vision-models
notebooks/advanced_part1.ipynb
data_path = "gs://beans-lowres/tfrecords" local_data_path = "data" model_file = "modules/model.py" model_fn = "modules.model.run_fn" proprocessing_file = "modules/preprocessing.py" preprocessing_fn = "modules.preprocessing.preprocessing_fn" schema_file = "schema.pbtxt"import tfx tfx.__version__from tfx import v1 as tfx from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext from tfx.components import ImportExampleGen from tfx.components import StatisticsGen from tfx.components import SchemaGen from tfx.components import ExampleValidator from tfx.components import Transform from tfx.components import Trainer from tfx.components import Evaluator from tfx.components import Pusher from tfx.proto import example_gen_pb2 from tfx.types import Channel from tfx.types.standard_artifacts import Model from tfx.types.standard_artifacts import ModelBlessing from tfx.dsl.components.common import resolver from tfx.dsl.experimental.latest_blessed_model_resolver import LatestBlessedModelResolver import tensorflow_model_analysis as tfmacontext = InteractiveContext()input_config = example_gen_pb2.Input( splits=[ example_gen_pb2.Input.Split(name="train", pattern="train-*.tfrec"), example_gen_pb2.Input.Split(name="eval", pattern="val-*.tfrec"), ] ) example_gen = ImportExampleGen( input_base=local_data_path, input_config=input_config ) context.run(example_gen)statistics_gen = StatisticsGen(examples=example_gen.outputs['examples']) context.run(statistics_gen)context.show(statistics_gen.outputs['statistics'])%%writefile {schema_file} feature { name: "image" type: FLOAT presence { min_fraction: 1.0 } float_domain { min: 0 max: 255 } shape { dim { size: 256 } dim { size: 256 } dim { size: 3 } } } feature { name: "image_shape" type: INT presence { min_fraction: 1.0 } shape { dim { size: 3 } } } feature { name: "label" type: INT presence { min_fraction: 1.0 } int_domain { min: 0 max: 2 } shape { dim { size: 1 } } }schema_gen = tfx.components.ImportSchemaGen( schema_file=schema_file) context.run(schema_gen)example_validator = ExampleValidator( statistics=statistics_gen.outputs['statistics'], schema=schema_gen.outputs['schema'] ) context.run(example_validator)context.show(example_validator.outputs['anomalies'])%%writefile {proprocessing_file} import tensorflow as tf IMAGE_KEY = "image" LABEL_KEY = "label" MODEL_INPUT_IMAGE_KEY = "pixel_values" MODEL_INPUT_LABEL_KEY = "labels" INPUT_IMG_SIZE = 224 def preprocessing_fn(inputs): """tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. 
""" # print(inputs) outputs = {} inputs[IMAGE_KEY] = tf.image.resize( inputs[IMAGE_KEY], [INPUT_IMG_SIZE, INPUT_IMG_SIZE] ) inputs[IMAGE_KEY] = inputs[IMAGE_KEY] / 255.0 inputs[IMAGE_KEY] = tf.transpose(inputs[IMAGE_KEY], [0, 3, 1, 2]) outputs[MODEL_INPUT_IMAGE_KEY] = inputs[IMAGE_KEY] outputs[MODEL_INPUT_LABEL_KEY] = inputs[LABEL_KEY] return outputstransform = Transform( examples=example_gen.outputs['examples'], schema=schema_gen.outputs['schema'], preprocessing_fn=preprocessing_fn)context.run(transform)%%writefile {model_file} from typing import List, Dict, Tuple import absl import tensorflow as tf import tensorflow_transform as tft from transformers import ViTFeatureExtractor, TFViTForImageClassification from tfx.components.trainer.fn_args_utils import FnArgs from tfx_bsl.tfxio import dataset_options from tfx.components.trainer.fn_args_utils import DataAccessor feature_extractor = ViTFeatureExtractor() _TRAIN_LENGTH = 128 _EVAL_LENGTH = 128 _TRAIN_BATCH_SIZE = 8 _EVAL_BATCH_SIZE = 8 _EPOCHS = 1 _LABELS = ['angular_leaf_spot', 'bean_rust', 'healthy'] _CONCRETE_INPUT = "pixel_values" _MODEL_INPUT_LABEL_KEY = "labels" def INFO(text: str): absl.logging.info(text) def _normalize_img( img, mean=feature_extractor.image_mean, std=feature_extractor.image_std ): img = img / 255 mean = tf.constant(mean) std = tf.constant(std) return (img - mean) / std def _preprocess_serving(string_input): decoded_input = tf.io.decode_base64(string_input) decoded = tf.io.decode_jpeg(decoded_input, channels=3) resized = tf.image.resize(decoded, size=(224, 224)) normalized = _normalize_img(resized) normalized = tf.transpose( normalized, (2, 0, 1) ) # Since HF models are channel-first. return normalized @tf.function(input_signature=[tf.TensorSpec([None], tf.string)]) def _preprocess_fn(string_input): decoded_images = tf.map_fn( _preprocess_serving, string_input, dtype=tf.float32, back_prop=False ) return {_CONCRETE_INPUT: decoded_images} def _model_exporter(model: tf.keras.Model): m_call = tf.function(model.call).get_concrete_function( tf.TensorSpec( shape=[None, 3, 224, 224], dtype=tf.float32, name=_CONCRETE_INPUT ) ) @tf.function(input_signature=[tf.TensorSpec([None], tf.string)]) def serving_fn(string_input): labels = tf.constant(list(model.config.id2label.values()), dtype=tf.string) images = _preprocess_fn(string_input) predictions = m_call(**images) indices = tf.argmax(predictions.logits, axis=1) pred_source = tf.gather(params=labels, indices=indices) probs = tf.nn.softmax(predictions.logits, axis=1) pred_confidence = tf.reduce_max(probs, axis=1) return {"label": pred_source, "confidence": pred_confidence} return serving_fn ### def _transform_features_signature( model: tf.keras.Model, tf_transform_output: tft.TFTransformOutput ): """ transform_features_signature simply returns a function that transforms any data of the type of tf.Example which is denoted as the type of sta ndard_artifacts.Examples in TFX. The purpose of this function is to ap ply Transform Graph obtained from Transform component to the data prod uced by ImportExampleGen. This function will be used in the Evaluator component, so the raw evaluation inputs from ImportExampleGen can be a pporiately transformed that the model could understand. """ # basically, what Transform component emits is a SavedModel that knows # how to transform data. transform_features_layer() simply returns the # layer from the Transform. 
model.tft_layer = tf_transform_output.transform_features_layer() @tf.function( input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string, name="examples")] ) def serve_tf_examples_fn(serialized_tf_examples): """ raw_feature_spec returns a set of feature maps(dict) for the input TFRecords based on the knowledge that Transform component has lear ned(learn doesn't mean training here). By using this information, the raw data from ImportExampleGen could be parsed with tf.io.parse _example utility function. Then, it is passed to the model.tft_layer, so the final output we get is the transformed data of the raw input. """ feature_spec = tf_transform_output.raw_feature_spec() parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec) transformed_features = model.tft_layer(parsed_features) return transformed_features return serve_tf_examples_fn def _tf_examples_serving_signature(model, tf_transform_output): """ tf_examples_serving_signature simply returns a function that performs data transformation(preprocessing) and model prediction in a sequential manner. How data transformation is done is idential to the process of transform_features_signature function. """ model.tft_layer = tf_transform_output.transform_features_layer() @tf.function( input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string, name="examples")] ) def serve_tf_examples_fn( serialized_tf_example: tf.Tensor, ) -> Dict[str, tf.Tensor]: raw_feature_spec = tf_transform_output.raw_feature_spec() raw_features = tf.io.parse_example(serialized_tf_example, raw_feature_spec) transformed_features = model.tft_layer(raw_features) logits = model(transformed_features).logits return {_MODEL_INPUT_LABEL_KEY: logits} return serve_tf_examples_fn ### def _input_fn( file_pattern: List[str], data_accessor: DataAccessor, tf_transform_output: tft.TFTransformOutput, is_train: bool = False, batch_size: int = 32, ) -> tf.data.Dataset: INFO(f"Reading data from: {file_pattern}") dataset = data_accessor.tf_dataset_factory( file_pattern, dataset_options.TensorFlowDatasetOptions( batch_size=batch_size, label_key=_MODEL_INPUT_LABEL_KEY ), tf_transform_output.transformed_metadata.schema, ) return dataset def _build_model(): id2label={str(i): c for i, c in enumerate(_LABELS)} label2id={c: str(i) for i, c in enumerate(_LABELS)} model = TFViTForImageClassification.from_pretrained( "google/vit-base-patch16-224-in21k", num_labels=len(_LABELS), label2id=label2id, id2label=id2label, ) model.layers[0].trainable=False loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model.compile(optimizer='adam', loss=loss, metrics=["accuracy"]) return model def run_fn(fn_args: FnArgs): tf_transform_output = tft.TFTransformOutput(fn_args.transform_output) train_dataset = _input_fn( fn_args.train_files, fn_args.data_accessor, tf_transform_output, is_train=True, batch_size=_TRAIN_BATCH_SIZE, ) eval_dataset = _input_fn( fn_args.eval_files, fn_args.data_accessor, tf_transform_output, is_train=False, batch_size=_EVAL_BATCH_SIZE, ) model = _build_model() model.fit( train_dataset, steps_per_epoch=_TRAIN_LENGTH // _TRAIN_BATCH_SIZE, validation_data=eval_dataset, validation_steps=_EVAL_LENGTH // _EVAL_BATCH_SIZE, epochs=_EPOCHS, ) model.save( fn_args.serving_model_dir, save_format="tf", signatures={ "serving_default": _model_exporter(model), "transform_features": _transform_features_signature( model, tf_transform_output ), "from_examples": _tf_examples_serving_signature(model, tf_transform_output), }, )trainer = Trainer( run_fn=model_fn, 
transformed_examples=transform.outputs["transformed_examples"], transform_graph=transform.outputs["transform_graph"], schema=schema_gen.outputs["schema"], )context.run(trainer)model_resolver = resolver.Resolver( strategy_class=LatestBlessedModelResolver, model=Channel(type=Model), model_blessing=Channel(type=ModelBlessing), ).with_id("latest_blessed_model_resolver")context.run(model_resolver)eval_configs = tfma.EvalConfig( model_specs=[ tfma.ModelSpec( signature_name="from_examples", preprocessing_function_names=["transform_features"], label_key="labels", prediction_key="labels", ) ], slicing_specs=[tfma.SlicingSpec()], metrics_specs=[ tfma.MetricsSpec( metrics=[ tfma.MetricConfig( class_name="SparseCategoricalAccuracy", threshold=tfma.MetricThreshold( value_threshold=tfma.GenericValueThreshold( lower_bound={"value": 0.55} ), # Change threshold will be ignored if there is no # baseline model resolved from MLMD (first run). change_threshold=tfma.GenericChangeThreshold( direction=tfma.MetricDirection.HIGHER_IS_BETTER, absolute={"value": -1e-3}, ), ), ) ] ) ], ) evaluator = Evaluator( examples=example_gen.outputs["examples"], model=trainer.outputs["model"], baseline_model=model_resolver.outputs["model"], eval_config=eval_configs, )context.run(evaluator)
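Once the model is exported with these signatures, the serving_default entry point can be smoke-tested locally; a minimal sketch, where the SavedModel path and image file are placeholders, the keyword name "string_input" is assumed from the exported function's argument, and web-safe base64 is required because the signature decodes with tf.io.decode_base64:

import tensorflow as tf

loaded = tf.saved_model.load("serving_model/123")   # placeholder path
serving_fn = loaded.signatures["serving_default"]

image_bytes = tf.io.read_file("sample.jpg")         # placeholder image
b64 = tf.io.encode_base64(image_bytes)              # web-safe base64
outputs = serving_fn(string_input=tf.expand_dims(b64, axis=0))  # batched input
print(outputs["label"], outputs["confidence"])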
deep-diver/mlops-hf-tf-vision-models
notebooks/advanced_part2.ipynb
data_path = "gs://beans-lowres/tfrecords" local_data_path = "data" model_file = "modules/model.py" model_fn = "modules.model.run_fn" tuner_fn = "modules.model.tuner_fn" proprocessing_file = "modules/preprocessing.py" preprocessing_fn = "modules.preprocessing.preprocessing_fn" schema_file = "schema.pbtxt"import tfx tfx.__version__from tfx import v1 as tfx from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext from tfx.components import ImportExampleGen from tfx.components import StatisticsGen from tfx.components import SchemaGen from tfx.components import ExampleValidator from tfx.components import Transform from tfx.components import Trainer from tfx.components import Tuner from tfx.components import Evaluator from tfx.components import Pusher from tfx.proto import example_gen_pb2 from tfx.types import Channel from tfx.types.standard_artifacts import Model from tfx.types.standard_artifacts import ModelBlessing from tfx.dsl.components.common import resolver from tfx.dsl.experimental.latest_blessed_model_resolver import LatestBlessedModelResolver import tensorflow_model_analysis as tfmacontext = InteractiveContext()input_config = example_gen_pb2.Input( splits=[ example_gen_pb2.Input.Split(name="train", pattern="train-*.tfrec"), example_gen_pb2.Input.Split(name="eval", pattern="val-*.tfrec"), ] ) example_gen = ImportExampleGen( input_base=local_data_path, input_config=input_config ) context.run(example_gen)statistics_gen = StatisticsGen(examples=example_gen.outputs['examples']) context.run(statistics_gen)context.show(statistics_gen.outputs['statistics'])%%writefile {schema_file} feature { name: "image" type: FLOAT presence { min_fraction: 1.0 } float_domain { min: 0 max: 255 } shape { dim { size: 256 } dim { size: 256 } dim { size: 3 } } } feature { name: "image_shape" type: INT presence { min_fraction: 1.0 } shape { dim { size: 3 } } } feature { name: "label" type: INT presence { min_fraction: 1.0 } int_domain { min: 0 max: 2 } shape { dim { size: 1 } } }schema_gen = tfx.components.ImportSchemaGen( schema_file=schema_file) context.run(schema_gen)example_validator = ExampleValidator( statistics=statistics_gen.outputs['statistics'], schema=schema_gen.outputs['schema'] ) context.run(example_validator)context.show(example_validator.outputs['anomalies'])%%writefile {proprocessing_file} import tensorflow as tf IMAGE_KEY = "image" LABEL_KEY = "label" MODEL_INPUT_IMAGE_KEY = "pixel_values" MODEL_INPUT_LABEL_KEY = "labels" INPUT_IMG_SIZE = 224 def preprocessing_fn(inputs): """tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. 
""" # print(inputs) outputs = {} inputs[IMAGE_KEY] = tf.image.resize( inputs[IMAGE_KEY], [INPUT_IMG_SIZE, INPUT_IMG_SIZE] ) inputs[IMAGE_KEY] = inputs[IMAGE_KEY] / 255.0 inputs[IMAGE_KEY] = tf.transpose(inputs[IMAGE_KEY], [0, 3, 1, 2]) outputs[MODEL_INPUT_IMAGE_KEY] = inputs[IMAGE_KEY] outputs[MODEL_INPUT_LABEL_KEY] = inputs[LABEL_KEY] return outputstransform = Transform( examples=example_gen.outputs['examples'], schema=schema_gen.outputs['schema'], preprocessing_fn=preprocessing_fn)context.run(transform)%%writefile {model_file} from typing import List, Dict, Tuple import absl import tensorflow as tf import keras_tuner import tensorflow_transform as tft from transformers import ViTFeatureExtractor, TFViTForImageClassification from tfx.components.trainer.fn_args_utils import FnArgs from tfx.v1.components import TunerFnResult from tfx_bsl.tfxio import dataset_options from tfx.components.trainer.fn_args_utils import DataAccessor feature_extractor = ViTFeatureExtractor() _TRAIN_LENGTH = 128 _EVAL_LENGTH = 128 _TRAIN_BATCH_SIZE = 8 _EVAL_BATCH_SIZE = 8 _EPOCHS = 1 _LABELS = ['angular_leaf_spot', 'bean_rust', 'healthy'] _CONCRETE_INPUT = "pixel_values" _MODEL_INPUT_LABEL_KEY = "labels" def INFO(text: str): absl.logging.info(text) def _normalize_img( img, mean=feature_extractor.image_mean, std=feature_extractor.image_std ): img = img / 255 mean = tf.constant(mean) std = tf.constant(std) return (img - mean) / std def _preprocess_serving(string_input): decoded_input = tf.io.decode_base64(string_input) decoded = tf.io.decode_jpeg(decoded_input, channels=3) resized = tf.image.resize(decoded, size=(224, 224)) normalized = _normalize_img(resized) normalized = tf.transpose( normalized, (2, 0, 1) ) # Since HF models are channel-first. return normalized @tf.function(input_signature=[tf.TensorSpec([None], tf.string)]) def _preprocess_fn(string_input): decoded_images = tf.map_fn( _preprocess_serving, string_input, dtype=tf.float32, back_prop=False ) return {_CONCRETE_INPUT: decoded_images} def _model_exporter(model: tf.keras.Model): m_call = tf.function(model.call).get_concrete_function( tf.TensorSpec( shape=[None, 3, 224, 224], dtype=tf.float32, name=_CONCRETE_INPUT ) ) @tf.function(input_signature=[tf.TensorSpec([None], tf.string)]) def serving_fn(string_input): labels = tf.constant(list(model.config.id2label.values()), dtype=tf.string) images = _preprocess_fn(string_input) predictions = m_call(**images) indices = tf.argmax(predictions.logits, axis=1) pred_source = tf.gather(params=labels, indices=indices) probs = tf.nn.softmax(predictions.logits, axis=1) pred_confidence = tf.reduce_max(probs, axis=1) return {"label": pred_source, "confidence": pred_confidence} return serving_fn ### def _transform_features_signature( model: tf.keras.Model, tf_transform_output: tft.TFTransformOutput ): """ transform_features_signature simply returns a function that transforms any data of the type of tf.Example which is denoted as the type of sta ndard_artifacts.Examples in TFX. The purpose of this function is to ap ply Transform Graph obtained from Transform component to the data prod uced by ImportExampleGen. This function will be used in the Evaluator component, so the raw evaluation inputs from ImportExampleGen can be a pporiately transformed that the model could understand. """ # basically, what Transform component emits is a SavedModel that knows # how to transform data. transform_features_layer() simply returns the # layer from the Transform. 
model.tft_layer = tf_transform_output.transform_features_layer() @tf.function( input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string, name="examples")] ) def serve_tf_examples_fn(serialized_tf_examples): """ raw_feature_spec returns a set of feature maps(dict) for the input TFRecords based on the knowledge that Transform component has lear ned(learn doesn't mean training here). By using this information, the raw data from ImportExampleGen could be parsed with tf.io.parse _example utility function. Then, it is passed to the model.tft_layer, so the final output we get is the transformed data of the raw input. """ feature_spec = tf_transform_output.raw_feature_spec() parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec) transformed_features = model.tft_layer(parsed_features) return transformed_features return serve_tf_examples_fn def _tf_examples_serving_signature(model, tf_transform_output): """ tf_examples_serving_signature simply returns a function that performs data transformation(preprocessing) and model prediction in a sequential manner. How data transformation is done is idential to the process of transform_features_signature function. """ model.tft_layer = tf_transform_output.transform_features_layer() @tf.function( input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string, name="examples")] ) def serve_tf_examples_fn( serialized_tf_example: tf.Tensor, ) -> Dict[str, tf.Tensor]: raw_feature_spec = tf_transform_output.raw_feature_spec() raw_features = tf.io.parse_example(serialized_tf_example, raw_feature_spec) transformed_features = model.tft_layer(raw_features) logits = model(transformed_features).logits return {_MODEL_INPUT_LABEL_KEY: logits} return serve_tf_examples_fn ### def _input_fn( file_pattern: List[str], data_accessor: DataAccessor, tf_transform_output: tft.TFTransformOutput, is_train: bool = False, batch_size: int = 32, ) -> tf.data.Dataset: INFO(f"Reading data from: {file_pattern}") dataset = data_accessor.tf_dataset_factory( file_pattern, dataset_options.TensorFlowDatasetOptions( batch_size=batch_size, label_key=_MODEL_INPUT_LABEL_KEY ), tf_transform_output.transformed_metadata.schema, ) return dataset def _get_hyperparameters() -> keras_tuner.HyperParameters: hp = keras_tuner.HyperParameters() hp.Choice("learning_rate", [1e-3, 1e-2], default=1e-3) return hp def _build_model(hparams: keras_tuner.HyperParameters): id2label={str(i): c for i, c in enumerate(_LABELS)} label2id={c: str(i) for i, c in enumerate(_LABELS)} model = TFViTForImageClassification.from_pretrained( "google/vit-base-patch16-224-in21k", num_labels=len(_LABELS), label2id=label2id, id2label=id2label, ) model.layers[0].trainable=False loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) optimizer = tf.keras.optimizers.Adam(learning_rate=hparams.get("learning_rate")) model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"]) return model def run_fn(fn_args: FnArgs): tf_transform_output = tft.TFTransformOutput(fn_args.transform_output) train_dataset = _input_fn( fn_args.train_files, fn_args.data_accessor, tf_transform_output, is_train=True, batch_size=_TRAIN_BATCH_SIZE, ) eval_dataset = _input_fn( fn_args.eval_files, fn_args.data_accessor, tf_transform_output, is_train=False, batch_size=_EVAL_BATCH_SIZE, ) hparams = keras_tuner.HyperParameters.from_config(fn_args.hyperparameters) INFO(f"HyperParameters for training: {hparams.get_config()}") model = _build_model(hparams) model.fit( train_dataset, steps_per_epoch=_TRAIN_LENGTH // _TRAIN_BATCH_SIZE, 
validation_data=eval_dataset, validation_steps=_EVAL_LENGTH // _EVAL_BATCH_SIZE, epochs=_EPOCHS, ) model.save( fn_args.serving_model_dir, save_format="tf", signatures={ "serving_default": _model_exporter(model), "transform_features": _transform_features_signature( model, tf_transform_output ), "from_examples": _tf_examples_serving_signature(model, tf_transform_output), }, ) def tuner_fn(fn_args: FnArgs) -> TunerFnResult: tuner = keras_tuner.RandomSearch( _build_model, max_trials=6, hyperparameters=_get_hyperparameters(), allow_new_entries=False, objective=keras_tuner.Objective("val_accuracy", "max"), directory=fn_args.working_dir, project_name="img_classification_tuning", ) tf_transform_output = tft.TFTransformOutput(fn_args.transform_graph_path) train_dataset = _input_fn( fn_args.train_files, fn_args.data_accessor, tf_transform_output, is_train=True, batch_size=_TRAIN_BATCH_SIZE, ) eval_dataset = _input_fn( fn_args.eval_files, fn_args.data_accessor, tf_transform_output, is_train=False, batch_size=_EVAL_BATCH_SIZE, ) return TunerFnResult( tuner=tuner, fit_kwargs={ "x": train_dataset, "validation_data": eval_dataset, "steps_per_epoch": _TRAIN_LENGTH // _TRAIN_BATCH_SIZE, "validation_steps": _EVAL_LENGTH // _EVAL_BATCH_SIZE, }, ) tuner = Tuner( tuner_fn=tuner_fn, examples=transform.outputs["transformed_examples"], schema=schema_gen.outputs["schema"], transform_graph=transform.outputs["transform_graph"], )context.run(tuner)trainer = Trainer( run_fn=model_fn, transformed_examples=transform.outputs["transformed_examples"], transform_graph=transform.outputs["transform_graph"], schema=schema_gen.outputs["schema"], hyperparameters=tuner.outputs["best_hyperparameters"], )context.run(trainer)model_resolver = resolver.Resolver( strategy_class=LatestBlessedModelResolver, model=Channel(type=Model), model_blessing=Channel(type=ModelBlessing), ).with_id("latest_blessed_model_resolver")context.run(model_resolver)eval_configs = tfma.EvalConfig( model_specs=[ tfma.ModelSpec( signature_name="from_examples", preprocessing_function_names=["transform_features"], label_key="labels", prediction_key="labels", ) ], slicing_specs=[tfma.SlicingSpec()], metrics_specs=[ tfma.MetricsSpec( metrics=[ tfma.MetricConfig( class_name="SparseCategoricalAccuracy", threshold=tfma.MetricThreshold( value_threshold=tfma.GenericValueThreshold( lower_bound={"value": 0.55} ), # Change threshold will be ignored if there is no # baseline model resolved from MLMD (first run). change_threshold=tfma.GenericChangeThreshold( direction=tfma.MetricDirection.HIGHER_IS_BETTER, absolute={"value": -1e-3}, ), ), ) ] ) ], ) evaluator = Evaluator( examples=example_gen.outputs["examples"], model=trainer.outputs["model"], baseline_model=model_resolver.outputs["model"], eval_config=eval_configs, )context.run(evaluator)
deep-diver/mlops-hf-tf-vision-models
notebooks/basic.ipynb
data_path = "gs://beans-lowres/tfrecords" local_data_path = "data" model_file = "modules/model.py" model_fn = "modules.model.run_fn"import tfx tfx.__version__from tfx import v1 as tfx from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext from tfx.components import ImportExampleGen from tfx.components import Trainer from tfx.components import Pusher from tfx.components import StatisticsGen from tfx.proto import example_gen_pb2context = InteractiveContext()input_config = example_gen_pb2.Input( splits=[ example_gen_pb2.Input.Split(name="train", pattern="train-*.tfrec"), example_gen_pb2.Input.Split(name="eval", pattern="val-*.tfrec"), ] ) example_gen = ImportExampleGen( input_base=local_data_path, input_config=input_config )context.run(example_gen)%%writefile {model_file} from typing import List, Dict, Tuple import absl import tensorflow as tf from transformers import ViTFeatureExtractor, TFViTForImageClassification from tfx.components.trainer.fn_args_utils import FnArgs feature_extractor = ViTFeatureExtractor() _TRAIN_LENGTH = 128 _EVAL_LENGTH = 128 _TRAIN_BATCH_SIZE = 8 _EVAL_BATCH_SIZE = 8 _EPOCHS = 1 _LABELS = ['angular_leaf_spot', 'bean_rust', 'healthy'] _CONCRETE_INPUT = "pixel_values" def INFO(text: str): absl.logging.info(text) ### def _normalize_img( img, mean=feature_extractor.image_mean, std=feature_extractor.image_std ): img = img / 255 mean = tf.constant(mean) std = tf.constant(std) return (img - mean) / std def _preprocess_serving(string_input): decoded_input = tf.io.decode_base64(string_input) decoded = tf.io.decode_jpeg(decoded_input, channels=3) resized = tf.image.resize(decoded, size=(224, 224)) normalized = _normalize_img(resized) normalized = tf.transpose( normalized, (2, 0, 1) ) # Since HF models are channel-first. 
return normalized @tf.function(input_signature=[tf.TensorSpec([None], tf.string)]) def _preprocess_fn(string_input): decoded_images = tf.map_fn( _preprocess_serving, string_input, dtype=tf.float32, back_prop=False ) return {_CONCRETE_INPUT: decoded_images} def _model_exporter(model: tf.keras.Model): m_call = tf.function(model.call).get_concrete_function( tf.TensorSpec( shape=[None, 3, 224, 224], dtype=tf.float32, name=_CONCRETE_INPUT ) ) @tf.function(input_signature=[tf.TensorSpec([None], tf.string)]) def serving_fn(string_input): labels = tf.constant(list(model.config.id2label.values()), dtype=tf.string) images = _preprocess_fn(string_input) predictions = m_call(**images) indices = tf.argmax(predictions.logits, axis=1) pred_source = tf.gather(params=labels, indices=indices) probs = tf.nn.softmax(predictions.logits, axis=1) pred_confidence = tf.reduce_max(probs, axis=1) return {"label": pred_source, "confidence": pred_confidence} return serving_fn ### def _parse_tfr(proto): feature_description = { "image": tf.io.VarLenFeature(tf.float32), "image_shape": tf.io.VarLenFeature(tf.int64), "label": tf.io.VarLenFeature(tf.int64), } rec = tf.io.parse_single_example(proto, feature_description) image_shape = tf.sparse.to_dense(rec["image_shape"]) image = tf.reshape(tf.sparse.to_dense(rec["image"]), image_shape) label = tf.sparse.to_dense(rec["label"]) return {"pixel_values": image, "labels": label} def _preprocess(example_batch): images = example_batch["pixel_values"] images = tf.transpose(images, perm=[0, 1, 2, 3]) # (batch_size, height, width, num_channels) images = tf.image.resize(images, (224, 224)) images = tf.transpose(images, perm=[0, 3, 1, 2]) labels = example_batch["labels"] labels = tf.transpose(labels, perm=[0, 1]) # So, that TF can evaluation the shapes. return {"pixel_values": images, "labels": labels} def _input_fn( file_pattern: List[str], batch_size: int = 32, is_train: bool = False, ) -> tf.data.Dataset: INFO(f"Reading data from: {file_pattern}") dataset = tf.data.TFRecordDataset( tf.io.gfile.glob(file_pattern[0] + ".gz"), num_parallel_reads=tf.data.AUTOTUNE, compression_type="GZIP", ).map(_parse_tfr, num_parallel_calls=tf.data.AUTOTUNE) if is_train: dataset = dataset.shuffle(batch_size * 2) dataset = dataset.batch(batch_size) dataset = dataset.prefetch(tf.data.AUTOTUNE) dataset = dataset.map(_preprocess) return dataset def _build_model(): id2label={str(i): c for i, c in enumerate(_LABELS)} label2id={c: str(i) for i, c in enumerate(_LABELS)} model = TFViTForImageClassification.from_pretrained( "google/vit-base-patch16-224-in21k", num_labels=len(_LABELS), label2id=label2id, id2label=id2label, ) loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model.compile(optimizer='adam', loss=loss, metrics=["accuracy"]) return model def run_fn(fn_args: FnArgs): train_dataset = _input_fn( fn_args.train_files, is_train=True, batch_size=_TRAIN_BATCH_SIZE, ) eval_dataset = _input_fn( fn_args.eval_files, is_train=False, batch_size=_EVAL_BATCH_SIZE, ) model = _build_model() model.fit( train_dataset, steps_per_epoch=_TRAIN_LENGTH // _TRAIN_BATCH_SIZE, validation_data=eval_dataset, validation_steps=_EVAL_LENGTH // _TRAIN_BATCH_SIZE, epochs=_EPOCHS, ) model.save( fn_args.serving_model_dir, save_format="tf", signatures=_model_exporter(model) )trainer = Trainer( run_fn=model_fn, examples=example_gen.outputs["examples"], )context.run(trainer)
deep-diver/mlops-hf-tf-vision-models
notebooks/intermediate.ipynb
data_path = "gs://beans-lowres/tfrecords" local_data_path = "data" model_file = "modules/model.py" model_fn = "modules.model.run_fn" proprocessing_file = "modules/preprocessing.py" preprocessing_fn = "modules.preprocessing.preprocessing_fn" schema_file = "schema.pbtxt"import tfx tfx.__version__from tfx import v1 as tfx from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext from tfx.components import ImportExampleGen from tfx.components import StatisticsGen from tfx.components import SchemaGen from tfx.components import ExampleValidator from tfx.components import Transform from tfx.components import Trainer from tfx.components import Pusher from tfx.proto import example_gen_pb2context = InteractiveContext()input_config = example_gen_pb2.Input( splits=[ example_gen_pb2.Input.Split(name="train", pattern="train-*.tfrec"), example_gen_pb2.Input.Split(name="eval", pattern="val-*.tfrec"), ] ) example_gen = ImportExampleGen( input_base=local_data_path, input_config=input_config ) context.run(example_gen)statistics_gen = StatisticsGen(examples=example_gen.outputs['examples']) context.run(statistics_gen)context.show(statistics_gen.outputs['statistics'])%%writefile {schema_file} feature { name: "image" type: FLOAT presence { min_fraction: 1.0 } float_domain { min: 0 max: 255 } shape { dim { size: 256 } dim { size: 256 } dim { size: 3 } } } feature { name: "image_shape" type: INT presence { min_fraction: 1.0 } shape { dim { size: 3 } } } feature { name: "label" type: INT presence { min_fraction: 1.0 } int_domain { min: 0 max: 2 } shape { dim { size: 1 } } }schema_gen = tfx.components.ImportSchemaGen( schema_file=schema_file) context.run(schema_gen)example_validator = ExampleValidator( statistics=statistics_gen.outputs['statistics'], schema=schema_gen.outputs['schema'] ) context.run(example_validator)context.show(example_validator.outputs['anomalies'])%%writefile {proprocessing_file} import tensorflow as tf IMAGE_KEY = "image" LABEL_KEY = "label" MODEL_INPUT_IMAGE_KEY = "pixel_values" MODEL_INPUT_LABEL_KEY = "labels" INPUT_IMG_SIZE = 224 def preprocessing_fn(inputs): """tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. 
""" # print(inputs) outputs = {} inputs[IMAGE_KEY] = tf.image.resize( inputs[IMAGE_KEY], [INPUT_IMG_SIZE, INPUT_IMG_SIZE] ) inputs[IMAGE_KEY] = inputs[IMAGE_KEY] / 255.0 inputs[IMAGE_KEY] = tf.transpose(inputs[IMAGE_KEY], [0, 3, 1, 2]) outputs[MODEL_INPUT_IMAGE_KEY] = inputs[IMAGE_KEY] outputs[MODEL_INPUT_LABEL_KEY] = inputs[LABEL_KEY] return outputstransform = Transform( examples=example_gen.outputs['examples'], schema=schema_gen.outputs['schema'], preprocessing_fn=preprocessing_fn)context.run(transform)%%writefile {model_file} from typing import List, Dict, Tuple import absl import tensorflow as tf import tensorflow_transform as tft from transformers import ViTFeatureExtractor, TFViTForImageClassification from tfx.components.trainer.fn_args_utils import FnArgs from tfx_bsl.tfxio import dataset_options from tfx.components.trainer.fn_args_utils import DataAccessor feature_extractor = ViTFeatureExtractor() _TRAIN_LENGTH = 128 _EVAL_LENGTH = 128 _TRAIN_BATCH_SIZE = 8 _EVAL_BATCH_SIZE = 8 _EPOCHS = 1 _LABELS = ['angular_leaf_spot', 'bean_rust', 'healthy'] _CONCRETE_INPUT = "pixel_values" _MODEL_INPUT_LABEL_KEY = "labels" def INFO(text: str): absl.logging.info(text) def _normalize_img( img, mean=feature_extractor.image_mean, std=feature_extractor.image_std ): img = img / 255 mean = tf.constant(mean) std = tf.constant(std) return (img - mean) / std def _preprocess_serving(string_input): decoded_input = tf.io.decode_base64(string_input) decoded = tf.io.decode_jpeg(decoded_input, channels=3) resized = tf.image.resize(decoded, size=(224, 224)) normalized = _normalize_img(resized) normalized = tf.transpose( normalized, (2, 0, 1) ) # Since HF models are channel-first. return normalized @tf.function(input_signature=[tf.TensorSpec([None], tf.string)]) def _preprocess_fn(string_input): decoded_images = tf.map_fn( _preprocess_serving, string_input, dtype=tf.float32, back_prop=False ) return {_CONCRETE_INPUT: decoded_images} def _model_exporter(model: tf.keras.Model): m_call = tf.function(model.call).get_concrete_function( tf.TensorSpec( shape=[None, 3, 224, 224], dtype=tf.float32, name=_CONCRETE_INPUT ) ) @tf.function(input_signature=[tf.TensorSpec([None], tf.string)]) def serving_fn(string_input): labels = tf.constant(list(model.config.id2label.values()), dtype=tf.string) images = _preprocess_fn(string_input) predictions = m_call(**images) indices = tf.argmax(predictions.logits, axis=1) pred_source = tf.gather(params=labels, indices=indices) probs = tf.nn.softmax(predictions.logits, axis=1) pred_confidence = tf.reduce_max(probs, axis=1) return {"label": pred_source, "confidence": pred_confidence} return serving_fn def _input_fn( file_pattern: List[str], data_accessor: DataAccessor, tf_transform_output: tft.TFTransformOutput, is_train: bool = False, batch_size: int = 32, ) -> tf.data.Dataset: INFO(f"Reading data from: {file_pattern}") dataset = data_accessor.tf_dataset_factory( file_pattern, dataset_options.TensorFlowDatasetOptions( batch_size=batch_size, label_key=_MODEL_INPUT_LABEL_KEY ), tf_transform_output.transformed_metadata.schema, ) return dataset def _build_model(): id2label={str(i): c for i, c in enumerate(_LABELS)} label2id={c: str(i) for i, c in enumerate(_LABELS)} model = TFViTForImageClassification.from_pretrained( "google/vit-base-patch16-224-in21k", num_labels=len(_LABELS), label2id=label2id, id2label=id2label, ) model.layers[0].trainable=False loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model.compile(optimizer='adam', loss=loss, metrics=["accuracy"]) return 
model def run_fn(fn_args: FnArgs): tf_transform_output = tft.TFTransformOutput(fn_args.transform_output) train_dataset = _input_fn( fn_args.train_files, fn_args.data_accessor, tf_transform_output, is_train=True, batch_size=_TRAIN_BATCH_SIZE, ) eval_dataset = _input_fn( fn_args.eval_files, fn_args.data_accessor, tf_transform_output, is_train=False, batch_size=_EVAL_BATCH_SIZE, ) model = _build_model() model.fit( train_dataset, steps_per_epoch=_TRAIN_LENGTH // _TRAIN_BATCH_SIZE, validation_data=eval_dataset, validation_steps=_EVAL_LENGTH // _EVAL_BATCH_SIZE, epochs=_EPOCHS, ) model.save( fn_args.serving_model_dir, save_format="tf", signatures=_model_exporter(model) )trainer = Trainer( run_fn=model_fn, transformed_examples=transform.outputs["transformed_examples"], transform_graph=transform.outputs["transform_graph"], schema=schema_gen.outputs["schema"], )context.run(trainer)
deep-diver/mlops-hf-tf-vision-models
notebooks/parse_tfrecord.ipynb
import tensorflow as tf

GCS_PATH_FULL_RESOLUTION = "gs://beans-fullres/tfrecords"
GCS_PATH_LOW_RESOLUTION = "gs://beans-lowres/tfrecords"

BATCH_SIZE = 4
AUTO = tf.data.AUTOTUNE

def parse_tfr(proto):
    feature_description = {
        "image": tf.io.VarLenFeature(tf.float32),
        "image_shape": tf.io.VarLenFeature(tf.int64),
        "label": tf.io.VarLenFeature(tf.int64),
    }
    rec = tf.io.parse_single_example(proto, feature_description)
    image_shape = tf.sparse.to_dense(rec["image_shape"])
    image = tf.reshape(tf.sparse.to_dense(rec["image"]), image_shape)
    label = tf.sparse.to_dense(rec["label"])
    return {"pixel_values": image, "label": label}


def prepare_dataset(
    GCS_PATH=GCS_PATH_FULL_RESOLUTION, split="train", batch_size=BATCH_SIZE
):
    if split not in ["train", "val"]:
        raise ValueError(
            "Invalid split provided. Supported splits are: `train` and `val`."
        )

    dataset = tf.data.TFRecordDataset(
        [filename for filename in tf.io.gfile.glob(f"{GCS_PATH}/{split}-*")],
        num_parallel_reads=AUTO,
    ).map(parse_tfr, num_parallel_calls=AUTO)

    if split == "train":
        dataset = dataset.shuffle(batch_size * 2)

    dataset = dataset.batch(batch_size)
    dataset = dataset.prefetch(AUTO)

    return dataset

train_dataset = prepare_dataset()
val_dataset = prepare_dataset(split="val")

for batch in train_dataset.take(1):
    print(batch["pixel_values"].shape, batch["label"].shape)

for batch in val_dataset.take(1):
    print(batch["pixel_values"].shape, batch["label"].shape)

train_dataset = prepare_dataset(GCS_PATH_LOW_RESOLUTION)
val_dataset = prepare_dataset(GCS_PATH_LOW_RESOLUTION, split="val")

for batch in train_dataset.take(1):
    print(batch["pixel_values"].shape, batch["label"].shape)

for batch in val_dataset.take(1):
    print(batch["pixel_values"].shape, batch["label"].shape)
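As a quick visual sanity check on the parsed records, one could render a single example, assuming matplotlib is available in the notebook environment (the uint8 cast is only for display):

import matplotlib.pyplot as plt
import tensorflow as tf

for batch in train_dataset.take(1):
    plt.imshow(tf.cast(batch["pixel_values"][0], tf.uint8))  # first image of the batch
    plt.title(f'label: {batch["label"][0].numpy()}')
    plt.show()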
deep-diver/mlops-hf-tf-vision-models
advanced_part1/modules/ViT.py
import tensorflow as tf
from transformers import TFViTForImageClassification

from .common import LABELS
from .common import PRETRAIN_CHECKPOINT
from .utils import INFO


def build_model():
    id2label = {str(i): c for i, c in enumerate(LABELS)}
    label2id = {c: str(i) for i, c in enumerate(LABELS)}

    model = TFViTForImageClassification.from_pretrained(
        PRETRAIN_CHECKPOINT,
        num_labels=len(LABELS),
        label2id=label2id,
        id2label=id2label,
    )
    model.layers[0].trainable = False

    loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    model.compile(optimizer="adam", loss=loss, metrics=["accuracy"])

    model.summary(print_fn=INFO)  # route the summary through absl logging

    return model
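A minimal smoke test of build_model, assuming network access to download the pretrained checkpoint on first run; the dummy batch below is illustrative only:

import tensorflow as tf

model = build_model()
dummy = tf.random.uniform((1, 3, 224, 224))      # channel-first, as the HF ViT expects
logits = model(pixel_values=dummy).logits
print(logits.shape)                              # (1, 3): one score per bean class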
deep-diver/mlops-hf-tf-vision-models
advanced_part1/modules/common.py
IMAGE_TFREC_KEY = "image" IMAGE_SHAPE_TFREC_KEY = "image_shape" LABEL_TFREC_KEY = "label" MODEL_INPUT_IMAGE_KEY = "pixel_values" MODEL_INPUT_LABEL_KEY = "labels" IMAGE_MODEL_KEY = "pixel_values" LABEL_MODEL_KEY = "labels" CONCRETE_INPUT = "pixel_values" PRETRAIN_CHECKPOINT = "google/vit-base-patch16-224-in21k" LABELS = ["angular_leaf_spot", "bean_rust", "healthy"]
deep-diver/mlops-hf-tf-vision-models
advanced_part1/modules/hyperparams.py
EPOCHS = 1

BATCH_SIZE = 32
TRAIN_BATCH_SIZE = 32
EVAL_BATCH_SIZE = 32

TRAIN_LENGTH = 1034
EVAL_LENGTH = 128

INPUT_IMG_SIZE = 224
deep-diver/mlops-hf-tf-vision-models
advanced_part1/modules/preprocessing.py
import tensorflow as tf

from .common import IMAGE_TFREC_KEY, LABEL_TFREC_KEY
from .common import IMAGE_MODEL_KEY, LABEL_MODEL_KEY
from .hyperparams import INPUT_IMG_SIZE


def preprocessing_fn(inputs):
    """tf.transform's callback function for preprocessing inputs.

    Args:
        inputs: map from feature keys to raw not-yet-transformed features.

    Returns:
        Map from string feature key to transformed feature operations.
    """
    # print(inputs)
    outputs = {}

    inputs[IMAGE_TFREC_KEY] = tf.image.resize(
        inputs[IMAGE_TFREC_KEY], [INPUT_IMG_SIZE, INPUT_IMG_SIZE]
    )
    inputs[IMAGE_TFREC_KEY] = inputs[IMAGE_TFREC_KEY] / 255.0
    inputs[IMAGE_TFREC_KEY] = tf.transpose(inputs[IMAGE_TFREC_KEY], [0, 3, 1, 2])

    outputs[IMAGE_MODEL_KEY] = inputs[IMAGE_TFREC_KEY]
    outputs[LABEL_MODEL_KEY] = inputs[LABEL_TFREC_KEY]

    return outputs
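Since preprocessing_fn only uses plain TF ops, it can also be sanity-checked eagerly outside a Transform run; a minimal sketch with a dummy batch (the values are placeholders):

import tensorflow as tf

batch = {
    "image": tf.random.uniform((2, 256, 256, 3), maxval=255.0),  # NHWC, raw scale
    "label": tf.constant([[0], [2]], dtype=tf.int64),
}
outputs = preprocessing_fn(batch)
print(outputs["pixel_values"].shape)  # (2, 3, 224, 224): resized and channel-first
print(outputs["labels"].shape)        # (2, 1)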
deep-diver/mlops-hf-tf-vision-models
advanced_part1/modules/signatures.py
from typing import Dict

import tensorflow as tf
import tensorflow_transform as tft
from transformers import ViTFeatureExtractor

from .common import PRETRAIN_CHECKPOINT
from .common import CONCRETE_INPUT
from .common import LABEL_MODEL_KEY

feature_extractor = ViTFeatureExtractor.from_pretrained(PRETRAIN_CHECKPOINT)


def _normalize_img(
    img, mean=feature_extractor.image_mean, std=feature_extractor.image_std
):
    img = img / 255
    mean = tf.constant(mean)
    std = tf.constant(std)
    return (img - mean) / std


def _preprocess_serving(string_input):
    decoded_input = tf.io.decode_base64(string_input)
    decoded = tf.io.decode_jpeg(decoded_input, channels=3)
    resized = tf.image.resize(decoded, size=(224, 224))
    normalized = _normalize_img(resized)
    normalized = tf.transpose(
        normalized, (2, 0, 1)
    )  # Since HF models are channel-first.
    return normalized


@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def _preprocess_fn(string_input):
    decoded_images = tf.map_fn(
        _preprocess_serving, string_input, dtype=tf.float32, back_prop=False
    )
    return {CONCRETE_INPUT: decoded_images}


def model_exporter(model: tf.keras.Model):
    m_call = tf.function(model.call).get_concrete_function(
        tf.TensorSpec(shape=[None, 3, 224, 224], dtype=tf.float32, name=CONCRETE_INPUT)
    )

    @tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
    def serving_fn(string_input):
        labels = tf.constant(list(model.config.id2label.values()), dtype=tf.string)

        images = _preprocess_fn(string_input)
        predictions = m_call(**images)

        indices = tf.argmax(predictions.logits, axis=1)
        pred_source = tf.gather(params=labels, indices=indices)

        probs = tf.nn.softmax(predictions.logits, axis=1)
        pred_confidence = tf.reduce_max(probs, axis=1)

        return {"label": pred_source, "confidence": pred_confidence}

    return serving_fn


def transform_features_signature(
    model: tf.keras.Model, tf_transform_output: tft.TFTransformOutput
):
    """
    transform_features_signature simply returns a function that transforms
    any data of the type of tf.Example, which is denoted as the type of
    standard_artifacts.Examples in TFX. The purpose of this function is to
    apply the Transform Graph obtained from the Transform component to the
    data produced by ImportExampleGen. This function will be used in the
    Evaluator component, so the raw evaluation inputs from ImportExampleGen
    can be appropriately transformed so that the model can understand them.
    """

    # basically, what the Transform component emits is a SavedModel that knows
    # how to transform data. transform_features_layer() simply returns the
    # layer from the Transform.
    model.tft_layer = tf_transform_output.transform_features_layer()

    @tf.function(
        input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string, name="examples")]
    )
    def serve_tf_examples_fn(serialized_tf_examples):
        """
        raw_feature_spec returns a set of feature maps (dict) for the input
        TFRecords based on the knowledge that the Transform component has
        learned (learn doesn't mean training here). By using this information,
        the raw data from ImportExampleGen can be parsed with the
        tf.io.parse_example utility function. Then, it is passed to
        model.tft_layer, so the final output we get is the transformed data
        of the raw input.
        """
        feature_spec = tf_transform_output.raw_feature_spec()
        parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)
        transformed_features = model.tft_layer(parsed_features)

        return transformed_features

    return serve_tf_examples_fn


def tf_examples_serving_signature(model, tf_transform_output):
    """
    tf_examples_serving_signature simply returns a function that performs
    data transformation (preprocessing) and model prediction in a sequential
    manner. How data transformation is done is identical to the process of
    the transform_features_signature function.
    """

    model.tft_layer = tf_transform_output.transform_features_layer()

    @tf.function(
        input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string, name="examples")]
    )
    def serve_tf_examples_fn(
        serialized_tf_example: tf.Tensor,
    ) -> Dict[str, tf.Tensor]:
        raw_feature_spec = tf_transform_output.raw_feature_spec()
        raw_features = tf.io.parse_example(serialized_tf_example, raw_feature_spec)
        transformed_features = model.tft_layer(raw_features)

        logits = model(transformed_features).logits

        return {LABEL_MODEL_KEY: logits}

    return serve_tf_examples_fn
deep-diver/mlops-hf-tf-vision-models
advanced_part1/modules/train.py
import tensorflow_transform as tft
from tfx.components.trainer.fn_args_utils import FnArgs

from .train_data import input_fn
from .ViT import build_model
from .signatures import (
    model_exporter,
    transform_features_signature,
    tf_examples_serving_signature,
)
from .hyperparams import TRAIN_BATCH_SIZE, EVAL_BATCH_SIZE
from .hyperparams import TRAIN_LENGTH, EVAL_LENGTH
from .hyperparams import EPOCHS


def run_fn(fn_args: FnArgs):
    tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)

    train_dataset = input_fn(
        fn_args.train_files,
        fn_args.data_accessor,
        tf_transform_output,
        is_train=True,
        batch_size=TRAIN_BATCH_SIZE,
    )

    eval_dataset = input_fn(
        fn_args.eval_files,
        fn_args.data_accessor,
        tf_transform_output,
        is_train=False,
        batch_size=EVAL_BATCH_SIZE,
    )

    model = build_model()
    model.fit(
        train_dataset,
        steps_per_epoch=TRAIN_LENGTH // TRAIN_BATCH_SIZE,
        validation_data=eval_dataset,
        validation_steps=EVAL_LENGTH // EVAL_BATCH_SIZE,
        epochs=EPOCHS,
    )

    model.save(
        fn_args.serving_model_dir,
        save_format="tf",
        signatures={
            "serving_default": model_exporter(model),
            "transform_features": transform_features_signature(
                model, tf_transform_output
            ),
            "from_examples": tf_examples_serving_signature(model, tf_transform_output),
        },
    )
deep-diver/mlops-hf-tf-vision-models
advanced_part1/modules/train_data.py
from typing import List

import tensorflow as tf
import tensorflow_transform as tft
from tfx_bsl.tfxio import dataset_options
from tfx.components.trainer.fn_args_utils import DataAccessor

from .utils import INFO
from .common import LABEL_MODEL_KEY
from .hyperparams import BATCH_SIZE


def input_fn(
    file_pattern: List[str],
    data_accessor: DataAccessor,
    tf_transform_output: tft.TFTransformOutput,
    is_train: bool = False,
    batch_size: int = BATCH_SIZE,
) -> tf.data.Dataset:
    INFO(f"Reading data from: {file_pattern}")

    dataset = data_accessor.tf_dataset_factory(
        file_pattern,
        dataset_options.TensorFlowDatasetOptions(
            batch_size=batch_size, label_key=LABEL_MODEL_KEY
        ),
        tf_transform_output.transformed_metadata.schema,
    )

    return dataset
deep-diver/mlops-hf-tf-vision-models
advanced_part1/modules/utils.py
import absl


def INFO(text: str):
    absl.logging.info(text)
deep-diver/mlops-hf-tf-vision-models
advanced_part1/pipeline/configs.py
import os

import tensorflow_model_analysis as tfma
import tfx.extensions.google_cloud_ai_platform.constants as vertex_const
import tfx.extensions.google_cloud_ai_platform.trainer.executor as vertex_training_const

PIPELINE_NAME = "vit-e2e-pipeline-advanced-part1"

try:
    import google.auth  # pylint: disable=g-import-not-at-top  # pytype: disable=import-error

    try:
        _, GOOGLE_CLOUD_PROJECT = google.auth.default()
    except google.auth.exceptions.DefaultCredentialsError:
        GOOGLE_CLOUD_PROJECT = "gcp-ml-172005"
except ImportError:
    GOOGLE_CLOUD_PROJECT = "gcp-ml-172005"

GOOGLE_CLOUD_REGION = "us-central1"

GCS_BUCKET_NAME = GOOGLE_CLOUD_PROJECT + "-complete-mlops"

PIPELINE_IMAGE = f"gcr.io/{GOOGLE_CLOUD_PROJECT}/{PIPELINE_NAME}"

OUTPUT_DIR = os.path.join("gs://", GCS_BUCKET_NAME)
PIPELINE_ROOT = os.path.join(OUTPUT_DIR, "tfx_pipeline_output", PIPELINE_NAME)

DATA_PATH = "gs://beans-lowres/tfrecords/"
SCHEMA_PATH = "pipeline/schema.pbtxt"

TRAINING_FN = "modules.train.run_fn"
PREPROCESSING_FN = "modules.preprocessing.preprocessing_fn"

EXAMPLE_GEN_BEAM_ARGS = None
TRANSFORM_BEAM_ARGS = None

EVAL_CONFIGS = tfma.EvalConfig(
    model_specs=[
        tfma.ModelSpec(
            signature_name="from_examples",
            preprocessing_function_names=["transform_features"],
            label_key="labels",
            prediction_key="labels",
        )
    ],
    slicing_specs=[tfma.SlicingSpec()],
    metrics_specs=[
        tfma.MetricsSpec(
            metrics=[
                tfma.MetricConfig(
                    class_name="SparseCategoricalAccuracy",
                    threshold=tfma.MetricThreshold(
                        value_threshold=tfma.GenericValueThreshold(
                            lower_bound={"value": 0.55}
                        ),
                        # Change threshold will be ignored if there is no
                        # baseline model resolved from MLMD (first run).
                        change_threshold=tfma.GenericChangeThreshold(
                            direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                            absolute={"value": -1e-3},
                        ),
                    ),
                )
            ]
        )
    ],
)

GCP_AI_PLATFORM_TRAINING_ARGS = {
    vertex_const.ENABLE_VERTEX_KEY: True,
    vertex_const.VERTEX_REGION_KEY: GOOGLE_CLOUD_REGION,
    vertex_training_const.TRAINING_ARGS_KEY: {
        "project": GOOGLE_CLOUD_PROJECT,
        "worker_pool_specs": [
            {
                "machine_spec": {
                    "machine_type": "n1-standard-4",
                    "accelerator_type": "NVIDIA_TESLA_K80",
                    "accelerator_count": 1,
                },
                "replica_count": 1,
                "container_spec": {
                    "image_uri": PIPELINE_IMAGE,
                },
            }
        ],
    },
    "use_gpu": True,
}

fullres_data = os.environ.get("FULL_RES_DATA", "false")
if fullres_data.lower() == "true":
    DATA_PATH = "gs://beans-fullres/tfrecords/"

    DATAFLOW_SERVICE_ACCOUNT = "csp-gde-dataflow@gcp-ml-172005.iam.gserviceaccount.com"
    DATAFLOW_MACHINE_TYPE = "n1-standard-4"
    DATAFLOW_MAX_WORKERS = 4
    DATAFLOW_DISK_SIZE_GB = 100

    EXAMPLE_GEN_BEAM_ARGS = [
        "--runner=DataflowRunner",
        "--project=" + GOOGLE_CLOUD_PROJECT,
        "--region=" + GOOGLE_CLOUD_REGION,
        "--service_account_email=" + DATAFLOW_SERVICE_ACCOUNT,
        "--machine_type=" + DATAFLOW_MACHINE_TYPE,
        "--experiments=use_runner_v2",
        "--max_num_workers=" + str(DATAFLOW_MAX_WORKERS),
        "--disk_size_gb=" + str(DATAFLOW_DISK_SIZE_GB),
    ]

    TRANSFORM_BEAM_ARGS = [
        "--runner=DataflowRunner",
        "--project=" + GOOGLE_CLOUD_PROJECT,
        "--region=" + GOOGLE_CLOUD_REGION,
        "--service_account_email=" + DATAFLOW_SERVICE_ACCOUNT,
        "--machine_type=" + DATAFLOW_MACHINE_TYPE,
        "--experiments=use_runner_v2",
        "--max_num_workers=" + str(DATAFLOW_MAX_WORKERS),
        "--disk_size_gb=" + str(DATAFLOW_DISK_SIZE_GB),
        "--worker_harness_container_image=" + PIPELINE_IMAGE,
    ]

    GCP_AI_PLATFORM_TRAINING_ARGS[vertex_training_const.TRAINING_ARGS_KEY][
        "worker_pool_specs"
    ] = [
        {
            "machine_spec": {
                "machine_type": "n1-standard-8",
                "accelerator_type": "NVIDIA_TESLA_V100",
                "accelerator_count": 1,
            },
            "replica_count": 1,
            "container_spec": {
                "image_uri": PIPELINE_IMAGE,
            },
        }
    ]

GCP_AI_PLATFORM_SERVING_ARGS = {
    vertex_const.ENABLE_VERTEX_KEY: True,
    vertex_const.VERTEX_REGION_KEY: GOOGLE_CLOUD_REGION,
    vertex_const.VERTEX_CONTAINER_IMAGE_URI_KEY: "us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-8:latest",
    vertex_const.SERVING_ARGS_KEY: {
        "project_id": GOOGLE_CLOUD_PROJECT,
        "deployed_model_display_name": PIPELINE_NAME.replace("-", "_"),
        "endpoint_name": "prediction-" + PIPELINE_NAME.replace("-", "_"),
        "traffic_split": {"0": 100},
        "machine_type": "n1-standard-4",
        "min_replica_count": 1,
        "max_replica_count": 1,
    },
}
deep-diver/mlops-hf-tf-vision-models
advanced_part1/pipeline/kubeflow_pipeline.py
from typing import Any, Dict, List, Optional, Text

import tensorflow_model_analysis as tfma
from tfx import v1 as tfx

from ml_metadata.proto import metadata_store_pb2
from tfx.proto import example_gen_pb2
from tfx.components import ImportExampleGen
from tfx.components import StatisticsGen
from tfx.components import ExampleValidator
from tfx.components import Transform
from tfx.components import Evaluator
from tfx.extensions.google_cloud_ai_platform.trainer.component import (
    Trainer as VertexTrainer,
)
from tfx.extensions.google_cloud_ai_platform.pusher.component import (
    Pusher as VertexPusher,
)
from tfx.orchestration import pipeline
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
from tfx.dsl.components.common import resolver
from tfx.dsl.experimental.latest_blessed_model_resolver import (
    LatestBlessedModelResolver,
)


def create_pipeline(
    pipeline_name: Text,
    pipeline_root: Text,
    data_path: Text,
    schema_path: Text,
    modules: Dict[Text, Text],
    eval_configs: tfma.EvalConfig,
    metadata_connection_config: Optional[metadata_store_pb2.ConnectionConfig] = None,
    ai_platform_training_args: Optional[Dict[Text, Text]] = None,
    ai_platform_serving_args: Optional[Dict[Text, Any]] = None,
    example_gen_beam_args: Optional[List] = None,
    transform_beam_args: Optional[List] = None,
) -> tfx.dsl.Pipeline:
    components = []

    input_config = example_gen_pb2.Input(
        splits=[
            example_gen_pb2.Input.Split(name="train", pattern="train-*.tfrec"),
            example_gen_pb2.Input.Split(name="eval", pattern="val-*.tfrec"),
        ]
    )
    example_gen = ImportExampleGen(input_base=data_path, input_config=input_config)
    if example_gen_beam_args is not None:
        example_gen.with_beam_pipeline_args(example_gen_beam_args)
    components.append(example_gen)

    statistics_gen = StatisticsGen(examples=example_gen.outputs["examples"])
    components.append(statistics_gen)

    schema_gen = tfx.components.ImportSchemaGen(schema_file=schema_path)
    components.append(schema_gen)

    example_validator = ExampleValidator(
        statistics=statistics_gen.outputs["statistics"],
        schema=schema_gen.outputs["schema"],
    )
    components.append(example_validator)

    transform_args = {
        "examples": example_gen.outputs["examples"],
        "schema": schema_gen.outputs["schema"],
        "preprocessing_fn": modules["preprocessing_fn"],
    }
    transform = Transform(**transform_args)
    if transform_beam_args is not None:
        transform.with_beam_pipeline_args(transform_beam_args)
    components.append(transform)

    trainer_args = {
        "run_fn": modules["training_fn"],
        "transformed_examples": transform.outputs["transformed_examples"],
        "transform_graph": transform.outputs["transform_graph"],
        "schema": schema_gen.outputs["schema"],
        "custom_config": ai_platform_training_args,
    }
    trainer = VertexTrainer(**trainer_args)
    components.append(trainer)

    model_resolver = resolver.Resolver(
        strategy_class=LatestBlessedModelResolver,
        model=Channel(type=Model),
        model_blessing=Channel(type=ModelBlessing),
    ).with_id("latest_blessed_model_resolver")
    components.append(model_resolver)

    evaluator = Evaluator(
        examples=example_gen.outputs["examples"],
        model=trainer.outputs["model"],
        baseline_model=model_resolver.outputs["model"],
        eval_config=eval_configs,
    )
    components.append(evaluator)

    pusher_args = {
        "model": trainer.outputs["model"],
        "model_blessing": evaluator.outputs["blessing"],
        "custom_config": ai_platform_serving_args,
    }
    pusher = VertexPusher(**pusher_args)  # pylint: disable=unused-variable
    components.append(pusher)

    return pipeline.Pipeline(
        pipeline_name=pipeline_name,
        pipeline_root=pipeline_root,
        components=components,
        enable_cache=True,
        metadata_connection_config=metadata_connection_config,
    )
deep-diver/mlops-hf-tf-vision-models
advanced_part1/pipeline/local_pipeline.py
from typing import Dict, Optional, Text

import tensorflow_model_analysis as tfma
from tfx import v1 as tfx

from ml_metadata.proto import metadata_store_pb2
from tfx.proto import example_gen_pb2
from tfx.components import ImportExampleGen
from tfx.components import StatisticsGen
from tfx.components import ExampleValidator
from tfx.components import Transform
from tfx.components import Trainer
from tfx.components import Evaluator
from tfx.components import Pusher
from tfx.orchestration import pipeline
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
from tfx.dsl.components.common import resolver
from tfx.dsl.experimental.latest_blessed_model_resolver import (
    LatestBlessedModelResolver,
)


def create_pipeline(
    pipeline_name: Text,
    pipeline_root: Text,
    data_path: Text,
    schema_path: Text,
    modules: Dict[Text, Text],
    eval_configs: tfma.EvalConfig,
    serving_model_dir: Text,
    metadata_connection_config: Optional[metadata_store_pb2.ConnectionConfig] = None,
) -> tfx.dsl.Pipeline:
    components = []

    input_config = example_gen_pb2.Input(
        splits=[
            example_gen_pb2.Input.Split(name="train", pattern="train-00-*.tfrec"),
            example_gen_pb2.Input.Split(name="eval", pattern="val-00-*.tfrec"),
        ]
    )
    example_gen = ImportExampleGen(input_base=data_path, input_config=input_config)
    components.append(example_gen)

    statistics_gen = StatisticsGen(examples=example_gen.outputs["examples"])
    components.append(statistics_gen)

    schema_gen = tfx.components.ImportSchemaGen(schema_file=schema_path)
    components.append(schema_gen)

    example_validator = ExampleValidator(
        statistics=statistics_gen.outputs["statistics"],
        schema=schema_gen.outputs["schema"],
    )
    components.append(example_validator)

    transform_args = {
        "examples": example_gen.outputs["examples"],
        "schema": schema_gen.outputs["schema"],
        "preprocessing_fn": modules["preprocessing_fn"],
    }
    transform = Transform(**transform_args)
    components.append(transform)

    trainer_args = {
        "run_fn": modules["training_fn"],
        "transformed_examples": transform.outputs["transformed_examples"],
        "transform_graph": transform.outputs["transform_graph"],
        "schema": schema_gen.outputs["schema"],
    }
    trainer = Trainer(**trainer_args)
    components.append(trainer)

    model_resolver = resolver.Resolver(
        strategy_class=LatestBlessedModelResolver,
        model=Channel(type=Model),
        model_blessing=Channel(type=ModelBlessing),
    ).with_id("latest_blessed_model_resolver")
    components.append(model_resolver)

    evaluator = Evaluator(
        examples=example_gen.outputs["examples"],
        model=trainer.outputs["model"],
        baseline_model=model_resolver.outputs["model"],
        eval_config=eval_configs,
    )
    components.append(evaluator)

    pusher_args = {
        "model": trainer.outputs["model"],
        "model_blessing": evaluator.outputs["blessing"],
        "push_destination": tfx.proto.PushDestination(
            filesystem=tfx.proto.PushDestination.Filesystem(
                base_directory=serving_model_dir
            )
        ),
    }
    pusher = Pusher(**pusher_args)  # pylint: disable=unused-variable
    components.append(pusher)

    return pipeline.Pipeline(
        pipeline_name=pipeline_name,
        pipeline_root=pipeline_root,
        components=components,
        enable_cache=False,
        metadata_connection_config=metadata_connection_config,
    )
deep-diver/mlops-hf-tf-vision-models
advanced_part2/modules/ViT.py
import tensorflow as tf
import keras_tuner
from transformers import TFViTForImageClassification

from .common import LABELS
from .common import PRETRAIN_CHECKPOINT
from .utils import INFO


def build_model(hparams: keras_tuner.HyperParameters):
    id2label = {str(i): c for i, c in enumerate(LABELS)}
    label2id = {c: str(i) for i, c in enumerate(LABELS)}

    model = TFViTForImageClassification.from_pretrained(
        PRETRAIN_CHECKPOINT,
        num_labels=len(LABELS),
        label2id=label2id,
        id2label=id2label,
    )
    model.layers[0].trainable = False

    loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    optimizer = tf.keras.optimizers.Adam(learning_rate=hparams.get("learning_rate"))
    model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"])

    INFO(model.summary())
    return model
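
# Usage sketch added for illustration; it is not part of the original module.
# build_model expects a keras_tuner.HyperParameters object that already holds
# a "learning_rate" entry (see modules/hyperparams.py). Running this downloads
# the pretrained ViT checkpoint; use `python -m modules.ViT` so the relative
# imports resolve.
if __name__ == "__main__":
    hp = keras_tuner.HyperParameters()
    hp.Fixed("learning_rate", 1e-3)  # pin a value outside of a tuning run
    build_model(hp)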
deep-diver/mlops-hf-tf-vision-models
advanced_part2/modules/common.py
IMAGE_TFREC_KEY = "image"
IMAGE_SHAPE_TFREC_KEY = "image_shape"
LABEL_TFREC_KEY = "label"

MODEL_INPUT_IMAGE_KEY = "pixel_values"
MODEL_INPUT_LABEL_KEY = "labels"

IMAGE_MODEL_KEY = "pixel_values"
LABEL_MODEL_KEY = "labels"

CONCRETE_INPUT = "pixel_values"

PRETRAIN_CHECKPOINT = "google/vit-base-patch16-224-in21k"

LABELS = ["angular_leaf_spot", "bean_rust", "healthy"]
deep-diver/mlops-hf-tf-vision-models
advanced_part2/modules/hyperparams.py
import keras_tuner

EPOCHS = 10
BATCH_SIZE = 32
TRAIN_BATCH_SIZE = 32
EVAL_BATCH_SIZE = 32
TRAIN_LENGTH = 1034
EVAL_LENGTH = 128

INPUT_IMG_SIZE = 224


def get_hyperparameters(hyperparameters) -> keras_tuner.HyperParameters:
    hp_set = keras_tuner.HyperParameters()
    for hp in hyperparameters:
        hp_set.Choice(
            hp, hyperparameters[hp]["values"], default=hyperparameters[hp]["default"]
        )
    return hp_set
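
# Illustrative sketch, not part of the original module: the expected shape of
# the `hyperparameters` argument mirrors the HYPER_PARAMETERS dict that
# pipeline/configs.py passes in via custom_config.
if __name__ == "__main__":
    space = {"learning_rate": {"values": [1e-3, 1e-2, 1e-1], "default": 1e-3}}
    hp_set = get_hyperparameters(space)
    print(hp_set.get_config())  # shows the registered Choice hyperparameter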
deep-diver/mlops-hf-tf-vision-models
advanced_part2/modules/preprocessing.py
import tensorflow as tf

from .common import IMAGE_TFREC_KEY, LABEL_TFREC_KEY
from .common import IMAGE_MODEL_KEY, LABEL_MODEL_KEY
from .hyperparams import INPUT_IMG_SIZE


def preprocessing_fn(inputs):
    """tf.transform's callback function for preprocessing inputs.

    Args:
      inputs: map from feature keys to raw not-yet-transformed features.

    Returns:
      Map from string feature key to transformed feature operations.
    """
    outputs = {}

    inputs[IMAGE_TFREC_KEY] = tf.image.resize(
        inputs[IMAGE_TFREC_KEY], [INPUT_IMG_SIZE, INPUT_IMG_SIZE]
    )
    inputs[IMAGE_TFREC_KEY] = inputs[IMAGE_TFREC_KEY] / 255.0
    inputs[IMAGE_TFREC_KEY] = tf.transpose(inputs[IMAGE_TFREC_KEY], [0, 3, 1, 2])

    outputs[IMAGE_MODEL_KEY] = inputs[IMAGE_TFREC_KEY]
    outputs[LABEL_MODEL_KEY] = inputs[LABEL_TFREC_KEY]

    return outputs
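
# Quick shape check added for illustration; not part of the original module.
# It feeds a fake NHWC float batch (an assumption about the raw input layout)
# and confirms that preprocessing_fn resizes, rescales to [0, 1], and emits
# channel-first NCHW tensors sized for ViT.
if __name__ == "__main__":
    fake = {
        IMAGE_TFREC_KEY: tf.random.uniform([2, 500, 500, 3], maxval=255.0),
        LABEL_TFREC_KEY: tf.constant([[0], [1]], dtype=tf.int64),
    }
    out = preprocessing_fn(fake)
    print(out[IMAGE_MODEL_KEY].shape)  # (2, 3, 224, 224)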
deep-diver/mlops-hf-tf-vision-models
advanced_part2/modules/signatures.py
from typing import Dict

import tensorflow as tf
import tensorflow_transform as tft
from transformers import ViTFeatureExtractor

from .common import PRETRAIN_CHECKPOINT
from .common import CONCRETE_INPUT
from .common import LABEL_MODEL_KEY

feature_extractor = ViTFeatureExtractor.from_pretrained(PRETRAIN_CHECKPOINT)


def _normalize_img(
    img, mean=feature_extractor.image_mean, std=feature_extractor.image_std
):
    img = img / 255
    mean = tf.constant(mean)
    std = tf.constant(std)
    return (img - mean) / std


def _preprocess_serving(string_input):
    decoded_input = tf.io.decode_base64(string_input)
    decoded = tf.io.decode_jpeg(decoded_input, channels=3)
    resized = tf.image.resize(decoded, size=(224, 224))
    normalized = _normalize_img(resized)
    normalized = tf.transpose(
        normalized, (2, 0, 1)
    )  # Since HF models are channel-first.
    return normalized


@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def _preprocess_fn(string_input):
    decoded_images = tf.map_fn(
        _preprocess_serving, string_input, dtype=tf.float32, back_prop=False
    )
    return {CONCRETE_INPUT: decoded_images}


def model_exporter(model: tf.keras.Model):
    m_call = tf.function(model.call).get_concrete_function(
        tf.TensorSpec(shape=[None, 3, 224, 224], dtype=tf.float32, name=CONCRETE_INPUT)
    )

    @tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
    def serving_fn(string_input):
        labels = tf.constant(list(model.config.id2label.values()), dtype=tf.string)
        images = _preprocess_fn(string_input)

        predictions = m_call(**images)

        indices = tf.argmax(predictions.logits, axis=1)
        pred_source = tf.gather(params=labels, indices=indices)
        probs = tf.nn.softmax(predictions.logits, axis=1)
        pred_confidence = tf.reduce_max(probs, axis=1)
        return {"label": pred_source, "confidence": pred_confidence}

    return serving_fn


def transform_features_signature(
    model: tf.keras.Model, tf_transform_output: tft.TFTransformOutput
):
    """Returns a function that transforms data of type tf.Example, which is
    denoted as the type of standard_artifacts.Examples in TFX.

    The purpose of this function is to apply the Transform Graph obtained
    from the Transform component to the data produced by ImportExampleGen.
    It will be used in the Evaluator component, so the raw evaluation inputs
    from ImportExampleGen can be appropriately transformed into a form the
    model understands.
    """
    # What the Transform component emits is essentially a SavedModel that
    # knows how to transform data. transform_features_layer() simply returns
    # that layer from the Transform output.
    model.tft_layer = tf_transform_output.transform_features_layer()

    @tf.function(
        input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string, name="examples")]
    )
    def serve_tf_examples_fn(serialized_tf_examples):
        """raw_feature_spec returns a feature map (dict) for the input
        TFRecords based on what the Transform component has learned about the
        data (learned does not mean trained here). With this information, the
        raw data from ImportExampleGen can be parsed with the
        tf.io.parse_example utility function, then passed to model.tft_layer,
        so the final output is the transformed version of the raw input.
        """
        feature_spec = tf_transform_output.raw_feature_spec()
        parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)
        transformed_features = model.tft_layer(parsed_features)
        return transformed_features

    return serve_tf_examples_fn


def tf_examples_serving_signature(model, tf_transform_output):
    """Returns a function that performs data transformation (preprocessing)
    and model prediction in a sequential manner.

    The data transformation is identical to the process used in
    transform_features_signature above.
    """
    model.tft_layer = tf_transform_output.transform_features_layer()

    @tf.function(
        input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string, name="examples")]
    )
    def serve_tf_examples_fn(
        serialized_tf_example: tf.Tensor,
    ) -> Dict[str, tf.Tensor]:
        raw_feature_spec = tf_transform_output.raw_feature_spec()
        raw_features = tf.io.parse_example(serialized_tf_example, raw_feature_spec)
        transformed_features = model.tft_layer(raw_features)

        logits = model(transformed_features).logits
        return {LABEL_MODEL_KEY: logits}

    return serve_tf_examples_fn
deep-diver/mlops-hf-tf-vision-models
advanced_part2/modules/train.py
import keras_tuner
import tensorflow_transform as tft
from tfx.components.trainer.fn_args_utils import FnArgs

from .train_data import input_fn
from .ViT import build_model
from .signatures import (
    model_exporter,
    transform_features_signature,
    tf_examples_serving_signature,
)
from .hyperparams import TRAIN_BATCH_SIZE, EVAL_BATCH_SIZE
from .hyperparams import TRAIN_LENGTH, EVAL_LENGTH
from .hyperparams import EPOCHS
from .utils import INFO


def run_fn(fn_args: FnArgs):
    custom_config = fn_args.custom_config

    epochs = EPOCHS
    if custom_config is not None:
        if "is_local" in custom_config:
            epochs = 1

    tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)

    train_dataset = input_fn(
        fn_args.train_files,
        fn_args.data_accessor,
        tf_transform_output,
        is_train=True,
        batch_size=TRAIN_BATCH_SIZE,
    )

    eval_dataset = input_fn(
        fn_args.eval_files,
        fn_args.data_accessor,
        tf_transform_output,
        is_train=False,
        batch_size=EVAL_BATCH_SIZE,
    )

    hparams = keras_tuner.HyperParameters.from_config(fn_args.hyperparameters)
    INFO(f"HyperParameters for training: {hparams.get_config()}")

    model = build_model(hparams)
    model.fit(
        train_dataset,
        steps_per_epoch=TRAIN_LENGTH // TRAIN_BATCH_SIZE,
        validation_data=eval_dataset,
        # Use the eval batch size here (the original divided by the train
        # batch size; both are 32, so the behavior is unchanged).
        validation_steps=EVAL_LENGTH // EVAL_BATCH_SIZE,
        epochs=epochs,
    )

    model.save(
        fn_args.serving_model_dir,
        save_format="tf",
        signatures={
            "serving_default": model_exporter(model),
            "transform_features": transform_features_signature(
                model, tf_transform_output
            ),
            "from_examples": tf_examples_serving_signature(model, tf_transform_output),
        },
    )
deep-diver/mlops-hf-tf-vision-models
advanced_part2/modules/train_data.py
from typing import List

import tensorflow as tf
import tensorflow_transform as tft
from tfx_bsl.tfxio import dataset_options
from tfx.components.trainer.fn_args_utils import DataAccessor

from .utils import INFO
from .common import LABEL_MODEL_KEY
from .hyperparams import BATCH_SIZE


def input_fn(
    file_pattern: List[str],
    data_accessor: DataAccessor,
    tf_transform_output: tft.TFTransformOutput,
    is_train: bool = False,
    batch_size: int = BATCH_SIZE,
) -> tf.data.Dataset:
    INFO(f"Reading data from: {file_pattern}")

    dataset = data_accessor.tf_dataset_factory(
        file_pattern,
        dataset_options.TensorFlowDatasetOptions(
            batch_size=batch_size, label_key=LABEL_MODEL_KEY
        ),
        tf_transform_output.transformed_metadata.schema,
    )

    return dataset
deep-diver/mlops-hf-tf-vision-models
advanced_part2/modules/tuning.py
import keras_tuner
import tensorflow_transform as tft
from tfx.components.trainer.fn_args_utils import FnArgs
from tfx.v1.components import TunerFnResult

from .train_data import input_fn
from .ViT import build_model
from .hyperparams import TRAIN_BATCH_SIZE, EVAL_BATCH_SIZE
from .hyperparams import TRAIN_LENGTH, EVAL_LENGTH
from .hyperparams import get_hyperparameters


def tuner_fn(fn_args: FnArgs) -> TunerFnResult:
    hyperparameters = fn_args.custom_config["hyperparameters"]

    tuner = keras_tuner.RandomSearch(
        build_model,
        max_trials=6,
        hyperparameters=get_hyperparameters(hyperparameters),
        allow_new_entries=False,
        objective=keras_tuner.Objective("val_accuracy", "max"),
        directory=fn_args.working_dir,
        project_name="ViT MLOps Advanced Part2",
    )

    tf_transform_output = tft.TFTransformOutput(fn_args.transform_graph_path)

    train_dataset = input_fn(
        fn_args.train_files,
        fn_args.data_accessor,
        tf_transform_output,
        is_train=True,
        batch_size=TRAIN_BATCH_SIZE,
    )

    eval_dataset = input_fn(
        fn_args.eval_files,
        fn_args.data_accessor,
        tf_transform_output,
        is_train=False,
        batch_size=EVAL_BATCH_SIZE,
    )

    return TunerFnResult(
        tuner=tuner,
        fit_kwargs={
            "x": train_dataset,
            "validation_data": eval_dataset,
            "steps_per_epoch": TRAIN_LENGTH // TRAIN_BATCH_SIZE,
            "validation_steps": EVAL_LENGTH // EVAL_BATCH_SIZE,
        },
    )
deep-diver/mlops-hf-tf-vision-models
advanced_part2/modules/utils.py
import absl


def INFO(text: str):
    absl.logging.info(text)
deep-diver/mlops-hf-tf-vision-models
advanced_part2/pipeline/configs.py
import os

import tensorflow_model_analysis as tfma
import tfx.extensions.google_cloud_ai_platform.constants as vertex_const
import tfx.extensions.google_cloud_ai_platform.trainer.executor as vertex_training_const
import tfx.extensions.google_cloud_ai_platform.tuner.executor as vertex_tuner_const

PIPELINE_NAME = "vit-e2e-pipeline-advanced-part2"

try:
    import google.auth  # pylint: disable=g-import-not-at-top  # pytype: disable=import-error

    try:
        _, GOOGLE_CLOUD_PROJECT = google.auth.default()
    except google.auth.exceptions.DefaultCredentialsError:
        GOOGLE_CLOUD_PROJECT = "gcp-ml-172005"
except ImportError:
    GOOGLE_CLOUD_PROJECT = "gcp-ml-172005"

GOOGLE_CLOUD_REGION = "us-central1"

GCS_BUCKET_NAME = GOOGLE_CLOUD_PROJECT + "-complete-mlops"

PIPELINE_IMAGE = f"gcr.io/{GOOGLE_CLOUD_PROJECT}/{PIPELINE_NAME}"

OUTPUT_DIR = os.path.join("gs://", GCS_BUCKET_NAME)
PIPELINE_ROOT = os.path.join(OUTPUT_DIR, "tfx_pipeline_output", PIPELINE_NAME)

DATA_PATH = "gs://beans-lowres/tfrecords/"
SCHEMA_PATH = "pipeline/schema.pbtxt"

TRAINING_FN = "modules.train.run_fn"
TUNER_FN = "modules.tuning.tuner_fn"
PREPROCESSING_FN = "modules.preprocessing.preprocessing_fn"

EXAMPLE_GEN_BEAM_ARGS = None
TRANSFORM_BEAM_ARGS = None

TRAIN_BATCH_SIZE = 32
EVAL_BATCH_SIZE = 32
TRAIN_LENGTH = 1034
EVAL_LENGTH = 128

HYPER_PARAMETERS = {
    "learning_rate": {"values": [1e-3, 1e-2, 1e-1], "default": 1e-3},
}

EVAL_CONFIGS = tfma.EvalConfig(
    model_specs=[
        tfma.ModelSpec(
            signature_name="from_examples",
            preprocessing_function_names=["transform_features"],
            label_key="labels",
            prediction_key="labels",
        )
    ],
    slicing_specs=[tfma.SlicingSpec()],
    metrics_specs=[
        tfma.MetricsSpec(
            metrics=[
                tfma.MetricConfig(
                    class_name="SparseCategoricalAccuracy",
                    threshold=tfma.MetricThreshold(
                        value_threshold=tfma.GenericValueThreshold(
                            lower_bound={"value": 0.55}
                        ),
                        # Change threshold will be ignored if there is no
                        # baseline model resolved from MLMD (first run).
                        change_threshold=tfma.GenericChangeThreshold(
                            direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                            absolute={"value": -1e-3},
                        ),
                    ),
                )
            ]
        )
    ],
)

GCP_AI_PLATFORM_TRAINING_ARGS = {
    vertex_const.ENABLE_VERTEX_KEY: True,
    vertex_const.VERTEX_REGION_KEY: GOOGLE_CLOUD_REGION,
    vertex_training_const.TRAINING_ARGS_KEY: {
        "project": GOOGLE_CLOUD_PROJECT,
        "worker_pool_specs": [
            {
                "machine_spec": {
                    "machine_type": "n1-standard-4",
                    "accelerator_type": "NVIDIA_TESLA_K80",
                    "accelerator_count": 1,
                },
                "replica_count": 1,
                "container_spec": {
                    "image_uri": PIPELINE_IMAGE,
                },
            }
        ],
    },
    "use_gpu": True,
}

fullres_data = os.environ.get("FULL_RES_DATA", "false")
if fullres_data.lower() == "true":
    DATA_PATH = "gs://beans-fullres/tfrecords/"

    DATAFLOW_SERVICE_ACCOUNT = "csp-gde-dataflow@gcp-ml-172005.iam.gserviceaccount.com"
    DATAFLOW_MACHINE_TYPE = "n1-standard-4"
    DATAFLOW_MAX_WORKERS = 4
    DATAFLOW_DISK_SIZE_GB = 100

    EXAMPLE_GEN_BEAM_ARGS = [
        "--runner=DataflowRunner",
        "--project=" + GOOGLE_CLOUD_PROJECT,
        "--region=" + GOOGLE_CLOUD_REGION,
        "--service_account_email=" + DATAFLOW_SERVICE_ACCOUNT,
        "--machine_type=" + DATAFLOW_MACHINE_TYPE,
        "--experiments=use_runner_v2",
        "--max_num_workers=" + str(DATAFLOW_MAX_WORKERS),
        "--disk_size_gb=" + str(DATAFLOW_DISK_SIZE_GB),
    ]

    TRANSFORM_BEAM_ARGS = [
        "--runner=DataflowRunner",
        "--project=" + GOOGLE_CLOUD_PROJECT,
        "--region=" + GOOGLE_CLOUD_REGION,
        "--service_account_email=" + DATAFLOW_SERVICE_ACCOUNT,
        "--machine_type=" + DATAFLOW_MACHINE_TYPE,
        "--experiments=use_runner_v2",
        "--max_num_workers=" + str(DATAFLOW_MAX_WORKERS),
        "--disk_size_gb=" + str(DATAFLOW_DISK_SIZE_GB),
        "--worker_harness_container_image=" + PIPELINE_IMAGE,
    ]

    GCP_AI_PLATFORM_TRAINING_ARGS[vertex_training_const.TRAINING_ARGS_KEY][
        "worker_pool_specs"
    ] = [
        {
            "machine_spec": {
                "machine_type": "n1-standard-8",
                "accelerator_type": "NVIDIA_TESLA_V100",
                "accelerator_count": 1,
            },
            "replica_count": 1,
            "container_spec": {
                "image_uri": PIPELINE_IMAGE,
            },
        }
    ]

NUM_PARALLEL_TRIALS = 3
GCP_AI_PLATFORM_TUNER_ARGS = {
    vertex_const.ENABLE_VERTEX_KEY: True,
    vertex_const.VERTEX_REGION_KEY: GOOGLE_CLOUD_REGION,
    vertex_tuner_const.TUNING_ARGS_KEY: {
        "project": GOOGLE_CLOUD_PROJECT,
        "job_spec": {
            "worker_pool_specs": [
                {
                    "machine_spec": {
                        "machine_type": "n1-standard-4",
                        "accelerator_type": "NVIDIA_TESLA_K80",
                        "accelerator_count": 1,
                    },
                    "replica_count": 1,
                    "container_spec": {
                        "image_uri": PIPELINE_IMAGE,
                    },
                }
            ],
        },
    },
    vertex_tuner_const.REMOTE_TRIALS_WORKING_DIR_KEY: os.path.join(
        PIPELINE_ROOT, "trials"
    ),
    "use_gpu": True,
    "hyperparameters": HYPER_PARAMETERS,
}

GCP_AI_PLATFORM_SERVING_ARGS = {
    vertex_const.ENABLE_VERTEX_KEY: True,
    vertex_const.VERTEX_REGION_KEY: GOOGLE_CLOUD_REGION,
    vertex_const.VERTEX_CONTAINER_IMAGE_URI_KEY: "us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-8:latest",
    vertex_const.SERVING_ARGS_KEY: {
        "project_id": GOOGLE_CLOUD_PROJECT,
        "deployed_model_display_name": PIPELINE_NAME.replace("-", "_"),
        "endpoint_name": "prediction-" + PIPELINE_NAME.replace("-", "_"),
        "traffic_split": {"0": 100},
        "machine_type": "n1-standard-4",
        "min_replica_count": 1,
        "max_replica_count": 1,
    },
}
deep-diver/mlops-hf-tf-vision-models
advanced_part2/pipeline/kubeflow_pipeline.py
from typing import Any, Dict, List, Optional, Text

import tensorflow_model_analysis as tfma
from tfx import v1 as tfx

from ml_metadata.proto import metadata_store_pb2
from tfx.proto import example_gen_pb2
from tfx.proto import tuner_pb2
from tfx.components import ImportExampleGen
from tfx.components import StatisticsGen
from tfx.components import ExampleValidator
from tfx.components import Transform
from tfx.components import Evaluator
from tfx.extensions.google_cloud_ai_platform.trainer.component import (
    Trainer as VertexTrainer,
)
from tfx.extensions.google_cloud_ai_platform.pusher.component import (
    Pusher as VertexPusher,
)
from tfx.extensions.google_cloud_ai_platform.tuner.component import Tuner as VertexTuner
from tfx.orchestration import pipeline
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
from tfx.dsl.components.common import resolver
from tfx.dsl.experimental.latest_blessed_model_resolver import (
    LatestBlessedModelResolver,
)


def create_pipeline(
    pipeline_name: Text,
    pipeline_root: Text,
    data_path: Text,
    schema_path: Text,
    modules: Dict[Text, Text],
    eval_configs: tfma.EvalConfig,
    metadata_connection_config: Optional[metadata_store_pb2.ConnectionConfig] = None,
    ai_platform_training_args: Optional[Dict[Text, Text]] = None,
    ai_platform_tuner_args: Optional[Dict[Text, Text]] = None,
    tuner_args: tuner_pb2.TuneArgs = None,
    ai_platform_serving_args: Optional[Dict[Text, Any]] = None,
    example_gen_beam_args: Optional[List] = None,
    transform_beam_args: Optional[List] = None,
) -> tfx.dsl.Pipeline:
    components = []

    input_config = example_gen_pb2.Input(
        splits=[
            example_gen_pb2.Input.Split(name="train", pattern="train-*.tfrec"),
            example_gen_pb2.Input.Split(name="eval", pattern="val-*.tfrec"),
        ]
    )
    example_gen = ImportExampleGen(input_base=data_path, input_config=input_config)
    if example_gen_beam_args is not None:
        example_gen.with_beam_pipeline_args(example_gen_beam_args)
    components.append(example_gen)

    statistics_gen = StatisticsGen(examples=example_gen.outputs["examples"])
    components.append(statistics_gen)

    schema_gen = tfx.components.ImportSchemaGen(schema_file=schema_path)
    components.append(schema_gen)

    example_validator = ExampleValidator(
        statistics=statistics_gen.outputs["statistics"],
        schema=schema_gen.outputs["schema"],
    )
    components.append(example_validator)

    transform_args = {
        "examples": example_gen.outputs["examples"],
        "schema": schema_gen.outputs["schema"],
        "preprocessing_fn": modules["preprocessing_fn"],
    }
    transform = Transform(**transform_args)
    if transform_beam_args is not None:
        transform.with_beam_pipeline_args(transform_beam_args)
    components.append(transform)

    tuner = VertexTuner(
        tuner_fn=modules["tuner_fn"],
        examples=transform.outputs["transformed_examples"],
        transform_graph=transform.outputs["transform_graph"],
        tune_args=tuner_args,
        custom_config=ai_platform_tuner_args,
    )
    components.append(tuner)

    trainer_args = {
        "run_fn": modules["training_fn"],
        "transformed_examples": transform.outputs["transformed_examples"],
        "transform_graph": transform.outputs["transform_graph"],
        "schema": schema_gen.outputs["schema"],
        "hyperparameters": tuner.outputs["best_hyperparameters"],
        "custom_config": ai_platform_training_args,
    }
    trainer = VertexTrainer(**trainer_args)
    components.append(trainer)

    model_resolver = resolver.Resolver(
        strategy_class=LatestBlessedModelResolver,
        model=Channel(type=Model),
        model_blessing=Channel(type=ModelBlessing),
    ).with_id("latest_blessed_model_resolver")
    components.append(model_resolver)

    evaluator = Evaluator(
        examples=example_gen.outputs["examples"],
        model=trainer.outputs["model"],
        baseline_model=model_resolver.outputs["model"],
        eval_config=eval_configs,
    )
    components.append(evaluator)

    pusher_args = {
        "model": trainer.outputs["model"],
        "model_blessing": evaluator.outputs["blessing"],
        "custom_config": ai_platform_serving_args,
    }
    pusher = VertexPusher(**pusher_args)  # pylint: disable=unused-variable
    components.append(pusher)

    return pipeline.Pipeline(
        pipeline_name=pipeline_name,
        pipeline_root=pipeline_root,
        components=components,
        enable_cache=True,
        metadata_connection_config=metadata_connection_config,
    )
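
# Compile sketch added for illustration; not part of the original module. It
# shows one way to compile this definition for Vertex AI Pipelines with the
# KubeflowV2DagRunner; the `pipeline.configs` import path and the output file
# name are assumptions based on the repository layout.
if __name__ == "__main__":
    from pipeline import configs

    runner = tfx.orchestration.experimental.KubeflowV2DagRunner(
        config=tfx.orchestration.experimental.KubeflowV2DagRunnerConfig(
            default_image=configs.PIPELINE_IMAGE
        ),
        output_filename=configs.PIPELINE_NAME + "_pipeline.json",
    )
    runner.run(
        create_pipeline(
            pipeline_name=configs.PIPELINE_NAME,
            pipeline_root=configs.PIPELINE_ROOT,
            data_path=configs.DATA_PATH,
            schema_path=configs.SCHEMA_PATH,
            modules={
                "preprocessing_fn": configs.PREPROCESSING_FN,
                "training_fn": configs.TRAINING_FN,
                "tuner_fn": configs.TUNER_FN,
            },
            eval_configs=configs.EVAL_CONFIGS,
            ai_platform_training_args=configs.GCP_AI_PLATFORM_TRAINING_ARGS,
            ai_platform_tuner_args=configs.GCP_AI_PLATFORM_TUNER_ARGS,
            tuner_args=tuner_pb2.TuneArgs(
                num_parallel_trials=configs.NUM_PARALLEL_TRIALS
            ),
            ai_platform_serving_args=configs.GCP_AI_PLATFORM_SERVING_ARGS,
            example_gen_beam_args=configs.EXAMPLE_GEN_BEAM_ARGS,
            transform_beam_args=configs.TRANSFORM_BEAM_ARGS,
        )
    )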
deep-diver/mlops-hf-tf-vision-models
advanced_part2/pipeline/local_pipeline.py
from typing import Dict, Optional, Text

import tensorflow_model_analysis as tfma
from tfx import v1 as tfx

from ml_metadata.proto import metadata_store_pb2
from tfx.proto import example_gen_pb2
from tfx.components import ImportExampleGen
from tfx.components import StatisticsGen
from tfx.components import ExampleValidator
from tfx.components import Transform
from tfx.components import Tuner
from tfx.components import Trainer
from tfx.components import Evaluator
from tfx.components import Pusher
from tfx.orchestration import pipeline
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
from tfx.dsl.components.common import resolver
from tfx.dsl.experimental.latest_blessed_model_resolver import (
    LatestBlessedModelResolver,
)


def create_pipeline(
    pipeline_name: Text,
    pipeline_root: Text,
    data_path: Text,
    schema_path: Text,
    modules: Dict[Text, Text],
    hyperparameters: Dict[Text, Text],
    eval_configs: tfma.EvalConfig,
    serving_model_dir: Text,
    metadata_connection_config: Optional[metadata_store_pb2.ConnectionConfig] = None,
) -> tfx.dsl.Pipeline:
    components = []

    input_config = example_gen_pb2.Input(
        splits=[
            example_gen_pb2.Input.Split(name="train", pattern="train-00-*.tfrec"),
            example_gen_pb2.Input.Split(name="eval", pattern="val-00-*.tfrec"),
        ]
    )
    example_gen = ImportExampleGen(input_base=data_path, input_config=input_config)
    components.append(example_gen)

    statistics_gen = StatisticsGen(examples=example_gen.outputs["examples"])
    components.append(statistics_gen)

    schema_gen = tfx.components.ImportSchemaGen(schema_file=schema_path)
    components.append(schema_gen)

    example_validator = ExampleValidator(
        statistics=statistics_gen.outputs["statistics"],
        schema=schema_gen.outputs["schema"],
    )
    components.append(example_validator)

    transform_args = {
        "examples": example_gen.outputs["examples"],
        "schema": schema_gen.outputs["schema"],
        "preprocessing_fn": modules["preprocessing_fn"],
    }
    transform = Transform(**transform_args)
    components.append(transform)

    tuner = Tuner(
        tuner_fn=modules["tuner_fn"],
        examples=transform.outputs["transformed_examples"],
        schema=schema_gen.outputs["schema"],
        transform_graph=transform.outputs["transform_graph"],
        custom_config={"hyperparameters": hyperparameters},
    )
    components.append(tuner)

    trainer_args = {
        "run_fn": modules["training_fn"],
        "transformed_examples": transform.outputs["transformed_examples"],
        "transform_graph": transform.outputs["transform_graph"],
        "schema": schema_gen.outputs["schema"],
        "hyperparameters": tuner.outputs["best_hyperparameters"],
        "custom_config": {"is_local": True},
    }
    trainer = Trainer(**trainer_args)
    components.append(trainer)

    model_resolver = resolver.Resolver(
        strategy_class=LatestBlessedModelResolver,
        model=Channel(type=Model),
        model_blessing=Channel(type=ModelBlessing),
    ).with_id("latest_blessed_model_resolver")
    components.append(model_resolver)

    evaluator = Evaluator(
        examples=example_gen.outputs["examples"],
        model=trainer.outputs["model"],
        baseline_model=model_resolver.outputs["model"],
        eval_config=eval_configs,
    )
    components.append(evaluator)

    pusher_args = {
        "model": trainer.outputs["model"],
        "model_blessing": evaluator.outputs["blessing"],
        "push_destination": tfx.proto.PushDestination(
            filesystem=tfx.proto.PushDestination.Filesystem(
                base_directory=serving_model_dir
            )
        ),
    }
    pusher = Pusher(**pusher_args)  # pylint: disable=unused-variable
    components.append(pusher)

    return pipeline.Pipeline(
        pipeline_name=pipeline_name,
        pipeline_root=pipeline_root,
        components=components,
        enable_cache=False,
        metadata_connection_config=metadata_connection_config,
    )
deep-diver/mlops-hf-tf-vision-models
basic/modules/ViT.py
import tensorflow as tf
from transformers import TFViTForImageClassification

from .common import LABELS
from .common import PRETRAIN_CHECKPOINT
from .utils import INFO


def build_model():
    id2label = {str(i): c for i, c in enumerate(LABELS)}
    label2id = {c: str(i) for i, c in enumerate(LABELS)}

    model = TFViTForImageClassification.from_pretrained(
        PRETRAIN_CHECKPOINT,
        num_labels=len(LABELS),
        label2id=label2id,
        id2label=id2label,
    )
    model.layers[0].trainable = False

    loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    model.compile(optimizer="adam", loss=loss, metrics=["accuracy"])

    INFO(model.summary())
    return model
deep-diver/mlops-hf-tf-vision-models
basic/modules/common.py
IMAGE_TFREC_KEY = "image"
IMAGE_SHAPE_TFREC_KEY = "image_shape"
LABEL_TFREC_KEY = "label"

IMAGE_MODEL_KEY = "pixel_values"
LABEL_MODEL_KEY = "labels"

CONCRETE_INPUT = "pixel_values"

PRETRAIN_CHECKPOINT = "google/vit-base-patch16-224-in21k"

LABELS = ["angular_leaf_spot", "bean_rust", "healthy"]
deep-diver/mlops-hf-tf-vision-models
basic/modules/hyperparams.py
EPOCHS = 1
BATCH_SIZE = 32
TRAIN_BATCH_SIZE = 32
EVAL_BATCH_SIZE = 32
TRAIN_LENGTH = 1034
EVAL_LENGTH = 128
deep-diver/mlops-hf-tf-vision-models
basic/modules/signatures.py
import tensorflow as tf
from transformers import ViTFeatureExtractor

from .common import PRETRAIN_CHECKPOINT
from .common import CONCRETE_INPUT

feature_extractor = ViTFeatureExtractor.from_pretrained(PRETRAIN_CHECKPOINT)


def _normalize_img(
    img, mean=feature_extractor.image_mean, std=feature_extractor.image_std
):
    img = img / 255
    mean = tf.constant(mean)
    std = tf.constant(std)
    return (img - mean) / std


def _preprocess_serving(string_input):
    decoded_input = tf.io.decode_base64(string_input)
    decoded = tf.io.decode_jpeg(decoded_input, channels=3)
    resized = tf.image.resize(decoded, size=(224, 224))
    normalized = _normalize_img(resized)
    normalized = tf.transpose(
        normalized, (2, 0, 1)
    )  # Since HF models are channel-first.
    return normalized


@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def _preprocess_fn(string_input):
    decoded_images = tf.map_fn(
        _preprocess_serving, string_input, dtype=tf.float32, back_prop=False
    )
    return {CONCRETE_INPUT: decoded_images}


def model_exporter(model: tf.keras.Model):
    m_call = tf.function(model.call).get_concrete_function(
        tf.TensorSpec(shape=[None, 3, 224, 224], dtype=tf.float32, name=CONCRETE_INPUT)
    )

    @tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
    def serving_fn(string_input):
        labels = tf.constant(list(model.config.id2label.values()), dtype=tf.string)
        images = _preprocess_fn(string_input)

        predictions = m_call(**images)

        indices = tf.argmax(predictions.logits, axis=1)
        pred_source = tf.gather(params=labels, indices=indices)
        probs = tf.nn.softmax(predictions.logits, axis=1)
        pred_confidence = tf.reduce_max(probs, axis=1)
        return {"label": pred_source, "confidence": pred_confidence}

    return serving_fn
deep-diver/mlops-hf-tf-vision-models
basic/modules/train.py
from tfx.components.trainer.fn_args_utils import FnArgs

from .train_data import input_fn
from .ViT import build_model
from .signatures import model_exporter
from .hyperparams import TRAIN_BATCH_SIZE, EVAL_BATCH_SIZE
from .hyperparams import TRAIN_LENGTH, EVAL_LENGTH
from .hyperparams import EPOCHS


def run_fn(fn_args: FnArgs):
    train_dataset = input_fn(
        fn_args.train_files,
        is_train=True,
        batch_size=TRAIN_BATCH_SIZE,
    )

    eval_dataset = input_fn(
        fn_args.eval_files,
        is_train=False,
        batch_size=EVAL_BATCH_SIZE,
    )

    model = build_model()
    model.fit(
        train_dataset,
        steps_per_epoch=TRAIN_LENGTH // TRAIN_BATCH_SIZE,
        validation_data=eval_dataset,
        # Use the eval batch size here (the original divided by the train
        # batch size; both are 32, so the behavior is unchanged).
        validation_steps=EVAL_LENGTH // EVAL_BATCH_SIZE,
        epochs=EPOCHS,
    )

    model.save(
        fn_args.serving_model_dir, save_format="tf", signatures=model_exporter(model)
    )
deep-diver/mlops-hf-tf-vision-models
basic/modules/train_data.py
from typing import List

import tensorflow as tf

from .utils import INFO
from .common import IMAGE_TFREC_KEY, IMAGE_SHAPE_TFREC_KEY, LABEL_TFREC_KEY
from .common import IMAGE_MODEL_KEY, LABEL_MODEL_KEY
from .hyperparams import BATCH_SIZE


def _parse_tfr(proto):
    feature_description = {
        IMAGE_TFREC_KEY: tf.io.VarLenFeature(tf.float32),
        IMAGE_SHAPE_TFREC_KEY: tf.io.VarLenFeature(tf.int64),
        LABEL_TFREC_KEY: tf.io.VarLenFeature(tf.int64),
    }
    rec = tf.io.parse_single_example(proto, feature_description)
    image_shape = tf.sparse.to_dense(rec[IMAGE_SHAPE_TFREC_KEY])
    image = tf.reshape(tf.sparse.to_dense(rec[IMAGE_TFREC_KEY]), image_shape)
    label = tf.sparse.to_dense(rec[LABEL_TFREC_KEY])
    return {IMAGE_MODEL_KEY: image, LABEL_MODEL_KEY: label}


def _preprocess(example_batch):
    images = example_batch[IMAGE_MODEL_KEY]
    images = tf.transpose(
        images, perm=[0, 1, 2, 3]
    )  # identity transpose; pins the (batch_size, height, width, num_channels) layout
    images = tf.image.resize(images, (224, 224))
    images = tf.transpose(images, perm=[0, 3, 1, 2])  # to channel-first for ViT

    labels = example_batch[LABEL_MODEL_KEY]
    labels = tf.transpose(labels, perm=[0, 1])  # identity transpose so TF can infer the shapes

    return {IMAGE_MODEL_KEY: images, LABEL_MODEL_KEY: labels}


def input_fn(
    file_pattern: List[str],
    batch_size: int = BATCH_SIZE,
    is_train: bool = False,
) -> tf.data.Dataset:
    INFO(f"Reading data from: {file_pattern}")

    dataset = tf.data.TFRecordDataset(
        tf.io.gfile.glob(file_pattern[0] + ".gz"),
        num_parallel_reads=tf.data.AUTOTUNE,
        compression_type="GZIP",
    ).map(_parse_tfr, num_parallel_calls=tf.data.AUTOTUNE)

    if is_train:
        dataset = dataset.shuffle(batch_size * 2)

    dataset = dataset.batch(batch_size)
    dataset = dataset.prefetch(tf.data.AUTOTUNE)
    dataset = dataset.map(_preprocess)

    return dataset
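
# Usage sketch added for illustration; not part of the original module. The
# file pattern below is an assumption: input_fn appends ".gz" before globbing,
# so pass the pattern without that suffix. Run via `python -m modules.train_data`
# so the relative imports resolve.
if __name__ == "__main__":
    ds = input_fn(
        ["outputs/examples/Split-train/*"],  # hypothetical TFRecord location
        batch_size=8,
        is_train=True,
    )
    for batch in ds.take(1):
        print(batch[IMAGE_MODEL_KEY].shape, batch[LABEL_MODEL_KEY].shape)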
deep-diver/mlops-hf-tf-vision-models
basic/modules/utils.py
import absl


def INFO(text: str):
    absl.logging.info(text)
deep-diver/mlops-hf-tf-vision-models
basic/pipeline/configs.py
import os  # pylint: disable=unused-import

import tfx.extensions.google_cloud_ai_platform.constants as vertex_const
import tfx.extensions.google_cloud_ai_platform.trainer.executor as vertex_training_const

PIPELINE_NAME = "vit-e2e-pipeline-basic"

try:
    import google.auth  # pylint: disable=g-import-not-at-top  # pytype: disable=import-error

    try:
        _, GOOGLE_CLOUD_PROJECT = google.auth.default()
    except google.auth.exceptions.DefaultCredentialsError:
        GOOGLE_CLOUD_PROJECT = "gcp-ml-172005"
except ImportError:
    GOOGLE_CLOUD_PROJECT = "gcp-ml-172005"

GOOGLE_CLOUD_REGION = "us-central1"

GCS_BUCKET_NAME = GOOGLE_CLOUD_PROJECT + "-complete-mlops"

PIPELINE_IMAGE = f"gcr.io/{GOOGLE_CLOUD_PROJECT}/{PIPELINE_NAME}"

OUTPUT_DIR = os.path.join("gs://", GCS_BUCKET_NAME)
PIPELINE_ROOT = os.path.join(OUTPUT_DIR, "tfx_pipeline_output", PIPELINE_NAME)

DATA_PATH = "gs://beans-lowres/tfrecords/"

TRAINING_FN = "modules.train.run_fn"

EXAMPLE_GEN_BEAM_ARGS = None

GCP_AI_PLATFORM_TRAINING_ARGS = {
    vertex_const.ENABLE_VERTEX_KEY: True,
    vertex_const.VERTEX_REGION_KEY: GOOGLE_CLOUD_REGION,
    vertex_training_const.TRAINING_ARGS_KEY: {
        "project": GOOGLE_CLOUD_PROJECT,
        "worker_pool_specs": [
            {
                "machine_spec": {
                    "machine_type": "n1-standard-4",
                    "accelerator_type": "NVIDIA_TESLA_K80",
                    "accelerator_count": 1,
                },
                "replica_count": 1,
                "container_spec": {
                    "image_uri": PIPELINE_IMAGE,
                },
            }
        ],
    },
    "use_gpu": True,
}

fullres_data = os.environ.get("FULL_RES_DATA", "false")
if fullres_data.lower() == "true":
    DATA_PATH = "gs://beans-fullres/tfrecords/"

    DATAFLOW_SERVICE_ACCOUNT = "csp-gde-dataflow@gcp-ml-172005.iam.gserviceaccount.com"
    DATAFLOW_MACHINE_TYPE = "n1-standard-4"
    DATAFLOW_MAX_WORKERS = 4
    DATAFLOW_DISK_SIZE_GB = 100

    EXAMPLE_GEN_BEAM_ARGS = [
        "--runner=DataflowRunner",
        "--project=" + GOOGLE_CLOUD_PROJECT,
        "--region=" + GOOGLE_CLOUD_REGION,
        "--service_account_email=" + DATAFLOW_SERVICE_ACCOUNT,
        "--machine_type=" + DATAFLOW_MACHINE_TYPE,
        "--experiments=use_runner_v2",
        "--max_num_workers=" + str(DATAFLOW_MAX_WORKERS),
        "--disk_size_gb=" + str(DATAFLOW_DISK_SIZE_GB),
    ]

    GCP_AI_PLATFORM_TRAINING_ARGS[vertex_training_const.TRAINING_ARGS_KEY][
        "worker_pool_specs"
    ] = [
        {
            "machine_spec": {
                "machine_type": "n1-standard-8",
                "accelerator_type": "NVIDIA_TESLA_V100",
                "accelerator_count": 1,
            },
            "replica_count": 1,
            "container_spec": {
                "image_uri": PIPELINE_IMAGE,
            },
        }
    ]

GCP_AI_PLATFORM_SERVING_ARGS = {
    vertex_const.ENABLE_VERTEX_KEY: True,
    vertex_const.VERTEX_REGION_KEY: GOOGLE_CLOUD_REGION,
    vertex_const.VERTEX_CONTAINER_IMAGE_URI_KEY: "us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-8:latest",
    vertex_const.SERVING_ARGS_KEY: {
        "project_id": GOOGLE_CLOUD_PROJECT,
        "deployed_model_display_name": PIPELINE_NAME.replace("-", "_"),
        "endpoint_name": "prediction-" + PIPELINE_NAME.replace("-", "_"),
        "traffic_split": {"0": 100},
        "machine_type": "n1-standard-4",
        "min_replica_count": 1,
        "max_replica_count": 1,
    },
}
deep-diver/mlops-hf-tf-vision-models
basic/pipeline/kubeflow_pipeline.py
from typing import Any, Dict, List, Optional, Text

from tfx import v1 as tfx

from ml_metadata.proto import metadata_store_pb2
from tfx.proto import example_gen_pb2
from tfx.components import ImportExampleGen
from tfx.extensions.google_cloud_ai_platform.trainer.component import (
    Trainer as VertexTrainer,
)
from tfx.extensions.google_cloud_ai_platform.pusher.component import (
    Pusher as VertexPusher,
)
from tfx.orchestration import pipeline


def create_pipeline(
    pipeline_name: Text,
    pipeline_root: Text,
    data_path: Text,
    modules: Dict[Text, Text],
    metadata_connection_config: Optional[metadata_store_pb2.ConnectionConfig] = None,
    ai_platform_training_args: Optional[Dict[Text, Text]] = None,
    ai_platform_serving_args: Optional[Dict[Text, Any]] = None,
    example_gen_beam_args: Optional[List] = None,
) -> tfx.dsl.Pipeline:
    components = []

    input_config = example_gen_pb2.Input(
        splits=[
            example_gen_pb2.Input.Split(name="train", pattern="train-*.tfrec"),
            example_gen_pb2.Input.Split(name="eval", pattern="val-*.tfrec"),
        ]
    )
    example_gen = ImportExampleGen(input_base=data_path, input_config=input_config)
    if example_gen_beam_args is not None:
        example_gen.with_beam_pipeline_args(example_gen_beam_args)
    components.append(example_gen)

    trainer_args = {
        "run_fn": modules["training_fn"],
        "examples": example_gen.outputs["examples"],
        "custom_config": ai_platform_training_args,
    }
    trainer = VertexTrainer(**trainer_args)
    components.append(trainer)

    pusher_args = {
        "model": trainer.outputs["model"],
        "custom_config": ai_platform_serving_args,
    }
    pusher = VertexPusher(**pusher_args)  # pylint: disable=unused-variable
    components.append(pusher)

    return pipeline.Pipeline(
        pipeline_name=pipeline_name,
        pipeline_root=pipeline_root,
        components=components,
        enable_cache=True,
        metadata_connection_config=metadata_connection_config,
    )
deep-diver/mlops-hf-tf-vision-models
basic/pipeline/local_pipeline.py
from typing import Dict, Optional, Text

from tfx import v1 as tfx

from ml_metadata.proto import metadata_store_pb2
from tfx.proto import example_gen_pb2
from tfx.components import ImportExampleGen
from tfx.components import Trainer
from tfx.components import Pusher
from tfx.orchestration import pipeline


def create_pipeline(
    pipeline_name: Text,
    pipeline_root: Text,
    data_path: Text,
    modules: Dict[Text, Text],
    serving_model_dir: Text,
    metadata_connection_config: Optional[metadata_store_pb2.ConnectionConfig] = None,
) -> tfx.dsl.Pipeline:
    components = []

    input_config = example_gen_pb2.Input(
        splits=[
            example_gen_pb2.Input.Split(name="train", pattern="train-00-*.tfrec"),
            example_gen_pb2.Input.Split(name="eval", pattern="val-00-*.tfrec"),
        ]
    )
    example_gen = ImportExampleGen(input_base=data_path, input_config=input_config)
    components.append(example_gen)

    trainer = Trainer(
        run_fn=modules["training_fn"],
        examples=example_gen.outputs["examples"],
    )
    components.append(trainer)

    pusher_args = {
        "model": trainer.outputs["model"],
        "push_destination": tfx.proto.PushDestination(
            filesystem=tfx.proto.PushDestination.Filesystem(
                base_directory=serving_model_dir
            )
        ),
    }
    pusher = Pusher(**pusher_args)  # pylint: disable=unused-variable
    components.append(pusher)

    return pipeline.Pipeline(
        pipeline_name=pipeline_name,
        pipeline_root=pipeline_root,
        components=components,
        enable_cache=False,
        metadata_connection_config=metadata_connection_config,
    )
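
# Usage sketch added for illustration; not part of the original module. It
# wires create_pipeline into TFX's LocalDagRunner; the `pipeline.configs`
# import path and the output directories are assumptions based on the
# repository layout.
if __name__ == "__main__":
    from pipeline import configs

    tfx.orchestration.LocalDagRunner().run(
        create_pipeline(
            pipeline_name=configs.PIPELINE_NAME,
            pipeline_root="outputs/pipeline_root",
            data_path=configs.DATA_PATH,
            modules={"training_fn": configs.TRAINING_FN},
            serving_model_dir="outputs/serving_model",
            metadata_connection_config=tfx.orchestration.metadata.sqlite_metadata_connection_config(
                "outputs/metadata.db"
            ),
        )
    )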
deep-diver/mlops-hf-tf-vision-models
hf_integration/modules/ViT.py
import tensorflow as tf
import keras_tuner
from transformers import TFViTForImageClassification

from .common import LABELS
from .common import PRETRAIN_CHECKPOINT
from .utils import INFO


def build_model(hparams: keras_tuner.HyperParameters):
    id2label = {str(i): c for i, c in enumerate(LABELS)}
    label2id = {c: str(i) for i, c in enumerate(LABELS)}

    model = TFViTForImageClassification.from_pretrained(
        PRETRAIN_CHECKPOINT,
        num_labels=len(LABELS),
        label2id=label2id,
        id2label=id2label,
    )
    model.layers[0].trainable = False

    loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    optimizer = tf.keras.optimizers.Adam(learning_rate=hparams.get("learning_rate"))
    model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"])

    INFO(model.summary())
    return model