{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### TFX Components\n",
    "\n",
    "This notebook shows how to create a pipeline that uses TFX components:\n",
    "\n",
    "* CsvExampleGen\n",
    "* StatisticsGen\n",
    "* SchemaGen\n",
    "* ExampleValidator\n",
    "* Transform\n",
    "* Trainer\n",
    "* Evaluator"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Put your KFP cluster endpoint URL here if working from GCP notebooks (or local notebooks). ('https://xxxxx.notebooks.googleusercontent.com/')\n",
    "kfp_endpoint='https://XXXXX.notebooks.googleusercontent.com/'\n",
    "\n",
    "# Replace with your GCS bucket, project ID and GCP region\n",
    "root_output_uri = '<your gcs bucket>'\n",
    "project_id = '<your project id>'\n",
    "gcp_region = '<your gcp region>'\n",
    "\n",
    "# Arguments forwarded to every Beam-based TFX component below so that the\n",
    "# heavy data processing runs on Google Cloud Dataflow rather than inside\n",
    "# the pipeline pods.\n",
    "beam_pipeline_args = [\n",
    "    '--runner=DataflowRunner',\n",
    "    '--experiments=shuffle_mode=auto',\n",
    "    '--project=' + project_id,\n",
    "    '--temp_location=' + root_output_uri + '/tmp',\n",
    "    '--region=' + gcp_region,\n",
    "    '--disk_size_gb=50',\n",
    "]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Public sample copy of the Chicago Taxi Trips CSV data used as pipeline input.\n",
    "input_data_uri = 'gs://ml-pipeline-playground/tensorflow-tfx-repo/tfx/components/testdata/external/csv'\n",
    "\n",
    "# Preprocessing/training module consumed by the Transform and Trainer components.\n",
    "# Only S3/GCS URIs are supported for now.\n",
    "module_file = 'gs://ml-pipeline-playground/tensorflow-tfx-repo/v0.21.4/tfx/examples/chicago_taxi_pipeline/taxi_utils.py'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Kubeflow Pipelines SDK: provides the kfp.dsl placeholders and the kfp.Client\n",
    "# used below to submit the pipeline run.\n",
    "import kfp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "from kfp.components import load_component_from_url\n",
    "\n",
    "# Load the TFX wrapper components, pinned to a fixed kubeflow/pipelines commit\n",
    "# so the component definitions cannot change underneath this notebook.\n",
    "CsvExampleGen_op    = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/9b0d47a226c61f96e1ebe7a8ba427df38f8734e1/components/deprecated/tfx/ExampleGen/CsvExampleGen/with_URI_IO/component.yaml')\n",
    "StatisticsGen_op    = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/9b0d47a226c61f96e1ebe7a8ba427df38f8734e1/components/deprecated/tfx/StatisticsGen/with_URI_IO/component.yaml')\n",
    "SchemaGen_op        = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/9b0d47a226c61f96e1ebe7a8ba427df38f8734e1/components/deprecated/tfx/SchemaGen/with_URI_IO/component.yaml')\n",
    "ExampleValidator_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/9b0d47a226c61f96e1ebe7a8ba427df38f8734e1/components/deprecated/tfx/ExampleValidator/with_URI_IO/component.yaml')\n",
    "Transform_op        = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/9b0d47a226c61f96e1ebe7a8ba427df38f8734e1/components/deprecated/tfx/Transform/with_URI_IO/component.yaml')\n",
    "Trainer_op          = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/9b0d47a226c61f96e1ebe7a8ba427df38f8734e1/components/deprecated/tfx/Trainer/with_URI_IO/component.yaml')\n",
    "Evaluator_op        = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/9b0d47a226c61f96e1ebe7a8ba427df38f8734e1/components/deprecated/tfx/Evaluator/with_URI_IO/component.yaml')\n",
    "\n",
    "# Chains the TFX components into a KFP pipeline. `beam_pipeline_args` and\n",
    "# `module_file` are globals defined in earlier cells of this notebook.\n",
    "def tfx_pipeline(\n",
    "    input_data_uri,\n",
    "    root_output_uri,\n",
    "):\n",
    "    # Give every run its own output prefix. The explicit '/' separator matters:\n",
    "    # without it the execution ID is appended directly onto the bucket URI\n",
    "    # (e.g. 'gs://my-bucket123' instead of 'gs://my-bucket/123').\n",
    "    generated_output_uri = str(root_output_uri) + '/' + kfp.dsl.EXECUTION_ID_PLACEHOLDER\n",
    "\n",
    "    examples_task = CsvExampleGen_op(\n",
    "        input_base=input_data_uri,\n",
    "        input_config=json.dumps({\n",
    "            \"splits\": [\n",
    "                {'name': 'data', 'pattern': '*.csv'},\n",
    "            ]\n",
    "        }),\n",
    "        output_config=json.dumps({\n",
    "            \"splitConfig\": {\n",
    "                \"splits\": [\n",
    "                    {'name': 'train', 'hash_buckets': 2},\n",
    "                    {'name': 'eval', 'hash_buckets': 1},\n",
    "                ]\n",
    "            }\n",
    "        }),\n",
    "        beam_pipeline_args=beam_pipeline_args,\n",
    "\n",
    "        output_examples_uri=generated_output_uri,\n",
    "    )\n",
    "\n",
    "    statistics_task = StatisticsGen_op(\n",
    "        examples_uri=examples_task.outputs['examples_uri'],\n",
    "        beam_pipeline_args=beam_pipeline_args,\n",
    "\n",
    "        output_statistics_uri=generated_output_uri,\n",
    "    )\n",
    "\n",
    "    schema_task = SchemaGen_op(\n",
    "        statistics_uri=statistics_task.outputs['statistics_uri'],\n",
    "        beam_pipeline_args=beam_pipeline_args,\n",
    "\n",
    "        output_schema_uri=generated_output_uri,\n",
    "    )\n",
    "\n",
    "    # Performs anomaly detection based on statistics and data schema.\n",
    "    validator_task = ExampleValidator_op(\n",
    "        statistics_uri=statistics_task.outputs['statistics_uri'],\n",
    "        schema_uri=schema_task.outputs['schema_uri'],\n",
    "        beam_pipeline_args=beam_pipeline_args,\n",
    "\n",
    "        output_anomalies_uri=generated_output_uri,\n",
    "    )\n",
    "\n",
    "    # Performs transformations and feature engineering in training and serving.\n",
    "    transform_task = Transform_op(\n",
    "        examples_uri=examples_task.outputs['examples_uri'],\n",
    "        schema_uri=schema_task.outputs['schema_uri'],\n",
    "        module_file=module_file,\n",
    "        beam_pipeline_args=beam_pipeline_args,\n",
    "\n",
    "        output_transform_graph_uri=generated_output_uri + '/transform_graph',\n",
    "        output_transformed_examples_uri=generated_output_uri + '/transformed_examples',\n",
    "        # Fixed copy-paste bug: the analyzer cache previously shared its URI\n",
    "        # with the transformed examples output above.\n",
    "        output_updated_analyzer_cache_uri=generated_output_uri + '/updated_analyzer_cache',\n",
    "    )\n",
    "\n",
    "    trainer_task = Trainer_op(\n",
    "        module_file=module_file,\n",
    "        examples_uri=transform_task.outputs['transformed_examples_uri'],\n",
    "        schema_uri=schema_task.outputs['schema_uri'],\n",
    "        transform_graph_uri=transform_task.outputs['transform_graph_uri'],\n",
    "        train_args=json.dumps({'num_steps': 10000}),\n",
    "        eval_args=json.dumps({'num_steps': 5000}),\n",
    "        beam_pipeline_args=beam_pipeline_args,\n",
    "\n",
    "        output_model_uri=generated_output_uri + '/model',\n",
    "        output_model_run_uri=generated_output_uri + '/model_run',\n",
    "    )\n",
    "\n",
    "    # Uses TFMA to compute evaluation statistics over features of a model and\n",
    "    # perform quality validation of a candidate model (compared to a baseline).\n",
    "    eval_config = {\n",
    "        'model_specs': [{'signature_name': 'eval'}],\n",
    "        'slicing_specs': [\n",
    "            {},\n",
    "            {'feature_keys': ['trip_start_hour']},\n",
    "        ],\n",
    "        'metrics_specs': [{\n",
    "            'thresholds': {\n",
    "                'accuracy': {\n",
    "                    'value_threshold': {\n",
    "                        'lower_bound': 0.6,\n",
    "                    },\n",
    "                    'change_threshold': {\n",
    "                        'direction': 2,  # tfma.MetricDirection.HIGHER_IS_BETTER\n",
    "                        'absolute': -1e-10,\n",
    "                    }\n",
    "                }\n",
    "            }\n",
    "        }],\n",
    "    }\n",
    "    model_analyzer = Evaluator_op(\n",
    "        examples_uri=examples_task.outputs['examples_uri'],\n",
    "        model_uri=trainer_task.outputs['model_uri'],\n",
    "        eval_config=json.dumps(eval_config),\n",
    "        beam_pipeline_args=beam_pipeline_args,\n",
    "\n",
    "        output_evaluation_uri=generated_output_uri + '/evaluation',\n",
    "        output_blessing_uri=generated_output_uri + '/blessing',\n",
    "    )\n",
    "\n",
    "\n",
    "# Compile the pipeline and start a run on the cluster behind `kfp_endpoint`.\n",
    "kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(\n",
    "    tfx_pipeline,\n",
    "    arguments=dict(\n",
    "        input_data_uri=input_data_uri,\n",
    "        root_output_uri=root_output_uri,\n",
    "    ),\n",
    ")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}