<html><body>
<style>

body, h1, h2, h3, div, span, p, pre, a {
  margin: 0;
  padding: 0;
  border: 0;
  font-weight: inherit;
  font-style: inherit;
  font-size: 100%;
  font-family: inherit;
  vertical-align: baseline;
}

body {
  font-size: 13px;
  padding: 1em;
}

h1 {
  font-size: 26px;
  margin-bottom: 1em;
}

h2 {
  font-size: 24px;
  margin-bottom: 1em;
}

h3 {
  font-size: 20px;
  margin-bottom: 1em;
  margin-top: 1em;
}

pre, code {
  line-height: 1.5;
  font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
}

pre {
  margin-top: 0.5em;
}

h1, h2, h3, p {
  font-family: Arial, sans-serif;
}

h1, h2, h3 {
  border-bottom: solid #CCC 1px;
}

.toc_element {
  margin-top: 0.5em;
}

.firstline {
  margin-left: 2em;
}

.method {
  margin-top: 1em;
  border: solid 1px #CCC;
  padding: 1em;
  background: #EEE;
}

.details {
  font-weight: bold;
  font-size: 14px;
}

</style>

<h1><a href="dataflow_v1b3.html">Google Dataflow API</a> . <a href="dataflow_v1b3.projects.html">projects</a> . <a href="dataflow_v1b3.projects.jobs.html">jobs</a></h1>
<h2>Instance Methods</h2>
<p class="toc_element">
  <code><a href="dataflow_v1b3.projects.jobs.messages.html">messages()</a></code>
</p>
<p class="firstline">Returns the messages Resource.</p>

<p class="toc_element">
  <code><a href="dataflow_v1b3.projects.jobs.workItems.html">workItems()</a></code>
</p>
<p class="firstline">Returns the workItems Resource.</p>

<p class="toc_element">
  <code><a href="#create">create(projectId, body, x__xgafv=None, replaceJobId=None, view=None)</a></code></p>
<p class="firstline">Creates a dataflow job.</p>
<p class="toc_element">
  <code><a href="#get">get(projectId, jobId, x__xgafv=None, view=None)</a></code></p>
<p class="firstline">Gets the state of the specified dataflow job.</p>
<p class="toc_element">
  <code><a href="#getMetrics">getMetrics(projectId, jobId, startTime=None, x__xgafv=None)</a></code></p>
<p class="firstline">Request the job status.</p>
<p class="toc_element">
  <code><a href="#list">list(projectId, pageSize=None, pageToken=None, x__xgafv=None, view=None)</a></code></p>
<p class="firstline">List the jobs of a project</p>
<p class="toc_element">
  <code><a href="#list_next">list_next(previous_request, previous_response)</a></code></p>
<p class="firstline">Retrieves the next page of results.</p>
<p class="toc_element">
  <code><a href="#update">update(projectId, jobId, body, x__xgafv=None)</a></code></p>
<p class="firstline">Updates the state of an existing dataflow job.</p>
<h3>Method Details</h3>
<div class="method">
    <code class="details" id="create">create(projectId, body, x__xgafv=None, replaceJobId=None, view=None)</code>
  <pre>Creates a dataflow job.

Args:
  projectId: string, The project which owns the job. (required)
  body: object, The request body. (required)
    The object takes the form of:

{ # Defines a job to be run by the Dataflow service.
    "clientRequestId": "A String", # Client's unique identifier of the job, re-used by SDK across retried attempts. If this field is set, the service will ensure its uniqueness. That is, the request to create a job will fail if the service has knowledge of a previously submitted job with the same client's id and job name. The caller may, for example, use this field to ensure idempotence of job creation across retried attempts to create a job. By default, the field is empty and, in that case, the service ignores it.
    "requestedState": "A String", # The job's requested state. UpdateJob may be used to switch between the JOB_STATE_STOPPED and JOB_STATE_RUNNING states, by setting requested_state. UpdateJob may also be used to directly set a job's requested state to JOB_STATE_CANCELLED or JOB_STATE_DONE, irrevocably terminating the job if it has not already reached a terminal state.
    "name": "A String", # The user-specified Dataflow job name. Only one Job with a given name may exist in a project at any given time. If a caller attempts to create a Job with the same name as an already-existing Job, the attempt will return the existing Job. The name must match the regular expression [a-z]([-a-z0-9]{0,38}[a-z0-9])?
    "replacedByJobId": "A String", # If another job is an update of this job (and thus, this job is in JOB_STATE_UPDATED), this field will contain the ID of that job.
    "projectId": "A String", # The project which owns the job.
    "transformNameMapping": { # Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.
      "a_key": "A String",
    },
    "createTime": "A String", # Timestamp when job was initially created. Immutable, set by the Dataflow service.
    "environment": { # Describes the environment in which a Dataflow Job runs. # Environment for the job.
      "version": { # A structure describing which components and their versions of the service are required in order to run the job.
        "a_key": "", # Properties of the object.
      },
      "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The system will append the suffix "/temp-{JOBNAME} to this resource prefix, where {JOBNAME} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
      "internalExperiments": { # Experimental settings.
        "a_key": "", # Properties of the object. Contains field @ype with type URL.
      },
      "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset}
      "experiments": [ # The list of experiments to enable.
        "A String",
      ],
      "sdkPipelineOptions": { # The Dataflow SDK pipeline options specified by the user. These options are passed through the service and are used to recreate the SDK pipeline options on the worker in a language agnostic and platform independent way.
        "a_key": "", # Properties of the object.
      },
      "userAgent": { # A description of the process that generated the request.
        "a_key": "", # Properties of the object.
      },
      "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com".
      "workerPools": [ # Worker pools. At least one "harness" worker pool must be specified in order for the job to have workers.
        { # Describes one particular pool of Dataflow workers to be instantiated by the Dataflow service in order to perform the computations required by a job. Note that a workflow job may use multiple pools, in order to match the various computational requirements of the various stages of the job.
          "diskSourceImage": "A String", # Fully qualified source image for disks.
          "taskrunnerSettings": { # Taskrunner configuration settings. # Settings passed through to Google Compute Engine workers when using the standard Dataflow task runner. Users should ignore this field.
            "workflowFileName": "A String", # Store the workflow in this file.
            "logUploadLocation": "A String", # Indicates where to put logs. If this is not specified, the logs will not be uploaded. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
            "commandlinesFileName": "A String", # Store preprocessing commands in this file.
            "parallelWorkerSettings": { # Provides data to pass through to the worker harness. # Settings to pass to the parallel worker harness.
              "reportingEnabled": True or False, # Send work progress updates to service.
              "shuffleServicePath": "A String", # The Shuffle service path relative to the root URL, for example, "shuffle/v1beta1".
              "workerId": "A String", # ID of the worker running this pipeline.
              "baseUrl": "A String", # The base URL for accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
              "servicePath": "A String", # The Dataflow service path relative to the root URL, for example, "dataflow/v1b3/projects".
              "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
            },
            "vmId": "A String", # ID string of VM.
            "baseTaskDir": "A String", # Location on the worker for task-specific subdirectories.
            "continueOnException": True or False, # Do we continue taskrunner if an exception is hit?
            "baseUrl": "A String", # The base URL for the taskrunner to use when accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
            "taskUser": "A String", # The UNIX user ID on the worker VM to use for tasks launched by taskrunner; e.g. "root".
            "taskGroup": "A String", # The UNIX group ID on the worker VM to use for tasks launched by taskrunner; e.g. "wheel".
            "oauthScopes": [ # OAuth2 scopes to be requested by the taskrunner in order to access the dataflow API.
              "A String",
            ],
            "languageHint": "A String", # Suggested backend language.
            "logToSerialconsole": True or False, # Send taskrunner log into to Google Compute Engine VM serial console?
            "streamingWorkerMainClass": "A String", # Streaming worker main class name.
            "logDir": "A String", # Directory on the VM to store logs.
            "dataflowApiVersion": "A String", # API version of endpoint, e.g. "v1b3"
            "harnessCommand": "A String", # Command to launch the worker harness.
            "tempStoragePrefix": "A String", # The prefix of the resources the taskrunner should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
            "alsologtostderr": True or False, # Also send taskrunner log info to stderr?
          },
          "kind": "A String", # The kind of the worker pool; currently only 'harness' and 'shuffle' are supported.
          "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default.
          "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
          "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
          "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
          "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
          "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
          "diskSizeGb": 42, # Size of root disk for VMs, in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
          "metadata": { # Metadata to set on the Google Compute Engine VMs.
            "a_key": "A String",
          },
          "poolArgs": { # Extra arguments for this worker pool.
            "a_key": "", # Properties of the object. Contains field @ype with type URL.
          },
          "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
          "workerHarnessContainerImage": "A String", # Docker container image that executes Dataflow worker harness, residing in Google Container Registry. Required.
          "defaultPackageSet": "A String", # The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.
          "packages": [ # Packages to be installed on workers.
            { # Packages that need to be installed in order for a worker to run the steps of the Dataflow job which will be assigned to its worker pool. This is the mechanism by which the SDK causes code to be loaded onto the workers. For example, the Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc) required in order for that code to run.
              "name": "A String", # The name of the package.
              "location": "A String", # The resource to read the package from. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket} bucket.storage.googleapis.com/
            },
          ],
          "autoscalingSettings": { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
            "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
            "algorithm": "A String", # The algorithm to use for autoscaling.
          },
          "dataDisks": [ # Data disks that are used by a VM in this workflow.
            { # Describes the data disk used by a workflow job.
              "mountPoint": "A String", # Directory in a VM where disk is mounted.
              "sizeGb": 42, # Size of disk in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
              "diskType": "A String", # Disk storage type, as defined by Google Compute Engine. This must be a disk type appropriate to the project and zone in which the workers will run. If unknown or unspecified, the service will attempt to choose a reasonable default. For example, the standard persistent disk type is a resource name typically ending in "pd-standard". If SSD persistent disks are available, the resource name typically ends with "pd-ssd". The actual valid values are defined the Google Compute Engine API, not by the Dataflow API; consult the Google Compute Engine documentation for more information about determining the set of available disk types for a particular project and zone. Google Compute Engine Disk types are local to a particular project in a particular zone, and so the resource name will typically look something like this: compute.googleapis.com/projects/
                  # /zones//diskTypes/pd-standard
            },
          ],
        },
      ],
    },
    "replaceJobId": "A String", # If this job is an update of an existing job, this field will be the ID of the job it replaced. When sending a CreateJobRequest, you can update a job by specifying it here. The job named here will be stopped, and its intermediate state transferred to this job.
    "steps": [ # The top-level steps that constitute the entire job.
      { # Defines a particular step within a Dataflow job. A job consists of multiple steps, each of which performs some specific operation as part of the overall job. Data is typically passed from one step to another as part of the job. Here's an example of a sequence of steps which together implement a Map-Reduce job: * Read a collection of data from some source, parsing the collection's elements. * Validate the elements. * Apply a user-defined function to map each element to some value and extract an element-specific key value. * Group elements with the same key into a single element with that key, transforming a multiply-keyed collection into a uniquely-keyed collection. * Write the elements out to some data sink. (Note that the Dataflow service may be used to run many different types of jobs, not just Map-Reduce).
        "kind": "A String", # The kind of step in the dataflow Job.
        "name": "A String", # Name identifying the step. This must be unique for each step with respect to all other steps in the dataflow Job.
        "properties": { # Named properties associated with the step. Each kind of predefined step has its own required set of properties.
          "a_key": "", # Properties of the object.
        },
      },
    ],
    "currentStateTime": "A String", # The timestamp associated with the current state.
    "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
      "A String",
    ],
    "type": "A String", # The type of dataflow job.
    "id": "A String", # The unique ID of this job. This field is set by the Dataflow service when the Job is created, and is immutable for the life of the Job.
    "currentState": "A String", # The current state of the job. Jobs are created in the JOB_STATE_STOPPED state unless otherwise specified. A job in the JOB_STATE_RUNNING state may asynchronously enter a terminal state. Once a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Dataflow service; callers cannot mutate it.
    "executionInfo": { # Additional information about how a Dataflow job will be executed which isn’t contained in the submitted job. # Information about how the Dataflow service will actually run the job.
      "stages": { # A mapping from each stage to the information about that stage.
        "a_key": { # Contains information about how a particular google.dataflow.v1beta3.Step will be executed.
          "stepName": [ # The steps associated with the execution stage. Note that stages may have several steps, and that a given step might be run by more than one stage.
            "A String",
          ],
        },
      },
    },
  }

  x__xgafv: string, V1 error format.
  replaceJobId: string, DEPRECATED. This field is now on the Job message.
  view: string, Level of information requested in response.

Returns:
  An object of the form:

    { # Defines a job to be run by the Dataflow service.
      "clientRequestId": "A String", # Client's unique identifier of the job, re-used by SDK across retried attempts. If this field is set, the service will ensure its uniqueness. That is, the request to create a job will fail if the service has knowledge of a previously submitted job with the same client's id and job name. The caller may, for example, use this field to ensure idempotence of job creation across retried attempts to create a job. By default, the field is empty and, in that case, the service ignores it.
      "requestedState": "A String", # The job's requested state. UpdateJob may be used to switch between the JOB_STATE_STOPPED and JOB_STATE_RUNNING states, by setting requested_state. UpdateJob may also be used to directly set a job's requested state to JOB_STATE_CANCELLED or JOB_STATE_DONE, irrevocably terminating the job if it has not already reached a terminal state.
      "name": "A String", # The user-specified Dataflow job name. Only one Job with a given name may exist in a project at any given time. If a caller attempts to create a Job with the same name as an already-existing Job, the attempt will return the existing Job. The name must match the regular expression [a-z]([-a-z0-9]{0,38}[a-z0-9])?
      "replacedByJobId": "A String", # If another job is an update of this job (and thus, this job is in JOB_STATE_UPDATED), this field will contain the ID of that job.
      "projectId": "A String", # The project which owns the job.
      "transformNameMapping": { # Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.
        "a_key": "A String",
      },
      "createTime": "A String", # Timestamp when job was initially created. Immutable, set by the Dataflow service.
      "environment": { # Describes the environment in which a Dataflow Job runs. # Environment for the job.
        "version": { # A structure describing which components and their versions of the service are required in order to run the job.
          "a_key": "", # Properties of the object.
        },
        "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The system will append the suffix "/temp-{JOBNAME} to this resource prefix, where {JOBNAME} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
        "internalExperiments": { # Experimental settings.
          "a_key": "", # Properties of the object. Contains field @ype with type URL.
        },
        "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset}
        "experiments": [ # The list of experiments to enable.
          "A String",
        ],
        "sdkPipelineOptions": { # The Dataflow SDK pipeline options specified by the user. These options are passed through the service and are used to recreate the SDK pipeline options on the worker in a language agnostic and platform independent way.
          "a_key": "", # Properties of the object.
        },
        "userAgent": { # A description of the process that generated the request.
          "a_key": "", # Properties of the object.
        },
        "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com".
        "workerPools": [ # Worker pools. At least one "harness" worker pool must be specified in order for the job to have workers.
          { # Describes one particular pool of Dataflow workers to be instantiated by the Dataflow service in order to perform the computations required by a job. Note that a workflow job may use multiple pools, in order to match the various computational requirements of the various stages of the job.
            "diskSourceImage": "A String", # Fully qualified source image for disks.
            "taskrunnerSettings": { # Taskrunner configuration settings. # Settings passed through to Google Compute Engine workers when using the standard Dataflow task runner. Users should ignore this field.
              "workflowFileName": "A String", # Store the workflow in this file.
              "logUploadLocation": "A String", # Indicates where to put logs. If this is not specified, the logs will not be uploaded. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
              "commandlinesFileName": "A String", # Store preprocessing commands in this file.
              "parallelWorkerSettings": { # Provides data to pass through to the worker harness. # Settings to pass to the parallel worker harness.
                "reportingEnabled": True or False, # Send work progress updates to service.
                "shuffleServicePath": "A String", # The Shuffle service path relative to the root URL, for example, "shuffle/v1beta1".
                "workerId": "A String", # ID of the worker running this pipeline.
                "baseUrl": "A String", # The base URL for accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
                "servicePath": "A String", # The Dataflow service path relative to the root URL, for example, "dataflow/v1b3/projects".
                "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
              },
              "vmId": "A String", # ID string of VM.
              "baseTaskDir": "A String", # Location on the worker for task-specific subdirectories.
              "continueOnException": True or False, # Do we continue taskrunner if an exception is hit?
              "baseUrl": "A String", # The base URL for the taskrunner to use when accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
              "taskUser": "A String", # The UNIX user ID on the worker VM to use for tasks launched by taskrunner; e.g. "root".
              "taskGroup": "A String", # The UNIX group ID on the worker VM to use for tasks launched by taskrunner; e.g. "wheel".
              "oauthScopes": [ # OAuth2 scopes to be requested by the taskrunner in order to access the dataflow API.
                "A String",
              ],
              "languageHint": "A String", # Suggested backend language.
              "logToSerialconsole": True or False, # Send taskrunner log into to Google Compute Engine VM serial console?
              "streamingWorkerMainClass": "A String", # Streaming worker main class name.
              "logDir": "A String", # Directory on the VM to store logs.
              "dataflowApiVersion": "A String", # API version of endpoint, e.g. "v1b3"
              "harnessCommand": "A String", # Command to launch the worker harness.
              "tempStoragePrefix": "A String", # The prefix of the resources the taskrunner should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
              "alsologtostderr": True or False, # Also send taskrunner log info to stderr?
            },
            "kind": "A String", # The kind of the worker pool; currently only 'harness' and 'shuffle' are supported.
            "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default.
            "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
            "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
            "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
            "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
            "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
            "diskSizeGb": 42, # Size of root disk for VMs, in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
            "metadata": { # Metadata to set on the Google Compute Engine VMs.
              "a_key": "A String",
            },
            "poolArgs": { # Extra arguments for this worker pool.
              "a_key": "", # Properties of the object. Contains field @ype with type URL.
            },
            "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
            "workerHarnessContainerImage": "A String", # Docker container image that executes Dataflow worker harness, residing in Google Container Registry. Required.
            "defaultPackageSet": "A String", # The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.
            "packages": [ # Packages to be installed on workers.
              { # Packages that need to be installed in order for a worker to run the steps of the Dataflow job which will be assigned to its worker pool. This is the mechanism by which the SDK causes code to be loaded onto the workers. For example, the Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc) required in order for that code to run.
                "name": "A String", # The name of the package.
                "location": "A String", # The resource to read the package from. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket} bucket.storage.googleapis.com/
              },
            ],
            "autoscalingSettings": { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
              "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
              "algorithm": "A String", # The algorithm to use for autoscaling.
            },
            "dataDisks": [ # Data disks that are used by a VM in this workflow.
              { # Describes the data disk used by a workflow job.
                "mountPoint": "A String", # Directory in a VM where disk is mounted.
                "sizeGb": 42, # Size of disk in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
                "diskType": "A String", # Disk storage type, as defined by Google Compute Engine. This must be a disk type appropriate to the project and zone in which the workers will run. If unknown or unspecified, the service will attempt to choose a reasonable default. For example, the standard persistent disk type is a resource name typically ending in "pd-standard". If SSD persistent disks are available, the resource name typically ends with "pd-ssd". The actual valid values are defined the Google Compute Engine API, not by the Dataflow API; consult the Google Compute Engine documentation for more information about determining the set of available disk types for a particular project and zone. Google Compute Engine Disk types are local to a particular project in a particular zone, and so the resource name will typically look something like this: compute.googleapis.com/projects/
                    # /zones//diskTypes/pd-standard
              },
            ],
          },
        ],
      },
      "replaceJobId": "A String", # If this job is an update of an existing job, this field will be the ID of the job it replaced. When sending a CreateJobRequest, you can update a job by specifying it here. The job named here will be stopped, and its intermediate state transferred to this job.
      "steps": [ # The top-level steps that constitute the entire job.
        { # Defines a particular step within a Dataflow job. A job consists of multiple steps, each of which performs some specific operation as part of the overall job. Data is typically passed from one step to another as part of the job. Here's an example of a sequence of steps which together implement a Map-Reduce job: * Read a collection of data from some source, parsing the collection's elements. * Validate the elements. * Apply a user-defined function to map each element to some value and extract an element-specific key value. * Group elements with the same key into a single element with that key, transforming a multiply-keyed collection into a uniquely-keyed collection. * Write the elements out to some data sink. (Note that the Dataflow service may be used to run many different types of jobs, not just Map-Reduce).
          "kind": "A String", # The kind of step in the dataflow Job.
          "name": "A String", # Name identifying the step. This must be unique for each step with respect to all other steps in the dataflow Job.
          "properties": { # Named properties associated with the step. Each kind of predefined step has its own required set of properties.
            "a_key": "", # Properties of the object.
          },
        },
      ],
      "currentStateTime": "A String", # The timestamp associated with the current state.
      "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
        "A String",
      ],
      "type": "A String", # The type of dataflow job.
      "id": "A String", # The unique ID of this job. This field is set by the Dataflow service when the Job is created, and is immutable for the life of the Job.
      "currentState": "A String", # The current state of the job. Jobs are created in the JOB_STATE_STOPPED state unless otherwise specified. A job in the JOB_STATE_RUNNING state may asynchronously enter a terminal state. Once a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Dataflow service; callers cannot mutate it.
      "executionInfo": { # Additional information about how a Dataflow job will be executed which isn’t contained in the submitted job. # Information about how the Dataflow service will actually run the job.
        "stages": { # A mapping from each stage to the information about that stage.
          "a_key": { # Contains information about how a particular google.dataflow.v1beta3.Step will be executed.
            "stepName": [ # The steps associated with the execution stage. Note that stages may have several steps, and that a given step might be run by more than one stage.
              "A String",
            ],
          },
        },
      },
    }</pre>
</div>
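<p>As an illustration, here is a minimal sketch of invoking this method with the google-api-python-client library; the project ID, job name, and credential setup are assumptions for the example, not values from this reference.</p>
<pre>
from googleapiclient.discovery import build

# Build a Dataflow client. Assumes Application Default Credentials are
# available in the environment (e.g. configured via gcloud).
service = build('dataflow', 'v1b3')

# Hypothetical minimal request body; a runnable job would also carry
# 'steps' and an 'environment' as documented above.
job_body = {
    'projectId': 'my-project-id',  # assumed project ID
    'name': 'example-job',         # must match [a-z]([-a-z0-9]{0,38}[a-z0-9])?
}

response = service.projects().jobs().create(
    projectId='my-project-id', body=job_body).execute()
print(response['id'])  # the service-assigned job ID
</pre>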

<div class="method">
    <code class="details" id="get">get(projectId, jobId, x__xgafv=None, view=None)</code>
  <pre>Gets the state of the specified dataflow job.

Args:
  projectId: string, The project which owns the job. (required)
  jobId: string, Identifies a single job. (required)
  x__xgafv: string, V1 error format.
  view: string, Level of information requested in response.

Returns:
  An object of the form:

    { # Defines a job to be run by the Dataflow service.
      "clientRequestId": "A String", # Client's unique identifier of the job, re-used by SDK across retried attempts. If this field is set, the service will ensure its uniqueness. That is, the request to create a job will fail if the service has knowledge of a previously submitted job with the same client's id and job name. The caller may, for example, use this field to ensure idempotence of job creation across retried attempts to create a job. By default, the field is empty and, in that case, the service ignores it.
      "requestedState": "A String", # The job's requested state. UpdateJob may be used to switch between the JOB_STATE_STOPPED and JOB_STATE_RUNNING states, by setting requested_state. UpdateJob may also be used to directly set a job's requested state to JOB_STATE_CANCELLED or JOB_STATE_DONE, irrevocably terminating the job if it has not already reached a terminal state.
      "name": "A String", # The user-specified Dataflow job name. Only one Job with a given name may exist in a project at any given time. If a caller attempts to create a Job with the same name as an already-existing Job, the attempt will return the existing Job. The name must match the regular expression [a-z]([-a-z0-9]{0,38}[a-z0-9])?
      "replacedByJobId": "A String", # If another job is an update of this job (and thus, this job is in JOB_STATE_UPDATED), this field will contain the ID of that job.
      "projectId": "A String", # The project which owns the job.
      "transformNameMapping": { # Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.
        "a_key": "A String",
      },
      "createTime": "A String", # Timestamp when job was initially created. Immutable, set by the Dataflow service.
      "environment": { # Describes the environment in which a Dataflow Job runs. # Environment for the job.
        "version": { # A structure describing which components and their versions of the service are required in order to run the job.
          "a_key": "", # Properties of the object.
        },
        "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The system will append the suffix "/temp-{JOBNAME} to this resource prefix, where {JOBNAME} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
        "internalExperiments": { # Experimental settings.
          "a_key": "", # Properties of the object. Contains field @ype with type URL.
        },
        "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset}
        "experiments": [ # The list of experiments to enable.
          "A String",
        ],
        "sdkPipelineOptions": { # The Dataflow SDK pipeline options specified by the user. These options are passed through the service and are used to recreate the SDK pipeline options on the worker in a language agnostic and platform independent way.
          "a_key": "", # Properties of the object.
        },
        "userAgent": { # A description of the process that generated the request.
          "a_key": "", # Properties of the object.
        },
        "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com".
        "workerPools": [ # Worker pools. At least one "harness" worker pool must be specified in order for the job to have workers.
          { # Describes one particular pool of Dataflow workers to be instantiated by the Dataflow service in order to perform the computations required by a job. Note that a workflow job may use multiple pools, in order to match the various computational requirements of the various stages of the job.
            "diskSourceImage": "A String", # Fully qualified source image for disks.
            "taskrunnerSettings": { # Taskrunner configuration settings. # Settings passed through to Google Compute Engine workers when using the standard Dataflow task runner. Users should ignore this field.
              "workflowFileName": "A String", # Store the workflow in this file.
              "logUploadLocation": "A String", # Indicates where to put logs. If this is not specified, the logs will not be uploaded. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
              "commandlinesFileName": "A String", # Store preprocessing commands in this file.
              "parallelWorkerSettings": { # Provides data to pass through to the worker harness. # Settings to pass to the parallel worker harness.
                "reportingEnabled": True or False, # Send work progress updates to service.
                "shuffleServicePath": "A String", # The Shuffle service path relative to the root URL, for example, "shuffle/v1beta1".
                "workerId": "A String", # ID of the worker running this pipeline.
                "baseUrl": "A String", # The base URL for accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
                "servicePath": "A String", # The Dataflow service path relative to the root URL, for example, "dataflow/v1b3/projects".
                "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
              },
              "vmId": "A String", # ID string of VM.
              "baseTaskDir": "A String", # Location on the worker for task-specific subdirectories.
              "continueOnException": True or False, # Do we continue taskrunner if an exception is hit?
              "baseUrl": "A String", # The base URL for the taskrunner to use when accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
              "taskUser": "A String", # The UNIX user ID on the worker VM to use for tasks launched by taskrunner; e.g. "root".
              "taskGroup": "A String", # The UNIX group ID on the worker VM to use for tasks launched by taskrunner; e.g. "wheel".
              "oauthScopes": [ # OAuth2 scopes to be requested by the taskrunner in order to access the dataflow API.
                "A String",
              ],
              "languageHint": "A String", # Suggested backend language.
              "logToSerialconsole": True or False, # Send taskrunner log into to Google Compute Engine VM serial console?
              "streamingWorkerMainClass": "A String", # Streaming worker main class name.
              "logDir": "A String", # Directory on the VM to store logs.
              "dataflowApiVersion": "A String", # API version of endpoint, e.g. "v1b3"
              "harnessCommand": "A String", # Command to launch the worker harness.
              "tempStoragePrefix": "A String", # The prefix of the resources the taskrunner should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
              "alsologtostderr": True or False, # Also send taskrunner log info to stderr?
            },
            "kind": "A String", # The kind of the worker pool; currently only 'harness' and 'shuffle' are supported.
            "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default.
            "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
            "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
            "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
            "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
            "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
            "diskSizeGb": 42, # Size of root disk for VMs, in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
            "metadata": { # Metadata to set on the Google Compute Engine VMs.
              "a_key": "A String",
            },
            "poolArgs": { # Extra arguments for this worker pool.
              "a_key": "", # Properties of the object. Contains field @ype with type URL.
            },
            "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
            "workerHarnessContainerImage": "A String", # Docker container image that executes Dataflow worker harness, residing in Google Container Registry. Required.
            "defaultPackageSet": "A String", # The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.
            "packages": [ # Packages to be installed on workers.
              { # Packages that need to be installed in order for a worker to run the steps of the Dataflow job which will be assigned to its worker pool. This is the mechanism by which the SDK causes code to be loaded onto the workers. For example, the Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc) required in order for that code to run.
                "name": "A String", # The name of the package.
                "location": "A String", # The resource to read the package from. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket} bucket.storage.googleapis.com/
              },
            ],
            "autoscalingSettings": { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
              "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
              "algorithm": "A String", # The algorithm to use for autoscaling.
            },
            "dataDisks": [ # Data disks that are used by a VM in this workflow.
              { # Describes the data disk used by a workflow job.
                "mountPoint": "A String", # Directory in a VM where disk is mounted.
                "sizeGb": 42, # Size of disk in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
                "diskType": "A String", # Disk storage type, as defined by Google Compute Engine. This must be a disk type appropriate to the project and zone in which the workers will run. If unknown or unspecified, the service will attempt to choose a reasonable default. For example, the standard persistent disk type is a resource name typically ending in "pd-standard". If SSD persistent disks are available, the resource name typically ends with "pd-ssd". The actual valid values are defined the Google Compute Engine API, not by the Dataflow API; consult the Google Compute Engine documentation for more information about determining the set of available disk types for a particular project and zone. Google Compute Engine Disk types are local to a particular project in a particular zone, and so the resource name will typically look something like this: compute.googleapis.com/projects/
                    # /zones//diskTypes/pd-standard
              },
            ],
          },
        ],
      },
      "replaceJobId": "A String", # If this job is an update of an existing job, this field will be the ID of the job it replaced. When sending a CreateJobRequest, you can update a job by specifying it here. The job named here will be stopped, and its intermediate state transferred to this job.
      "steps": [ # The top-level steps that constitute the entire job.
        { # Defines a particular step within a Dataflow job. A job consists of multiple steps, each of which performs some specific operation as part of the overall job. Data is typically passed from one step to another as part of the job. Here's an example of a sequence of steps which together implement a Map-Reduce job: * Read a collection of data from some source, parsing the collection's elements. * Validate the elements. * Apply a user-defined function to map each element to some value and extract an element-specific key value. * Group elements with the same key into a single element with that key, transforming a multiply-keyed collection into a uniquely-keyed collection. * Write the elements out to some data sink. (Note that the Dataflow service may be used to run many different types of jobs, not just Map-Reduce).
          "kind": "A String", # The kind of step in the dataflow Job.
          "name": "A String", # Name identifying the step. This must be unique for each step with respect to all other steps in the dataflow Job.
          "properties": { # Named properties associated with the step. Each kind of predefined step has its own required set of properties.
            "a_key": "", # Properties of the object.
          },
        },
      ],
      "currentStateTime": "A String", # The timestamp associated with the current state.
      "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
        "A String",
      ],
      "type": "A String", # The type of dataflow job.
      "id": "A String", # The unique ID of this job. This field is set by the Dataflow service when the Job is created, and is immutable for the life of the Job.
      "currentState": "A String", # The current state of the job. Jobs are created in the JOB_STATE_STOPPED state unless otherwise specified. A job in the JOB_STATE_RUNNING state may asynchronously enter a terminal state. Once a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Dataflow service; callers cannot mutate it.
      "executionInfo": { # Additional information about how a Dataflow job will be executed which isn’t contained in the submitted job. # Information about how the Dataflow service will actually run the job.
        "stages": { # A mapping from each stage to the information about that stage.
          "a_key": { # Contains information about how a particular google.dataflow.v1beta3.Step will be executed.
            "stepName": [ # The steps associated with the execution stage. Note that stages may have several steps, and that a given step might be run by more than one stage.
              "A String",
            ],
          },
        },
      },
    }</pre>
</div>
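<p>Building on get(), a hypothetical polling sketch; the terminal-state set below is limited to states named in this reference and may be incomplete.</p>
<pre>
import time

# Terminal states documented above; the service may define others.
TERMINAL_STATES = {'JOB_STATE_DONE', 'JOB_STATE_CANCELLED'}

def wait_for_job(service, project_id, job_id, poll_seconds=30):
    # Poll until the job's currentState reaches a terminal state.
    while True:
        job = service.projects().jobs().get(
            projectId=project_id, jobId=job_id).execute()
        if job.get('currentState') in TERMINAL_STATES:
            return job
        time.sleep(poll_seconds)
</pre>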

<div class="method">
    <code class="details" id="getMetrics">getMetrics(projectId, jobId, startTime=None, x__xgafv=None)</code>
  <pre>Requests the metrics of the specified job.

Args:
  projectId: string, A project id. (required)
  jobId: string, The job to get messages for. (required)
  startTime: string, Return only metric data that has changed since this time. Default is to return all information about all metrics for the job.
  x__xgafv: string, V1 error format.

Returns:
  An object of the form:

    { # JobMetrics contains a collection of metrics describing the detailed progress of a Dataflow job. Metrics correspond to user-defined and system-defined metrics in the job. This resource captures only the most recent values of each metric; time-series data can be queried for them (under the same metric names) from Cloud Monitoring.
    "metrics": [ # All metrics for this job.
      { # Describes the state of a metric.
        "meanCount": "", # Worker-computed aggregate value for the "Mean" aggregation kind. This holds the count of the aggregated values and is used in combination with mean_sum above to obtain the actual mean aggregate value. The only possible value type is Long.
        "kind": "A String", # Metric aggregation kind. The possible metric aggregation kinds are "Sum", "Max", "Min", "Mean", "Set", "And", and "Or". The specified aggregation kind is case-insensitive. If omitted, this is not an aggregated value but instead a single metric sample value.
        "set": "", # Worker-computed aggregate value for the "Set" aggregation kind. The only possible value type is a list of Values whose type can be Long, Double, or String, according to the metric's type. All Values in the list must be of the same type.
        "name": { # Identifies a metric, by describing the source which generated the metric. # Name of the metric.
          "origin": "A String", # Origin (namespace) of metric name. May be blank for user-define metrics; will be "dataflow" for metrics defined by the Dataflow service or SDK.
          "name": "A String", # Worker-defined metric name.
          "context": { # Zero or more labeled fields which identify the part of the job this metric is associated with, such as the name of a step or collection. For example, built-in counters associated with steps will have context['step'] = . Counters associated with PCollections in the SDK will have context['pcollection'] =
              # .
            "a_key": "A String",
          },
        },
        "cumulative": True or False, # True if this metric is reported as the total cumulative aggregate value accumulated since the worker started working on this WorkItem. By default this is false, indicating that this metric is reported as a delta that is not associated with any WorkItem.
        "updateTime": "A String", # Timestamp associated with the metric value. Optional when workers are reporting work progress; it will be filled in responses from the metrics API.
        "scalar": "", # Worker-computed aggregate value for aggregation kinds "Sum", "Max", "Min", "And", and "Or". The possible value types are Long, Double, and Boolean.
        "meanSum": "", # Worker-computed aggregate value for the "Mean" aggregation kind. This holds the sum of the aggregated values and is used in combination with mean_count below to obtain the actual mean aggregate value. The only possible value types are Long and Double.
        "internal": "", # Worker-computed aggregate value for internal use by the Dataflow service.
      },
    ],
    "metricTime": "A String", # Timestamp as of which metric values are current.
  }</pre>
</div>
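<p>A sketch of reading the JobMetrics response documented above, under the same hypothetical client and IDs as the get() example; the Mean reconstruction follows the meanSum/meanCount descriptions.</p>
<pre>
from googleapiclient.discovery import build

dataflow = build('dataflow', 'v1b3')

# Omitting startTime returns all metrics for the job.
result = dataflow.projects().jobs().getMetrics(
    projectId='my-project',  # hypothetical
    jobId='my-job-id',       # hypothetical
).execute()

for metric in result.get('metrics', []):
    name = metric['name']['name']
    kind = metric.get('kind', '')
    if kind == 'Mean':
        # Mean arrives as a (sum, count) pair; divide to recover the mean.
        total, count = metric.get('meanSum'), metric.get('meanCount')
        value = total / count if count and total is not None else None
    else:
        # "Sum", "Max", "Min", "And", and "Or" aggregates arrive in 'scalar'.
        value = metric.get('scalar')
    print(name, kind, value)
</pre>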

<div class="method">
    <code class="details" id="list">list(projectId, pageSize=None, pageToken=None, x__xgafv=None, view=None)</code>
  <pre>List the jobs of a project.

Args:
  projectId: string, The project which owns the jobs. (required)
  pageSize: integer, If there are many jobs, limit response to at most this many. The actual number of jobs returned will be the lesser of page_size and an unspecified server-defined limit.
  pageToken: string, Set this to the 'next_page_token' field of a previous response to request additional results in a long list.
  x__xgafv: string, V1 error format.
  view: string, Level of information requested in response. Default is SUMMARY.

Returns:
  An object of the form:

    { # Response to a request to list Dataflow jobs. This may be a partial response, depending on the page size in the ListJobsRequest.
    "nextPageToken": "A String", # Set if there may be more results than fit in this response.
    "jobs": [ # A subset of the requested job information.
      { # Defines a job to be run by the Dataflow service.
          "clientRequestId": "A String", # Client's unique identifier of the job, re-used by SDK across retried attempts. If this field is set, the service will ensure its uniqueness. That is, the request to create a job will fail if the service has knowledge of a previously submitted job with the same client's id and job name. The caller may, for example, use this field to ensure idempotence of job creation across retried attempts to create a job. By default, the field is empty and, in that case, the service ignores it.
          "requestedState": "A String", # The job's requested state. UpdateJob may be used to switch between the JOB_STATE_STOPPED and JOB_STATE_RUNNING states, by setting requested_state. UpdateJob may also be used to directly set a job's requested state to JOB_STATE_CANCELLED or JOB_STATE_DONE, irrevocably terminating the job if it has not already reached a terminal state.
          "name": "A String", # The user-specified Dataflow job name. Only one Job with a given name may exist in a project at any given time. If a caller attempts to create a Job with the same name as an already-existing Job, the attempt will return the existing Job. The name must match the regular expression [a-z]([-a-z0-9]{0,38}[a-z0-9])?
          "replacedByJobId": "A String", # If another job is an update of this job (and thus, this job is in JOB_STATE_UPDATED), this field will contain the ID of that job.
          "projectId": "A String", # The project which owns the job.
          "transformNameMapping": { # Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.
            "a_key": "A String",
          },
          "createTime": "A String", # Timestamp when job was initially created. Immutable, set by the Dataflow service.
          "environment": { # Describes the environment in which a Dataflow Job runs. # Environment for the job.
            "version": { # A structure describing which components and their versions of the service are required in order to run the job.
              "a_key": "", # Properties of the object.
            },
            "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The system will append the suffix "/temp-{JOBNAME} to this resource prefix, where {JOBNAME} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
            "internalExperiments": { # Experimental settings.
              "a_key": "", # Properties of the object. Contains field @ype with type URL.
            },
            "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset}
            "experiments": [ # The list of experiments to enable.
              "A String",
            ],
            "sdkPipelineOptions": { # The Dataflow SDK pipeline options specified by the user. These options are passed through the service and are used to recreate the SDK pipeline options on the worker in a language agnostic and platform independent way.
              "a_key": "", # Properties of the object.
            },
            "userAgent": { # A description of the process that generated the request.
              "a_key": "", # Properties of the object.
            },
            "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com".
            "workerPools": [ # Worker pools. At least one "harness" worker pool must be specified in order for the job to have workers.
              { # Describes one particular pool of Dataflow workers to be instantiated by the Dataflow service in order to perform the computations required by a job. Note that a workflow job may use multiple pools, in order to match the various computational requirements of the various stages of the job.
                "diskSourceImage": "A String", # Fully qualified source image for disks.
                "taskrunnerSettings": { # Taskrunner configuration settings. # Settings passed through to Google Compute Engine workers when using the standard Dataflow task runner. Users should ignore this field.
                  "workflowFileName": "A String", # Store the workflow in this file.
                  "logUploadLocation": "A String", # Indicates where to put logs. If this is not specified, the logs will not be uploaded. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
                  "commandlinesFileName": "A String", # Store preprocessing commands in this file.
                  "parallelWorkerSettings": { # Provides data to pass through to the worker harness. # Settings to pass to the parallel worker harness.
                    "reportingEnabled": True or False, # Send work progress updates to service.
                    "shuffleServicePath": "A String", # The Shuffle service path relative to the root URL, for example, "shuffle/v1beta1".
                    "workerId": "A String", # ID of the worker running this pipeline.
                    "baseUrl": "A String", # The base URL for accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
                    "servicePath": "A String", # The Dataflow service path relative to the root URL, for example, "dataflow/v1b3/projects".
                    "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
                  },
                  "vmId": "A String", # ID string of VM.
                  "baseTaskDir": "A String", # Location on the worker for task-specific subdirectories.
                  "continueOnException": True or False, # Do we continue taskrunner if an exception is hit?
                  "baseUrl": "A String", # The base URL for the taskrunner to use when accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
                  "taskUser": "A String", # The UNIX user ID on the worker VM to use for tasks launched by taskrunner; e.g. "root".
                  "taskGroup": "A String", # The UNIX group ID on the worker VM to use for tasks launched by taskrunner; e.g. "wheel".
                  "oauthScopes": [ # OAuth2 scopes to be requested by the taskrunner in order to access the dataflow API.
                    "A String",
                  ],
                  "languageHint": "A String", # Suggested backend language.
                  "logToSerialconsole": True or False, # Send taskrunner log into to Google Compute Engine VM serial console?
                  "streamingWorkerMainClass": "A String", # Streaming worker main class name.
                  "logDir": "A String", # Directory on the VM to store logs.
                  "dataflowApiVersion": "A String", # API version of endpoint, e.g. "v1b3"
                  "harnessCommand": "A String", # Command to launch the worker harness.
                  "tempStoragePrefix": "A String", # The prefix of the resources the taskrunner should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
                  "alsologtostderr": True or False, # Also send taskrunner log info to stderr?
                },
                "kind": "A String", # The kind of the worker pool; currently only 'harness' and 'shuffle' are supported.
                "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default.
                "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
                "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
                "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
                "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
                "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
                "diskSizeGb": 42, # Size of root disk for VMs, in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
                "metadata": { # Metadata to set on the Google Compute Engine VMs.
                  "a_key": "A String",
                },
                "poolArgs": { # Extra arguments for this worker pool.
                  "a_key": "", # Properties of the object. Contains field @ype with type URL.
                },
                "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
                "workerHarnessContainerImage": "A String", # Docker container image that executes Dataflow worker harness, residing in Google Container Registry. Required.
                "defaultPackageSet": "A String", # The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.
                "packages": [ # Packages to be installed on workers.
                  { # Packages that need to be installed in order for a worker to run the steps of the Dataflow job which will be assigned to its worker pool. This is the mechanism by which the SDK causes code to be loaded onto the workers. For example, the Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc) required in order for that code to run.
                    "name": "A String", # The name of the package.
                    "location": "A String", # The resource to read the package from. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket} bucket.storage.googleapis.com/
                  },
                ],
                "autoscalingSettings": { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
                  "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
                  "algorithm": "A String", # The algorithm to use for autoscaling.
                },
                "dataDisks": [ # Data disks that are used by a VM in this workflow.
                  { # Describes the data disk used by a workflow job.
                    "mountPoint": "A String", # Directory in a VM where disk is mounted.
                    "sizeGb": 42, # Size of disk in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
                    "diskType": "A String", # Disk storage type, as defined by Google Compute Engine. This must be a disk type appropriate to the project and zone in which the workers will run. If unknown or unspecified, the service will attempt to choose a reasonable default. For example, the standard persistent disk type is a resource name typically ending in "pd-standard". If SSD persistent disks are available, the resource name typically ends with "pd-ssd". The actual valid values are defined the Google Compute Engine API, not by the Dataflow API; consult the Google Compute Engine documentation for more information about determining the set of available disk types for a particular project and zone. Google Compute Engine Disk types are local to a particular project in a particular zone, and so the resource name will typically look something like this: compute.googleapis.com/projects/
                        # /zones//diskTypes/pd-standard
                  },
                ],
              },
            ],
          },
          "replaceJobId": "A String", # If this job is an update of an existing job, this field will be the ID of the job it replaced. When sending a CreateJobRequest, you can update a job by specifying it here. The job named here will be stopped, and its intermediate state transferred to this job.
          "steps": [ # The top-level steps that constitute the entire job.
            { # Defines a particular step within a Dataflow job. A job consists of multiple steps, each of which performs some specific operation as part of the overall job. Data is typically passed from one step to another as part of the job. Here's an example of a sequence of steps which together implement a Map-Reduce job: * Read a collection of data from some source, parsing the collection's elements. * Validate the elements. * Apply a user-defined function to map each element to some value and extract an element-specific key value. * Group elements with the same key into a single element with that key, transforming a multiply-keyed collection into a uniquely-keyed collection. * Write the elements out to some data sink. (Note that the Dataflow service may be used to run many different types of jobs, not just Map-Reduce).
              "kind": "A String", # The kind of step in the dataflow Job.
              "name": "A String", # Name identifying the step. This must be unique for each step with respect to all other steps in the dataflow Job.
              "properties": { # Named properties associated with the step. Each kind of predefined step has its own required set of properties.
                "a_key": "", # Properties of the object.
              },
            },
          ],
          "currentStateTime": "A String", # The timestamp associated with the current state.
          "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
            "A String",
          ],
          "type": "A String", # The type of dataflow job.
          "id": "A String", # The unique ID of this job. This field is set by the Dataflow service when the Job is created, and is immutable for the life of the Job.
          "currentState": "A String", # The current state of the job. Jobs are created in the JOB_STATE_STOPPED state unless otherwise specified. A job in the JOB_STATE_RUNNING state may asynchronously enter a terminal state. Once a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Dataflow service; callers cannot mutate it.
          "executionInfo": { # Additional information about how a Dataflow job will be executed which isn’t contained in the submitted job. # Information about how the Dataflow service will actually run the job.
            "stages": { # A mapping from each stage to the information about that stage.
              "a_key": { # Contains information about how a particular google.dataflow.v1beta3.Step will be executed.
                "stepName": [ # The steps associated with the execution stage. Note that stages may have several steps, and that a given step might be run by more than one stage.
                  "A String",
                ],
              },
            },
          },
        },
    ],
  }</pre>
</div>
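<p>A sketch of fetching a single page of jobs under the same client assumptions as the examples above; see the pagination loop after list_next() below for walking every page.</p>
<pre>
from googleapiclient.discovery import build

dataflow = build('dataflow', 'v1b3')

# One page of up to 25 job summaries for a (hypothetical) project.
response = dataflow.projects().jobs().list(
    projectId='my-project',
    pageSize=25,
    view='JOB_VIEW_SUMMARY',  # assumed JobView enum value
).execute()

for job in response.get('jobs', []):
    print(job['id'], job['name'], job['currentState'])
</pre>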

<div class="method">
    <code class="details" id="list_next">list_next(previous_request, previous_response)</code>
  <pre>Retrieves the next page of results.

Args:
  previous_request: The request for the previous page. (required)
  previous_response: The response from the request for the previous page. (required)

Returns:
  A request object that you can call 'execute()' on to request the next
  page. Returns None if there are no more items in the collection.
    </pre>
</div>
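<p>The standard google-api-python-client pagination idiom built on list() and list_next(): keep executing requests until list_next() returns None, which it does once a response carries no nextPageToken. The project id is a placeholder.</p>
<pre>
from googleapiclient.discovery import build

dataflow = build('dataflow', 'v1b3')

request = dataflow.projects().jobs().list(projectId='my-project', pageSize=25)
while request is not None:
    response = request.execute()
    for job in response.get('jobs', []):
        print(job['id'], job['currentState'])
    # Builds the next-page request from the previous request/response pair;
    # returns None when there are no more results in the collection.
    request = dataflow.projects().jobs().list_next(request, response)
</pre>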

<div class="method">
    <code class="details" id="update">update(projectId, jobId, body, x__xgafv=None)</code>
  <pre>Updates the state of an existing dataflow job.

Args:
  projectId: string, The project which owns the job. (required)
  jobId: string, Identifies a single job. (required)
  body: object, The request body. (required)
    The object takes the form of:

{ # Defines a job to be run by the Dataflow service.
    "clientRequestId": "A String", # Client's unique identifier of the job, re-used by SDK across retried attempts. If this field is set, the service will ensure its uniqueness. That is, the request to create a job will fail if the service has knowledge of a previously submitted job with the same client's id and job name. The caller may, for example, use this field to ensure idempotence of job creation across retried attempts to create a job. By default, the field is empty and, in that case, the service ignores it.
    "requestedState": "A String", # The job's requested state. UpdateJob may be used to switch between the JOB_STATE_STOPPED and JOB_STATE_RUNNING states, by setting requested_state. UpdateJob may also be used to directly set a job's requested state to JOB_STATE_CANCELLED or JOB_STATE_DONE, irrevocably terminating the job if it has not already reached a terminal state.
    "name": "A String", # The user-specified Dataflow job name. Only one Job with a given name may exist in a project at any given time. If a caller attempts to create a Job with the same name as an already-existing Job, the attempt will return the existing Job. The name must match the regular expression [a-z]([-a-z0-9]{0,38}[a-z0-9])?
    "replacedByJobId": "A String", # If another job is an update of this job (and thus, this job is in JOB_STATE_UPDATED), this field will contain the ID of that job.
    "projectId": "A String", # The project which owns the job.
    "transformNameMapping": { # Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.
      "a_key": "A String",
    },
    "createTime": "A String", # Timestamp when job was initially created. Immutable, set by the Dataflow service.
    "environment": { # Describes the environment in which a Dataflow Job runs. # Environment for the job.
      "version": { # A structure describing which components and their versions of the service are required in order to run the job.
        "a_key": "", # Properties of the object.
      },
      "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The system will append the suffix "/temp-{JOBNAME} to this resource prefix, where {JOBNAME} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
      "internalExperiments": { # Experimental settings.
        "a_key": "", # Properties of the object. Contains field @ype with type URL.
      },
      "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset}
      "experiments": [ # The list of experiments to enable.
        "A String",
      ],
      "sdkPipelineOptions": { # The Dataflow SDK pipeline options specified by the user. These options are passed through the service and are used to recreate the SDK pipeline options on the worker in a language agnostic and platform independent way.
        "a_key": "", # Properties of the object.
      },
      "userAgent": { # A description of the process that generated the request.
        "a_key": "", # Properties of the object.
      },
      "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com".
      "workerPools": [ # Worker pools. At least one "harness" worker pool must be specified in order for the job to have workers.
        { # Describes one particular pool of Dataflow workers to be instantiated by the Dataflow service in order to perform the computations required by a job. Note that a workflow job may use multiple pools, in order to match the various computational requirements of the various stages of the job.
          "diskSourceImage": "A String", # Fully qualified source image for disks.
          "taskrunnerSettings": { # Taskrunner configuration settings. # Settings passed through to Google Compute Engine workers when using the standard Dataflow task runner. Users should ignore this field.
            "workflowFileName": "A String", # Store the workflow in this file.
            "logUploadLocation": "A String", # Indicates where to put logs. If this is not specified, the logs will not be uploaded. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
            "commandlinesFileName": "A String", # Store preprocessing commands in this file.
            "parallelWorkerSettings": { # Provides data to pass through to the worker harness. # Settings to pass to the parallel worker harness.
              "reportingEnabled": True or False, # Send work progress updates to service.
              "shuffleServicePath": "A String", # The Shuffle service path relative to the root URL, for example, "shuffle/v1beta1".
              "workerId": "A String", # ID of the worker running this pipeline.
              "baseUrl": "A String", # The base URL for accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
              "servicePath": "A String", # The Dataflow service path relative to the root URL, for example, "dataflow/v1b3/projects".
              "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
            },
            "vmId": "A String", # ID string of VM.
            "baseTaskDir": "A String", # Location on the worker for task-specific subdirectories.
            "continueOnException": True or False, # Do we continue taskrunner if an exception is hit?
            "baseUrl": "A String", # The base URL for the taskrunner to use when accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
            "taskUser": "A String", # The UNIX user ID on the worker VM to use for tasks launched by taskrunner; e.g. "root".
            "taskGroup": "A String", # The UNIX group ID on the worker VM to use for tasks launched by taskrunner; e.g. "wheel".
            "oauthScopes": [ # OAuth2 scopes to be requested by the taskrunner in order to access the dataflow API.
              "A String",
            ],
            "languageHint": "A String", # Suggested backend language.
            "logToSerialconsole": True or False, # Send taskrunner log into to Google Compute Engine VM serial console?
            "streamingWorkerMainClass": "A String", # Streaming worker main class name.
            "logDir": "A String", # Directory on the VM to store logs.
            "dataflowApiVersion": "A String", # API version of endpoint, e.g. "v1b3"
            "harnessCommand": "A String", # Command to launch the worker harness.
            "tempStoragePrefix": "A String", # The prefix of the resources the taskrunner should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
            "alsologtostderr": True or False, # Also send taskrunner log info to stderr?
          },
          "kind": "A String", # The kind of the worker pool; currently only 'harness' and 'shuffle' are supported.
          "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default.
          "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
          "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
          "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
          "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
          "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
          "diskSizeGb": 42, # Size of root disk for VMs, in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
          "metadata": { # Metadata to set on the Google Compute Engine VMs.
            "a_key": "A String",
          },
          "poolArgs": { # Extra arguments for this worker pool.
            "a_key": "", # Properties of the object. Contains field @ype with type URL.
          },
          "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
          "workerHarnessContainerImage": "A String", # Docker container image that executes Dataflow worker harness, residing in Google Container Registry. Required.
          "defaultPackageSet": "A String", # The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.
          "packages": [ # Packages to be installed on workers.
            { # Packages that need to be installed in order for a worker to run the steps of the Dataflow job which will be assigned to its worker pool. This is the mechanism by which the SDK causes code to be loaded onto the workers. For example, the Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc) required in order for that code to run.
              "name": "A String", # The name of the package.
              "location": "A String", # The resource to read the package from. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket} bucket.storage.googleapis.com/
            },
          ],
          "autoscalingSettings": { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
            "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
            "algorithm": "A String", # The algorithm to use for autoscaling.
          },
          "dataDisks": [ # Data disks that are used by a VM in this workflow.
            { # Describes the data disk used by a workflow job.
              "mountPoint": "A String", # Directory in a VM where disk is mounted.
              "sizeGb": 42, # Size of disk in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
              "diskType": "A String", # Disk storage type, as defined by Google Compute Engine. This must be a disk type appropriate to the project and zone in which the workers will run. If unknown or unspecified, the service will attempt to choose a reasonable default. For example, the standard persistent disk type is a resource name typically ending in "pd-standard". If SSD persistent disks are available, the resource name typically ends with "pd-ssd". The actual valid values are defined the Google Compute Engine API, not by the Dataflow API; consult the Google Compute Engine documentation for more information about determining the set of available disk types for a particular project and zone. Google Compute Engine Disk types are local to a particular project in a particular zone, and so the resource name will typically look something like this: compute.googleapis.com/projects/
                  # /zones//diskTypes/pd-standard
            },
          ],
        },
      ],
    },
    "replaceJobId": "A String", # If this job is an update of an existing job, this field will be the ID of the job it replaced. When sending a CreateJobRequest, you can update a job by specifying it here. The job named here will be stopped, and its intermediate state transferred to this job.
    "steps": [ # The top-level steps that constitute the entire job.
      { # Defines a particular step within a Dataflow job. A job consists of multiple steps, each of which performs some specific operation as part of the overall job. Data is typically passed from one step to another as part of the job. Here's an example of a sequence of steps which together implement a Map-Reduce job: * Read a collection of data from some source, parsing the collection's elements. * Validate the elements. * Apply a user-defined function to map each element to some value and extract an element-specific key value. * Group elements with the same key into a single element with that key, transforming a multiply-keyed collection into a uniquely-keyed collection. * Write the elements out to some data sink. (Note that the Dataflow service may be used to run many different types of jobs, not just Map-Reduce).
        "kind": "A String", # The kind of step in the dataflow Job.
        "name": "A String", # Name identifying the step. This must be unique for each step with respect to all other steps in the dataflow Job.
        "properties": { # Named properties associated with the step. Each kind of predefined step has its own required set of properties.
          "a_key": "", # Properties of the object.
        },
      },
    ],
    "currentStateTime": "A String", # The timestamp associated with the current state.
    "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
      "A String",
    ],
    "type": "A String", # The type of dataflow job.
    "id": "A String", # The unique ID of this job. This field is set by the Dataflow service when the Job is created, and is immutable for the life of the Job.
    "currentState": "A String", # The current state of the job. Jobs are created in the JOB_STATE_STOPPED state unless otherwise specified. A job in the JOB_STATE_RUNNING state may asynchronously enter a terminal state. Once a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Dataflow service; callers cannot mutate it.
    "executionInfo": { # Additional information about how a Dataflow job will be executed which isn’t contained in the submitted job. # Information about how the Dataflow service will actually run the job.
      "stages": { # A mapping from each stage to the information about that stage.
        "a_key": { # Contains information about how a particular google.dataflow.v1beta3.Step will be executed.
          "stepName": [ # The steps associated with the execution stage. Note that stages may have several steps, and that a given step might be run by more than one stage.
            "A String",
          ],
        },
      },
    },
  }

  x__xgafv: string, V1 error format.

Returns:
  An object of the form:

    { # Defines a job to be run by the Dataflow service.
      "clientRequestId": "A String", # Client's unique identifier of the job, re-used by SDK across retried attempts. If this field is set, the service will ensure its uniqueness. That is, the request to create a job will fail if the service has knowledge of a previously submitted job with the same client's id and job name. The caller may, for example, use this field to ensure idempotence of job creation across retried attempts to create a job. By default, the field is empty and, in that case, the service ignores it.
      "requestedState": "A String", # The job's requested state. UpdateJob may be used to switch between the JOB_STATE_STOPPED and JOB_STATE_RUNNING states, by setting requested_state. UpdateJob may also be used to directly set a job's requested state to JOB_STATE_CANCELLED or JOB_STATE_DONE, irrevocably terminating the job if it has not already reached a terminal state.
      "name": "A String", # The user-specified Dataflow job name. Only one Job with a given name may exist in a project at any given time. If a caller attempts to create a Job with the same name as an already-existing Job, the attempt will return the existing Job. The name must match the regular expression [a-z]([-a-z0-9]{0,38}[a-z0-9])?
      "replacedByJobId": "A String", # If another job is an update of this job (and thus, this job is in JOB_STATE_UPDATED), this field will contain the ID of that job.
      "projectId": "A String", # The project which owns the job.
      "transformNameMapping": { # Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.
        "a_key": "A String",
      },
      "createTime": "A String", # Timestamp when job was initially created. Immutable, set by the Dataflow service.
      "environment": { # Describes the environment in which a Dataflow Job runs. # Environment for the job.
        "version": { # A structure describing which components and their versions of the service are required in order to run the job.
          "a_key": "", # Properties of the object.
        },
        "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The system will append the suffix "/temp-{JOBNAME} to this resource prefix, where {JOBNAME} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
        "internalExperiments": { # Experimental settings.
          "a_key": "", # Properties of the object. Contains field @ype with type URL.
        },
        "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset}
        "experiments": [ # The list of experiments to enable.
          "A String",
        ],
        "sdkPipelineOptions": { # The Dataflow SDK pipeline options specified by the user. These options are passed through the service and are used to recreate the SDK pipeline options on the worker in a language agnostic and platform independent way.
          "a_key": "", # Properties of the object.
        },
        "userAgent": { # A description of the process that generated the request.
          "a_key": "", # Properties of the object.
        },
        "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com".
        "workerPools": [ # Worker pools. At least one "harness" worker pool must be specified in order for the job to have workers.
          { # Describes one particular pool of Dataflow workers to be instantiated by the Dataflow service in order to perform the computations required by a job. Note that a workflow job may use multiple pools, in order to match the various computational requirements of the various stages of the job.
            "diskSourceImage": "A String", # Fully qualified source image for disks.
            "taskrunnerSettings": { # Taskrunner configuration settings. # Settings passed through to Google Compute Engine workers when using the standard Dataflow task runner. Users should ignore this field.
              "workflowFileName": "A String", # Store the workflow in this file.
              "logUploadLocation": "A String", # Indicates where to put logs. If this is not specified, the logs will not be uploaded. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
              "commandlinesFileName": "A String", # Store preprocessing commands in this file.
              "parallelWorkerSettings": { # Provides data to pass through to the worker harness. # Settings to pass to the parallel worker harness.
                "reportingEnabled": True or False, # Send work progress updates to service.
                "shuffleServicePath": "A String", # The Shuffle service path relative to the root URL, for example, "shuffle/v1beta1".
                "workerId": "A String", # ID of the worker running this pipeline.
                "baseUrl": "A String", # The base URL for accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
                "servicePath": "A String", # The Dataflow service path relative to the root URL, for example, "dataflow/v1b3/projects".
                "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
              },
              "vmId": "A String", # ID string of VM.
              "baseTaskDir": "A String", # Location on the worker for task-specific subdirectories.
              "continueOnException": True or False, # Do we continue taskrunner if an exception is hit?
              "baseUrl": "A String", # The base URL for the taskrunner to use when accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
              "taskUser": "A String", # The UNIX user ID on the worker VM to use for tasks launched by taskrunner; e.g. "root".
              "taskGroup": "A String", # The UNIX group ID on the worker VM to use for tasks launched by taskrunner; e.g. "wheel".
              "oauthScopes": [ # OAuth2 scopes to be requested by the taskrunner in order to access the dataflow API.
                "A String",
              ],
              "languageHint": "A String", # Suggested backend language.
              "logToSerialconsole": True or False, # Send taskrunner log into to Google Compute Engine VM serial console?
              "streamingWorkerMainClass": "A String", # Streaming worker main class name.
              "logDir": "A String", # Directory on the VM to store logs.
              "dataflowApiVersion": "A String", # API version of endpoint, e.g. "v1b3"
              "harnessCommand": "A String", # Command to launch the worker harness.
              "tempStoragePrefix": "A String", # The prefix of the resources the taskrunner should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
              "alsologtostderr": True or False, # Also send taskrunner log info to stderr?
            },
            "kind": "A String", # The kind of the worker pool; currently only 'harness' and 'shuffle' are supported.
            "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default.
            "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
            "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
            "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
            "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
            "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
            "diskSizeGb": 42, # Size of root disk for VMs, in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
            "metadata": { # Metadata to set on the Google Compute Engine VMs.
              "a_key": "A String",
            },
            "poolArgs": { # Extra arguments for this worker pool.
              "a_key": "", # Properties of the object. Contains field @ype with type URL.
            },
            "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
            "workerHarnessContainerImage": "A String", # Docker container image that executes Dataflow worker harness, residing in Google Container Registry. Required.
            "defaultPackageSet": "A String", # The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.
            "packages": [ # Packages to be installed on workers.
              { # Packages that need to be installed in order for a worker to run the steps of the Dataflow job which will be assigned to its worker pool. This is the mechanism by which the SDK causes code to be loaded onto the workers. For example, the Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc) required in order for that code to run.
                "name": "A String", # The name of the package.
                "location": "A String", # The resource to read the package from. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket} bucket.storage.googleapis.com/
              },
            ],
            "autoscalingSettings": { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
              "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
              "algorithm": "A String", # The algorithm to use for autoscaling.
            },
            "dataDisks": [ # Data disks that are used by a VM in this workflow.
              { # Describes the data disk used by a workflow job.
                "mountPoint": "A String", # Directory in a VM where disk is mounted.
                "sizeGb": 42, # Size of disk in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
                "diskType": "A String", # Disk storage type, as defined by Google Compute Engine. This must be a disk type appropriate to the project and zone in which the workers will run. If unknown or unspecified, the service will attempt to choose a reasonable default. For example, the standard persistent disk type is a resource name typically ending in "pd-standard". If SSD persistent disks are available, the resource name typically ends with "pd-ssd". The actual valid values are defined the Google Compute Engine API, not by the Dataflow API; consult the Google Compute Engine documentation for more information about determining the set of available disk types for a particular project and zone. Google Compute Engine Disk types are local to a particular project in a particular zone, and so the resource name will typically look something like this: compute.googleapis.com/projects/
                    # /zones//diskTypes/pd-standard
              },
            ],
          },
        ],
      },
      "replaceJobId": "A String", # If this job is an update of an existing job, this field will be the ID of the job it replaced. When sending a CreateJobRequest, you can update a job by specifying it here. The job named here will be stopped, and its intermediate state transferred to this job.
      "steps": [ # The top-level steps that constitute the entire job.
        { # Defines a particular step within a Dataflow job. A job consists of multiple steps, each of which performs some specific operation as part of the overall job. Data is typically passed from one step to another as part of the job. Here's an example of a sequence of steps which together implement a Map-Reduce job: * Read a collection of data from some source, parsing the collection's elements. * Validate the elements. * Apply a user-defined function to map each element to some value and extract an element-specific key value. * Group elements with the same key into a single element with that key, transforming a multiply-keyed collection into a uniquely-keyed collection. * Write the elements out to some data sink. (Note that the Dataflow service may be used to run many different types of jobs, not just Map-Reduce).
          "kind": "A String", # The kind of step in the dataflow Job.
          "name": "A String", # Name identifying the step. This must be unique for each step with respect to all other steps in the dataflow Job.
          "properties": { # Named properties associated with the step. Each kind of predefined step has its own required set of properties.
            "a_key": "", # Properties of the object.
          },
        },
      ],
      "currentStateTime": "A String", # The timestamp associated with the current state.
      "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
        "A String",
      ],
      "type": "A String", # The type of dataflow job.
      "id": "A String", # The unique ID of this job. This field is set by the Dataflow service when the Job is created, and is immutable for the life of the Job.
      "currentState": "A String", # The current state of the job. Jobs are created in the JOB_STATE_STOPPED state unless otherwise specified. A job in the JOB_STATE_RUNNING state may asynchronously enter a terminal state. Once a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Dataflow service; callers cannot mutate it.
      "executionInfo": { # Additional information about how a Dataflow job will be executed which isn’t contained in the submitted job. # Information about how the Dataflow service will actually run the job.
        "stages": { # A mapping from each stage to the information about that stage.
          "a_key": { # Contains information about how a particular google.dataflow.v1beta3.Step will be executed.
            "stepName": [ # The steps associated with the execution stage. Note that stages may have several steps, and that a given step might be run by more than one stage.
              "A String",
            ],
          },
        },
      },
    }</pre>
</div>
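<p>A sketch of requesting cancellation through update(), assuming (per the requestedState description above) that a body carrying only requestedState is accepted; project and job ids are hypothetical placeholders.</p>
<pre>
from googleapiclient.discovery import build

dataflow = build('dataflow', 'v1b3')

# Setting requestedState to JOB_STATE_CANCELLED irrevocably terminates
# the job once accepted, unless it has already reached a terminal state.
job = dataflow.projects().jobs().update(
    projectId='my-project',
    jobId='my-job-id',
    body={'requestedState': 'JOB_STATE_CANCELLED'},
).execute()

print(job.get('currentState'), job.get('requestedState'))
</pre>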

</body></html>