List BigQuery Jobs
Lists all jobs that you started in the specified project. Job information is available for a six-month period after creation. The job list is sorted in reverse chronological order by job creation time. Requires the Can View project role, or the Is Owner project role if you set the allUsers property.
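For orientation, here is a minimal sketch of the same listing call using the google-cloud-bigquery Python client. The project ID `my-project` is a placeholder, and credentials are assumed to come from application-default credentials; this illustrates the underlying API, not this action's own invocation mechanism.

```python
from google.cloud import bigquery

# Placeholder project ID; credentials are read from the environment
# (application-default credentials).
client = bigquery.Client(project="my-project")

# Jobs come back newest-first, mirroring the sort order described above.
for job in client.list_jobs(max_results=10):
    print(job.job_id, job.job_type, job.state, job.created)
```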
External Documentation
To learn more, visit the GCP documentation.
Basic Parameters
Parameter | Description |
---|---|
Parent Job ID | If set, retrieves only jobs whose parent is this job. Otherwise, retrieves only jobs that have no parent. |
Project ID | Project ID of the jobs to list. |
Return All Pages | Automatically fetch all resources, page by page; see the pagination sketch after the example output. |
Advanced Parameters
Parameter | Description |
---|---|
Max Creation Time | Maximum value for job creation time, in milliseconds since the POSIX epoch. If set, only jobs created at or before this timestamp are returned. |
Max Results | Maximum number of results to return. |
Min Creation Time | Minimum value for job creation time, in milliseconds since the POSIX epoch. If set, only jobs created at or after this timestamp are returned. |
Projection | Restrict information returned to a set of selected fields. |
State Filter | Filter for job state. |
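The two creation-time parameters are plain integer timestamps in milliseconds since the epoch. As a hedged illustration of how such filters are typically built, the google-cloud-bigquery Python client accepts datetime objects and converts them for you (parameter names below follow that client, not this action):

```python
from datetime import datetime, timedelta, timezone

from google.cloud import bigquery

client = bigquery.Client(project="my-project")  # placeholder project ID

# Jobs created in the last 24 hours that have already finished.
cutoff = datetime.now(timezone.utc) - timedelta(days=1)
jobs = client.list_jobs(
    min_creation_time=cutoff,  # converted to minCreationTime (ms since epoch)
    state_filter="done",       # stateFilter accepts done, pending, or running
    max_results=50,
)
for job in jobs:
    print(job.job_id, job.state)
```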
Example Output
{
"etag": "A hash of this page of results.",
"jobs": [
{
"configuration": {
"copy": {
"createDisposition": "[Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion.",
"destinationEncryptionConfiguration": {
"kmsKeyName": "[Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key."
},
"destinationExpirationTime": "[Optional] The time when the destination table expires. Expired tables will be deleted and their storage reclaimed.",
"destinationTable": {
"datasetId": "[Required] The ID of the dataset containing this table.",
"projectId": "[Required] The ID of the project containing this table.",
"tableId": "[Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters."
},
"operationType": "[Optional] Supported operation types in table copy job.",
"sourceTable": {
"datasetId": "[Required] The ID of the dataset containing this table.",
"projectId": "[Required] The ID of the project containing this table.",
"tableId": "[Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters."
},
"sourceTables": [
{
"datasetId": "[Required] The ID of the dataset containing this table.",
"projectId": "[Required] The ID of the project containing this table.",
"tableId": "[Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters."
}
],
"writeDisposition": "[Optional] Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is WRITE_EMPTY. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion."
},
"dryRun": false,
"extract": {
"compression": "[Optional] The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro. Not applicable when extracting models.",
"destinationFormat": "[Optional] The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON, PARQUET or AVRO for tables and ML_TF_SAVED_MODEL or ML_XGBOOST_BOOSTER for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is ML_TF_SAVED_MODEL.",
"destinationUri": "[Pick one] DEPRECATED: Use destinationUris instead, passing only one URI as necessary. The fully-qualified Google Cloud Storage URI where the extracted table should be written.",
"destinationUris": [
"string"
],
"fieldDelimiter": "[Optional] Delimiter to use between fields in the exported data. Default is ','. Not applicable when extracting models.",
"printHeader": true,
"sourceModel": {
"datasetId": "[Required] The ID of the dataset containing this model.",
"modelId": "[Required] The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.",
"projectId": "[Required] The ID of the project containing this model."
},
"sourceTable": {
"datasetId": "[Required] The ID of the dataset containing this table.",
"projectId": "[Required] The ID of the project containing this table.",
"tableId": "[Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters."
},
"useAvroLogicalTypes": false
},
"jobTimeoutMs": "[Optional] Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.",
"jobType": "[Output-only] The type of the job. Can be QUERY, LOAD, EXTRACT, COPY or UNKNOWN.",
"labels": {},
"load": {
"allowJaggedRows": false,
"allowQuotedNewlines": false,
"autodetect": false,
"clustering": {
"fields": [
"string"
]
},
"createDisposition": "[Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion.",
"decimalTargetTypes": [
"string"
],
"destinationEncryptionConfiguration": {
"kmsKeyName": "[Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key."
},
"destinationTable": {
"datasetId": "[Required] The ID of the dataset containing this table.",
"projectId": "[Required] The ID of the project containing this table.",
"tableId": "[Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters."
},
"destinationTableProperties": {
"description": "[Optional] The description for the destination table. This will only be used if the destination table is newly created. If the table already exists and a value different than the current description is provided, the job will fail.",
"expirationTime": "[Optional] The destination table expiration time. If this field is set: For a new table, it will set the table's expiration time (even if there is a dataset level default table expiration time). For an existing table, it will update the table's expiration time. If this field is not set: For a new table, if dataset level default table expiration time is present, that will be applied. For an existing table, no change is made to the table's expiration time. Additionally this field is only applied when data is written to an empty table (WRITE_EMPTY) or data is overwritten to a table (WRITE_TRUNCATE).",
"friendlyName": "[Optional] The friendly name for the destination table. This will only be used if the destination table is newly created. If the table already exists and a value different than the current friendly name is provided, the job will fail.",
"labels": {}
},
"encoding": "[Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.",
"fieldDelimiter": "[Optional] The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence \"\\t\" to specify a tab separator. The default value is a comma (',').",
"hivePartitioningOptions": {
"mode": "[Optional] When set, what mode of hive partitioning to use when reading data. The following modes are supported. (1) AUTO: automatically infer partition key name(s) and type(s). (2) STRINGS: automatically infer partition key name(s). All types are interpreted as strings. (3) CUSTOM: partition key schema is encoded in the source URI prefix. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported types include: AVRO, CSV, JSON, ORC and Parquet.",
"requirePartitionFilter": false,
"sourceUriPrefix": "[Optional] When hive partition detection is requested, a common prefix for all source uris should be supplied. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/ (trailing slash does not matter)."
},
"ignoreUnknownValues": false,
"jsonExtension": "[Optional] If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: - for newline-delimited GeoJSON: set to GEOJSON.",
"maxBadRecords": 0,
"nullMarker": "[Optional] Specifies a string that represents a null value in a CSV file. For example, if you specify \"\\N\", BigQuery interprets \"\\N\" as a null value when loading a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.",
"parquetOptions": {
"enableListInference": false,
"enumAsString": false
},
"projectionFields": [
"string"
],
"quote": "\"",
"rangePartitioning": {
"field": "[TrustedTester] [Required] The table is partitioned by this field. The field must be a top-level NULLABLE/REQUIRED field. The only supported type is INTEGER/INT64.",
"range": {
"end": "[TrustedTester] [Required] The end of range partitioning, exclusive.",
"interval": "[TrustedTester] [Required] The width of each interval.",
"start": "[TrustedTester] [Required] The start of range partitioning, inclusive."
}
},
"schema": {
"fields": [
{
"categories": {
"names": [
"string"
]
},
"collationSpec": "Optional. Collation specification of the field. It only can be set on string type field.",
"description": "[Optional] The field description. The maximum length is 1,024 characters.",
"fields": [
null
],
"maxLength": "[Optional] Maximum length of values of this field for STRINGS or BYTES. If max_length is not specified, no maximum length constraint is imposed on this field. If type = \"STRING\", then max_length represents the maximum UTF-8 length of strings in this field. If type = \"BYTES\", then max_length represents the maximum number of bytes in this field. It is invalid to set this field if type ≠ \"STRING\" and ≠ \"BYTES\".",
"mode": "[Optional] The field mode. Possible values include NULLABLE, REQUIRED and REPEATED. The default value is NULLABLE.",
"name": "[Required] The field name. The name must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), and must start with a letter or underscore. The maximum length is 300 characters.",
"policyTags": {
"names": [
"string"
]
},
"precision": "[Optional] Precision (maximum number of total digits in base 10) and scale (maximum number of digits in the fractional part in base 10) constraints for values of this field for NUMERIC or BIGNUMERIC. It is invalid to set precision or scale if type ≠ \"NUMERIC\" and ≠ \"BIGNUMERIC\". If precision and scale are not specified, no value range constraint is imposed on this field insofar as values are permitted by the type. Values of this NUMERIC or BIGNUMERIC field must be in this range when: - Precision (P) and scale (S) are specified: [-10P-S + 10-S, 10P-S - 10-S] - Precision (P) is specified but not scale (and thus scale is interpreted to be equal to zero): [-10P + 1, 10P - 1]. Acceptable values for precision and scale if both are specified: - If type = \"NUMERIC\": 1 ≤ precision - scale ≤ 29 and 0 ≤ scale ≤ 9. - If type = \"BIGNUMERIC\": 1 ≤ precision - scale ≤ 38 and 0 ≤ scale ≤ 38. Acceptable values for precision if only precision is specified but not scale (and thus scale is interpreted to be equal to zero): - If type = \"NUMERIC\": 1 ≤ precision ≤ 29. - If type = \"BIGNUMERIC\": 1 ≤ precision ≤ 38. If scale is specified but not precision, then it is invalid.",
"scale": "[Optional] See documentation for precision.",
"type": "[Required] The field data type. Possible values include STRING, BYTES, INTEGER, INT64 (same as INTEGER), FLOAT, FLOAT64 (same as FLOAT), NUMERIC, BIGNUMERIC, BOOLEAN, BOOL (same as BOOLEAN), TIMESTAMP, DATE, TIME, DATETIME, INTERVAL, RECORD (where RECORD indicates that the field contains a nested schema) or STRUCT (same as RECORD)."
}
]
},
"schemaInline": "[Deprecated] The inline schema. For CSV schemas, specify as \"Field1:Type1[,Field2:Type2]*\". For example, \"foo:STRING, bar:INTEGER, baz:FLOAT\".",
"schemaInlineFormat": "[Deprecated] The format of the schemaInline property.",
"schemaUpdateOptions": [
"string"
],
"skipLeadingRows": 0,
"sourceFormat": "[Optional] The format of the data files. For CSV files, specify \"CSV\". For datastore backups, specify \"DATASTORE_BACKUP\". For newline-delimited JSON, specify \"NEWLINE_DELIMITED_JSON\". For Avro, specify \"AVRO\". For parquet, specify \"PARQUET\". For orc, specify \"ORC\". The default value is CSV.",
"sourceUris": [
"string"
],
"timePartitioning": {
"expirationMs": "[Optional] Number of milliseconds for which to keep the storage for partitions in the table. The storage in a partition will have an expiration time of its partition time plus this value.",
"field": "[Beta] [Optional] If not set, the table is partitioned by pseudo column, referenced via either '_PARTITIONTIME' as TIMESTAMP type, or '_PARTITIONDATE' as DATE type. If field is specified, the table is instead partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED.",
"requirePartitionFilter": false,
"type": "[Required] The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively. When the type is not specified, the default behavior is DAY."
},
"useAvroLogicalTypes": false,
"writeDisposition": "[Optional] Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is WRITE_APPEND. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion."
},
"query": {
"allowLargeResults": false,
"clustering": {
"fields": [
"string"
]
},
"connectionProperties": [
{
"key": "[Required] Name of the connection property to set.",
"value": "[Required] Value of the connection property."
}
],
"createDisposition": "[Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion.",
"createSession": false,
"defaultDataset": {
"datasetId": "[Required] A unique ID for this dataset, without the project name. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.",
"projectId": "[Optional] The ID of the project containing this dataset."
},
"destinationEncryptionConfiguration": {
"kmsKeyName": "[Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key."
},
"destinationTable": {
"datasetId": "[Required] The ID of the dataset containing this table.",
"projectId": "[Required] The ID of the project containing this table.",
"tableId": "[Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters."
},
"flattenResults": true,
"maximumBillingTier": 1,
"maximumBytesBilled": "[Optional] Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.",
"parameterMode": "Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.",
"preserveNulls": false,
"priority": "[Optional] Specifies a priority for the query. Possible values include INTERACTIVE and BATCH. The default value is INTERACTIVE.",
"query": "[Required] SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL.",
"queryParameters": [
{
"name": "[Optional] If unset, this is a positional parameter. Otherwise, should be unique within a query.",
"parameterType": {
"structTypes": [
{
"description": "[Optional] Human-oriented description of the field.",
"name": "[Optional] The name of this field."
}
],
"type": "[Required] The top level type of this field."
},
"parameterValue": {
"arrayValues": [
null
],
"structValues": {},
"value": "[Optional] The value of this value, if a simple scalar type."
}
}
],
"rangePartitioning": {
"field": "[TrustedTester] [Required] The table is partitioned by this field. The field must be a top-level NULLABLE/REQUIRED field. The only supported type is INTEGER/INT64.",
"range": {
"end": "[TrustedTester] [Required] The end of range partitioning, exclusive.",
"interval": "[TrustedTester] [Required] The width of each interval.",
"start": "[TrustedTester] [Required] The start of range partitioning, inclusive."
}
},
"schemaUpdateOptions": [
"string"
],
"tableDefinitions": {},
"timePartitioning": {
"expirationMs": "[Optional] Number of milliseconds for which to keep the storage for partitions in the table. The storage in a partition will have an expiration time of its partition time plus this value.",
"field": "[Beta] [Optional] If not set, the table is partitioned by pseudo column, referenced via either '_PARTITIONTIME' as TIMESTAMP type, or '_PARTITIONDATE' as DATE type. If field is specified, the table is instead partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED.",
"requirePartitionFilter": false,
"type": "[Required] The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively. When the type is not specified, the default behavior is DAY."
},
"useLegacySql": true,
"useQueryCache": true,
"userDefinedFunctionResources": [
{
"inlineCode": "[Pick one] An inline resource that contains code for a user-defined function (UDF). Providing a inline code resource is equivalent to providing a URI for a file containing the same code.",
"resourceUri": "[Pick one] A code resource to load from a Google Cloud Storage URI (gs://bucket/path)."
}
],
"writeDisposition": "[Optional] Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is WRITE_EMPTY. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion."
}
},
"errorResult": {
"debugInfo": "Debugging information. This property is internal to Google and should not be used.",
"location": "Specifies where the error occurred, if present.",
"message": "A human-readable description of the error.",
"reason": "A short error code that summarizes the error."
},
"id": "Unique opaque ID of the job.",
"jobReference": {
"jobId": "[Required] The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.",
"location": "The geographic location of the job. See details at https://cloud.google.com/bigquery/docs/locations#specifying_your_location.",
"projectId": "[Required] The ID of the project containing this job."
},
"kind": "bigquery#job",
"state": "Running state of the job. When the state is DONE, errorResult can be checked to determine whether the job succeeded or failed.",
"statistics": {
"completionRatio": 0,
"creationTime": "[Output-only] Creation time of this job, in milliseconds since the epoch. This field will be present on all jobs.",
"endTime": "[Output-only] End time of this job, in milliseconds since the epoch. This field will be present whenever a job is in the DONE state.",
"extract": {
"destinationUriFileCounts": [
"int64"
],
"inputBytes": "[Output-only] Number of user bytes extracted into the result. This is the byte count as computed by BigQuery for billing purposes."
},
"load": {
"badRecords": "[Output-only] The number of bad records encountered. Note that if the job has failed because of more bad records encountered than the maximum allowed in the load job configuration, then this number can be less than the total number of bad records present in the input data.",
"inputFileBytes": "[Output-only] Number of bytes of source data in a load job.",
"inputFiles": "[Output-only] Number of source files in a load job.",
"outputBytes": "[Output-only] Size of the loaded data in bytes. Note that while a load job is in the running state, this value may change.",
"outputRows": "[Output-only] Number of rows imported in a load job. Note that while an import job is in the running state, this value may change."
},
"numChildJobs": "[Output-only] Number of child jobs executed.",
"parentJobId": "[Output-only] If this is a child job, the id of the parent.",
"query": {
"biEngineStatistics": {
"biEngineMode": "$(stats.bi_engine_mode)",
"biEngineReasons": [
{
"code": "$(reason.code)",
"message": "$(reason.message)"
}
]
},
"billingTier": 0,
"cacheHit": false,
"ddlAffectedRowAccessPolicyCount": "[Output-only] [Preview] The number of row access policies affected by a DDL statement. Present only for DROP ALL ROW ACCESS POLICIES queries.",
"ddlDestinationTable": {
"datasetId": "[Required] The ID of the dataset containing this table.",
"projectId": "[Required] The ID of the project containing this table.",
"tableId": "[Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters."
},
"ddlOperationPerformed": "The DDL operation performed, possibly dependent on the pre-existence of the DDL target. Possible values (new values might be added in the future): \"CREATE\": The query created the DDL target. \"SKIP\": No-op. Example cases: the query is CREATE TABLE IF NOT EXISTS while the table already exists, or the query is DROP TABLE IF EXISTS while the table does not exist. \"REPLACE\": The query replaced the DDL target. Example case: the query is CREATE OR REPLACE TABLE, and the table already exists. \"DROP\": The query deleted the DDL target.",
"ddlTargetDataset": {
"datasetId": "[Required] A unique ID for this dataset, without the project name. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.",
"projectId": "[Optional] The ID of the project containing this dataset."
},
"ddlTargetRoutine": {
"datasetId": "[Required] The ID of the dataset containing this routine.",
"projectId": "[Required] The ID of the project containing this routine.",
"routineId": "[Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters."
},
"ddlTargetRowAccessPolicy": {
"datasetId": "[Required] The ID of the dataset containing this row access policy.",
"policyId": "[Required] The ID of the row access policy. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.",
"projectId": "[Required] The ID of the project containing this row access policy.",
"tableId": "[Required] The ID of the table containing this row access policy."
},
"ddlTargetTable": {
"datasetId": "[Required] The ID of the dataset containing this table.",
"projectId": "[Required] The ID of the project containing this table.",
"tableId": "[Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters."
},
"dmlStats": {
"deletedRowCount": "Number of deleted Rows. populated by DML DELETE, MERGE and TRUNCATE statements.",
"insertedRowCount": "Number of inserted Rows. Populated by DML INSERT and MERGE statements.",
"updatedRowCount": "Number of updated Rows. Populated by DML UPDATE and MERGE statements."
},
"estimatedBytesProcessed": "[Output-only] The original estimate of bytes processed for the job.",
"mlStatistics": {
"iterationResults": [
{
"durationMs": "Time taken to run the iteration in milliseconds.",
"evalLoss": 0,
"index": 0,
"learnRate": 0,
"trainingLoss": 0
}
],
"maxIterations": "Maximum number of iterations specified as max_iterations in the 'CREATE MODEL' query. The actual number of iterations may be less than this number due to early stop."
},
"modelTraining": {
"currentIteration": 0,
"expectedTotalIterations": "[Output-only, Beta] Expected number of iterations for the create model query job specified as num_iterations in the input query. The actual total number of iterations may be less than this number due to early stop."
},
"modelTrainingCurrentIteration": 0,
"modelTrainingExpectedTotalIteration": "[Output-only, Beta] Deprecated; do not use.",
"numDmlAffectedRows": "[Output-only] The number of rows affected by a DML statement. Present only for DML statements INSERT, UPDATE or DELETE.",
"queryPlan": [
{
"completedParallelInputs": "Number of parallel input segments completed.",
"computeMsAvg": "Milliseconds the average shard spent on CPU-bound tasks.",
"computeMsMax": "Milliseconds the slowest shard spent on CPU-bound tasks.",
"computeRatioAvg": 0,
"computeRatioMax": 0,
"endMs": "Stage end time represented as milliseconds since epoch.",
"id": "Unique ID for stage within plan.",
"inputStages": [
"int64"
],
"name": "Human-readable name for stage.",
"parallelInputs": "Number of parallel input segments to be processed.",
"readMsAvg": "Milliseconds the average shard spent reading input.",
"readMsMax": "Milliseconds the slowest shard spent reading input.",
"readRatioAvg": 0,
"readRatioMax": 0,
"recordsRead": "Number of records read into the stage.",
"recordsWritten": "Number of records written by the stage.",
"shuffleOutputBytes": "Total number of bytes written to shuffle.",
"shuffleOutputBytesSpilled": "Total number of bytes written to shuffle and spilled to disk.",
"slotMs": "Slot-milliseconds used by the stage.",
"startMs": "Stage start time represented as milliseconds since epoch.",
"status": "Current status for the stage.",
"steps": [
{
"kind": "Machine-readable operation type.",
"substeps": [
"string"
]
}
],
"waitMsAvg": "Milliseconds the average shard spent waiting to be scheduled.",
"waitMsMax": "Milliseconds the slowest shard spent waiting to be scheduled.",
"waitRatioAvg": 0,
"waitRatioMax": 0,
"writeMsAvg": "Milliseconds the average shard spent on writing output.",
"writeMsMax": "Milliseconds the slowest shard spent on writing output.",
"writeRatioAvg": 0,
"writeRatioMax": 0
}
],
"referencedRoutines": [
{
"datasetId": "[Required] The ID of the dataset containing this routine.",
"projectId": "[Required] The ID of the project containing this routine.",
"routineId": "[Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters."
}
],
"referencedTables": [
{
"datasetId": "[Required] The ID of the dataset containing this table.",
"projectId": "[Required] The ID of the project containing this table.",
"tableId": "[Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters."
}
],
"reservationUsage": [
{
"name": "[Output-only] Reservation name or \"unreserved\" for on-demand resources usage.",
"slotMs": "[Output-only] Slot-milliseconds the job spent in the given reservation."
}
],
"schema": {
"fields": [
{
"categories": {
"names": [
"string"
]
},
"collationSpec": "Optional. Collation specification of the field. It only can be set on string type field.",
"description": "[Optional] The field description. The maximum length is 1,024 characters.",
"fields": [
null
],
"maxLength": "[Optional] Maximum length of values of this field for STRINGS or BYTES. If max_length is not specified, no maximum length constraint is imposed on this field. If type = \"STRING\", then max_length represents the maximum UTF-8 length of strings in this field. If type = \"BYTES\", then max_length represents the maximum number of bytes in this field. It is invalid to set this field if type ≠ \"STRING\" and ≠ \"BYTES\".",
"mode": "[Optional] The field mode. Possible values include NULLABLE, REQUIRED and REPEATED. The default value is NULLABLE.",
"name": "[Required] The field name. The name must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), and must start with a letter or underscore. The maximum length is 300 characters.",
"policyTags": {
"names": [
"string"
]
},
"precision": "[Optional] Precision (maximum number of total digits in base 10) and scale (maximum number of digits in the fractional part in base 10) constraints for values of this field for NUMERIC or BIGNUMERIC. It is invalid to set precision or scale if type ≠ \"NUMERIC\" and ≠ \"BIGNUMERIC\". If precision and scale are not specified, no value range constraint is imposed on this field insofar as values are permitted by the type. Values of this NUMERIC or BIGNUMERIC field must be in this range when: - Precision (P) and scale (S) are specified: [-10P-S + 10-S, 10P-S - 10-S] - Precision (P) is specified but not scale (and thus scale is interpreted to be equal to zero): [-10P + 1, 10P - 1]. Acceptable values for precision and scale if both are specified: - If type = \"NUMERIC\": 1 ≤ precision - scale ≤ 29 and 0 ≤ scale ≤ 9. - If type = \"BIGNUMERIC\": 1 ≤ precision - scale ≤ 38 and 0 ≤ scale ≤ 38. Acceptable values for precision if only precision is specified but not scale (and thus scale is interpreted to be equal to zero): - If type = \"NUMERIC\": 1 ≤ precision ≤ 29. - If type = \"BIGNUMERIC\": 1 ≤ precision ≤ 38. If scale is specified but not precision, then it is invalid.",
"scale": "[Optional] See documentation for precision.",
"type": "[Required] The field data type. Possible values include STRING, BYTES, INTEGER, INT64 (same as INTEGER), FLOAT, FLOAT64 (same as FLOAT), NUMERIC, BIGNUMERIC, BOOLEAN, BOOL (same as BOOLEAN), TIMESTAMP, DATE, TIME, DATETIME, INTERVAL, RECORD (where RECORD indicates that the field contains a nested schema) or STRUCT (same as RECORD)."
}
]
},
"statementType": "The type of query statement, if valid. Possible values (new values might be added in the future): \"SELECT\": SELECT query. \"INSERT\": INSERT query; see https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language. \"UPDATE\": UPDATE query; see https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language. \"DELETE\": DELETE query; see https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language. \"MERGE\": MERGE query; see https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language. \"ALTER_TABLE\": ALTER TABLE query. \"ALTER_VIEW\": ALTER VIEW query. \"ASSERT\": ASSERT condition AS 'description'. \"CREATE_FUNCTION\": CREATE FUNCTION query. \"CREATE_MODEL\": CREATE [OR REPLACE] MODEL ... AS SELECT ... . \"CREATE_PROCEDURE\": CREATE PROCEDURE query. \"CREATE_TABLE\": CREATE [OR REPLACE] TABLE without AS SELECT. \"CREATE_TABLE_AS_SELECT\": CREATE [OR REPLACE] TABLE ... AS SELECT ... . \"CREATE_VIEW\": CREATE [OR REPLACE] VIEW ... AS SELECT ... . \"DROP_FUNCTION\" : DROP FUNCTION query. \"DROP_PROCEDURE\": DROP PROCEDURE query. \"DROP_TABLE\": DROP TABLE query. \"DROP_VIEW\": DROP VIEW query.",
"timeline": [
{
"activeUnits": "Total number of units currently being processed by workers. This does not correspond directly to slot usage. This is the largest value observed since the last sample.",
"completedUnits": "Total parallel units of work completed by this query.",
"elapsedMs": "Milliseconds elapsed since the start of query execution.",
"pendingUnits": "Total parallel units of work remaining for the active stages.",
"totalSlotMs": "Cumulative slot-ms consumed by the query."
}
],
"totalBytesBilled": "[Output-only] Total bytes billed for the job.",
"totalBytesProcessed": "[Output-only] Total bytes processed for the job.",
"totalBytesProcessedAccuracy": "[Output-only] For dry-run jobs, totalBytesProcessed is an estimate and this field specifies the accuracy of the estimate. Possible values can be: UNKNOWN: accuracy of the estimate is unknown. PRECISE: estimate is precise. LOWER_BOUND: estimate is lower bound of what the query would cost. UPPER_BOUND: estimate is upper bound of what the query would cost.",
"totalPartitionsProcessed": "[Output-only] Total number of partitions processed from all partitioned tables referenced in the job.",
"totalSlotMs": "[Output-only] Slot-milliseconds for the job.",
"undeclaredQueryParameters": [
{
"name": "[Optional] If unset, this is a positional parameter. Otherwise, should be unique within a query.",
"parameterType": {
"structTypes": [
{
"description": "[Optional] Human-oriented description of the field.",
"name": "[Optional] The name of this field."
}
],
"type": "[Required] The top level type of this field."
},
"parameterValue": {
"arrayValues": [
null
],
"structValues": {},
"value": "[Optional] The value of this value, if a simple scalar type."
}
}
]
},
"quotaDeferments": [
"string"
],
"reservationUsage": [
{
"name": "[Output-only] Reservation name or \"unreserved\" for on-demand resources usage.",
"slotMs": "[Output-only] Slot-milliseconds the job spent in the given reservation."
}
],
"reservation_id": "[Output-only] Name of the primary reservation assigned to this job. Note that this could be different than reservations reported in the reservation usage field if parent reservations were used to execute this job.",
"rowLevelSecurityStatistics": {
"rowLevelSecurityApplied": false
},
"scriptStatistics": {
"evaluationKind": "[Output-only] Whether this child job was a statement or expression.",
"stackFrames": [
{
"endColumn": 0,
"endLine": 0,
"procedureId": "[Output-only] Name of the active procedure, empty if in a top-level script.",
"startColumn": 0,
"startLine": 0,
"text": "[Output-only] Text of the current statement/expression."
}
]
},
"sessionInfo": {
"sessionId": "[Output-only] // [Preview] Id of the session."
},
"startTime": "[Output-only] Start time of this job, in milliseconds since the epoch. This field will be present when the job transitions from the PENDING state to either RUNNING or DONE.",
"totalBytesProcessed": "[Output-only] [Deprecated] Use the bytes processed in the query statistics instead.",
"totalSlotMs": "[Output-only] Slot-milliseconds for the job.",
"transactionInfo": {
"transactionId": "[Output-only] // [Alpha] Id of the transaction."
}
},
"status": {
"errorResult": {
"debugInfo": "Debugging information. This property is internal to Google and should not be used.",
"location": "Specifies where the error occurred, if present.",
"message": "A human-readable description of the error.",
"reason": "A short error code that summarizes the error."
},
"errors": [
{
"debugInfo": "Debugging information. This property is internal to Google and should not be used.",
"location": "Specifies where the error occurred, if present.",
"message": "A human-readable description of the error.",
"reason": "A short error code that summarizes the error."
}
],
"state": "[Output-only] Running state of the job."
},
"user_email": "[Full-projection-only] Email address of the user who ran the job."
}
],
"kind": "bigquery#jobList",
"nextPageToken": "A token to request the next page of results."
}
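Note the nextPageToken field at the end of the response: when Return All Pages is disabled, each call returns a single page, and the caller re-issues the request with pageToken set until no token comes back. Below is a sketch of that loop against the raw jobs.list REST endpoint, assuming google-auth and its bundled requests transport:

```python
import google.auth
from google.auth.transport.requests import AuthorizedSession

# Application-default credentials with a read-only BigQuery scope.
credentials, project = google.auth.default(
    scopes=["https://www.googleapis.com/auth/bigquery.readonly"]
)
session = AuthorizedSession(credentials)
url = f"https://bigquery.googleapis.com/bigquery/v2/projects/{project}/jobs"

jobs, page_token = [], None
while True:
    params = {"maxResults": 100}
    if page_token:
        params["pageToken"] = page_token
    page = session.get(url, params=params).json()
    jobs.extend(page.get("jobs", []))
    page_token = page.get("nextPageToken")
    if not page_token:  # an absent token means the last page was reached
        break

print(f"fetched {len(jobs)} jobs")
```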
Workflow Library Example
List BigQuery Jobs with GCP and Send Results via Email
Preview this Workflow on desktop