gcp.bigquery.Job
Jobs are actions that BigQuery runs on your behalf to load, export, query, or copy data. Once a BigQuery job has been created, its configuration cannot be changed and the job cannot be deleted.
To get more information about Job, see:
- API documentation
- How-to Guides
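The examples below cover query, load, and copy jobs. For the export case mentioned above, a minimal sketch of an extract job might look like the following; the resource names, job ID, output file name, and CSV format are illustrative assumptions, and the `extract` block's fields mirror the BigQuery jobs API.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Sketch only: a throwaway dataset, table, and bucket are declared here so the
// example is self-contained; in practice you would reference existing resources.
const dataset = new gcp.bigquery.Dataset("dataset", {
    datasetId: "job_extract_dataset",
    location: "US",
});
const table = new gcp.bigquery.Table("table", {
    deletionProtection: false,
    datasetId: dataset.datasetId,
    tableId: "job_extract_table",
});
const bucket = new gcp.storage.Bucket("bucket", {
    name: "job-extract-bucket",
    location: "US",
    uniformBucketLevelAccess: true,
});
const extractJob = new gcp.bigquery.Job("extract", {
    jobId: "job_extract",
    extract: {
        // Table to export and the GCS destination for the exported rows.
        sourceTable: {
            projectId: table.project,
            datasetId: table.datasetId,
            tableId: table.tableId,
        },
        destinationUris: [pulumi.interpolate`gs://${bucket.name}/extract.csv`],
        destinationFormat: "CSV",
    },
});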
Example Usage
Bigquery Job Query
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const bar = new gcp.bigquery.Dataset("bar", {
    datasetId: "job_query_dataset",
    friendlyName: "test",
    description: "This is a test description",
    location: "US",
});
const foo = new gcp.bigquery.Table("foo", {
    deletionProtection: false,
    datasetId: bar.datasetId,
    tableId: "job_query_table",
});
const job = new gcp.bigquery.Job("job", {
    jobId: "job_query",
    labels: {
        "example-label": "example-value",
    },
    query: {
        query: "SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
        destinationTable: {
            projectId: foo.project,
            datasetId: foo.datasetId,
            tableId: foo.tableId,
        },
        allowLargeResults: true,
        flattenResults: true,
        scriptOptions: {
            keyResultStatement: "LAST",
        },
    },
});
import pulumi
import pulumi_gcp as gcp
bar = gcp.bigquery.Dataset("bar",
    dataset_id="job_query_dataset",
    friendly_name="test",
    description="This is a test description",
    location="US")
foo = gcp.bigquery.Table("foo",
    deletion_protection=False,
    dataset_id=bar.dataset_id,
    table_id="job_query_table")
job = gcp.bigquery.Job("job",
    job_id="job_query",
    labels={
        "example-label": "example-value",
    },
    query={
        "query": "SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
        "destination_table": {
            "project_id": foo.project,
            "dataset_id": foo.dataset_id,
            "table_id": foo.table_id,
        },
        "allow_large_results": True,
        "flatten_results": True,
        "script_options": {
            "key_result_statement": "LAST",
        },
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		bar, err := bigquery.NewDataset(ctx, "bar", &bigquery.DatasetArgs{
			DatasetId:    pulumi.String("job_query_dataset"),
			FriendlyName: pulumi.String("test"),
			Description:  pulumi.String("This is a test description"),
			Location:     pulumi.String("US"),
		})
		if err != nil {
			return err
		}
		foo, err := bigquery.NewTable(ctx, "foo", &bigquery.TableArgs{
			DeletionProtection: pulumi.Bool(false),
			DatasetId:          bar.DatasetId,
			TableId:            pulumi.String("job_query_table"),
		})
		if err != nil {
			return err
		}
		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
			JobId: pulumi.String("job_query"),
			Labels: pulumi.StringMap{
				"example-label": pulumi.String("example-value"),
			},
			Query: &bigquery.JobQueryArgs{
				Query: pulumi.String("SELECT state FROM [lookerdata:cdc.project_tycho_reports]"),
				DestinationTable: &bigquery.JobQueryDestinationTableArgs{
					ProjectId: foo.Project,
					DatasetId: foo.DatasetId,
					TableId:   foo.TableId,
				},
				AllowLargeResults: pulumi.Bool(true),
				FlattenResults:    pulumi.Bool(true),
				ScriptOptions: &bigquery.JobQueryScriptOptionsArgs{
					KeyResultStatement: pulumi.String("LAST"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var bar = new Gcp.BigQuery.Dataset("bar", new()
    {
        DatasetId = "job_query_dataset",
        FriendlyName = "test",
        Description = "This is a test description",
        Location = "US",
    });
    var foo = new Gcp.BigQuery.Table("foo", new()
    {
        DeletionProtection = false,
        DatasetId = bar.DatasetId,
        TableId = "job_query_table",
    });
    var job = new Gcp.BigQuery.Job("job", new()
    {
        JobId = "job_query",
        Labels = 
        {
            { "example-label", "example-value" },
        },
        Query = new Gcp.BigQuery.Inputs.JobQueryArgs
        {
            Query = "SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
            DestinationTable = new Gcp.BigQuery.Inputs.JobQueryDestinationTableArgs
            {
                ProjectId = foo.Project,
                DatasetId = foo.DatasetId,
                TableId = foo.TableId,
            },
            AllowLargeResults = true,
            FlattenResults = true,
            ScriptOptions = new Gcp.BigQuery.Inputs.JobQueryScriptOptionsArgs
            {
                KeyResultStatement = "LAST",
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryDestinationTableArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryScriptOptionsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var bar = new Dataset("bar", DatasetArgs.builder()
            .datasetId("job_query_dataset")
            .friendlyName("test")
            .description("This is a test description")
            .location("US")
            .build());
        var foo = new Table("foo", TableArgs.builder()
            .deletionProtection(false)
            .datasetId(bar.datasetId())
            .tableId("job_query_table")
            .build());
        var job = new Job("job", JobArgs.builder()
            .jobId("job_query")
            .labels(Map.of("example-label", "example-value"))
            .query(JobQueryArgs.builder()
                .query("SELECT state FROM [lookerdata:cdc.project_tycho_reports]")
                .destinationTable(JobQueryDestinationTableArgs.builder()
                    .projectId(foo.project())
                    .datasetId(foo.datasetId())
                    .tableId(foo.tableId())
                    .build())
                .allowLargeResults(true)
                .flattenResults(true)
                .scriptOptions(JobQueryScriptOptionsArgs.builder()
                    .keyResultStatement("LAST")
                    .build())
                .build())
            .build());
    }
}
resources:
  foo:
    type: gcp:bigquery:Table
    properties:
      deletionProtection: false
      datasetId: ${bar.datasetId}
      tableId: job_query_table
  bar:
    type: gcp:bigquery:Dataset
    properties:
      datasetId: job_query_dataset
      friendlyName: test
      description: This is a test description
      location: US
  job:
    type: gcp:bigquery:Job
    properties:
      jobId: job_query
      labels:
        example-label: example-value
      query:
        query: SELECT state FROM [lookerdata:cdc.project_tycho_reports]
        destinationTable:
          projectId: ${foo.project}
          datasetId: ${foo.datasetId}
          tableId: ${foo.tableId}
        allowLargeResults: true
        flattenResults: true
        scriptOptions:
          keyResultStatement: LAST
Bigquery Job Query Table Reference
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const bar = new gcp.bigquery.Dataset("bar", {
    datasetId: "job_query_dataset",
    friendlyName: "test",
    description: "This is a test description",
    location: "US",
});
const foo = new gcp.bigquery.Table("foo", {
    deletionProtection: false,
    datasetId: bar.datasetId,
    tableId: "job_query_table",
});
const job = new gcp.bigquery.Job("job", {
    jobId: "job_query",
    labels: {
        "example-label": "example-value",
    },
    query: {
        query: "SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
        destinationTable: {
            tableId: foo.id,
        },
        defaultDataset: {
            datasetId: bar.id,
        },
        allowLargeResults: true,
        flattenResults: true,
        scriptOptions: {
            keyResultStatement: "LAST",
        },
    },
});
import pulumi
import pulumi_gcp as gcp
bar = gcp.bigquery.Dataset("bar",
    dataset_id="job_query_dataset",
    friendly_name="test",
    description="This is a test description",
    location="US")
foo = gcp.bigquery.Table("foo",
    deletion_protection=False,
    dataset_id=bar.dataset_id,
    table_id="job_query_table")
job = gcp.bigquery.Job("job",
    job_id="job_query",
    labels={
        "example-label": "example-value",
    },
    query={
        "query": "SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
        "destination_table": {
            "table_id": foo.id,
        },
        "default_dataset": {
            "dataset_id": bar.id,
        },
        "allow_large_results": True,
        "flatten_results": True,
        "script_options": {
            "key_result_statement": "LAST",
        },
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		bar, err := bigquery.NewDataset(ctx, "bar", &bigquery.DatasetArgs{
			DatasetId:    pulumi.String("job_query_dataset"),
			FriendlyName: pulumi.String("test"),
			Description:  pulumi.String("This is a test description"),
			Location:     pulumi.String("US"),
		})
		if err != nil {
			return err
		}
		foo, err := bigquery.NewTable(ctx, "foo", &bigquery.TableArgs{
			DeletionProtection: pulumi.Bool(false),
			DatasetId:          bar.DatasetId,
			TableId:            pulumi.String("job_query_table"),
		})
		if err != nil {
			return err
		}
		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
			JobId: pulumi.String("job_query"),
			Labels: pulumi.StringMap{
				"example-label": pulumi.String("example-value"),
			},
			Query: &bigquery.JobQueryArgs{
				Query: pulumi.String("SELECT state FROM [lookerdata:cdc.project_tycho_reports]"),
				DestinationTable: &bigquery.JobQueryDestinationTableArgs{
					TableId: foo.ID(),
				},
				DefaultDataset: &bigquery.JobQueryDefaultDatasetArgs{
					DatasetId: bar.ID(),
				},
				AllowLargeResults: pulumi.Bool(true),
				FlattenResults:    pulumi.Bool(true),
				ScriptOptions: &bigquery.JobQueryScriptOptionsArgs{
					KeyResultStatement: pulumi.String("LAST"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var bar = new Gcp.BigQuery.Dataset("bar", new()
    {
        DatasetId = "job_query_dataset",
        FriendlyName = "test",
        Description = "This is a test description",
        Location = "US",
    });
    var foo = new Gcp.BigQuery.Table("foo", new()
    {
        DeletionProtection = false,
        DatasetId = bar.DatasetId,
        TableId = "job_query_table",
    });
    var job = new Gcp.BigQuery.Job("job", new()
    {
        JobId = "job_query",
        Labels = 
        {
            { "example-label", "example-value" },
        },
        Query = new Gcp.BigQuery.Inputs.JobQueryArgs
        {
            Query = "SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
            DestinationTable = new Gcp.BigQuery.Inputs.JobQueryDestinationTableArgs
            {
                TableId = foo.Id,
            },
            DefaultDataset = new Gcp.BigQuery.Inputs.JobQueryDefaultDatasetArgs
            {
                DatasetId = bar.Id,
            },
            AllowLargeResults = true,
            FlattenResults = true,
            ScriptOptions = new Gcp.BigQuery.Inputs.JobQueryScriptOptionsArgs
            {
                KeyResultStatement = "LAST",
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryDestinationTableArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryDefaultDatasetArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryScriptOptionsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var bar = new Dataset("bar", DatasetArgs.builder()
            .datasetId("job_query_dataset")
            .friendlyName("test")
            .description("This is a test description")
            .location("US")
            .build());
        var foo = new Table("foo", TableArgs.builder()
            .deletionProtection(false)
            .datasetId(bar.datasetId())
            .tableId("job_query_table")
            .build());
        var job = new Job("job", JobArgs.builder()
            .jobId("job_query")
            .labels(Map.of("example-label", "example-value"))
            .query(JobQueryArgs.builder()
                .query("SELECT state FROM [lookerdata:cdc.project_tycho_reports]")
                .destinationTable(JobQueryDestinationTableArgs.builder()
                    .tableId(foo.id())
                    .build())
                .defaultDataset(JobQueryDefaultDatasetArgs.builder()
                    .datasetId(bar.id())
                    .build())
                .allowLargeResults(true)
                .flattenResults(true)
                .scriptOptions(JobQueryScriptOptionsArgs.builder()
                    .keyResultStatement("LAST")
                    .build())
                .build())
            .build());
    }
}
resources:
  foo:
    type: gcp:bigquery:Table
    properties:
      deletionProtection: false
      datasetId: ${bar.datasetId}
      tableId: job_query_table
  bar:
    type: gcp:bigquery:Dataset
    properties:
      datasetId: job_query_dataset
      friendlyName: test
      description: This is a test description
      location: US
  job:
    type: gcp:bigquery:Job
    properties:
      jobId: job_query
      labels:
        example-label: example-value
      query:
        query: SELECT state FROM [lookerdata:cdc.project_tycho_reports]
        destinationTable:
          tableId: ${foo.id}
        defaultDataset:
          datasetId: ${bar.id}
        allowLargeResults: true
        flattenResults: true
        scriptOptions:
          keyResultStatement: LAST
Bigquery Job Load
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const bar = new gcp.bigquery.Dataset("bar", {
    datasetId: "job_load_dataset",
    friendlyName: "test",
    description: "This is a test description",
    location: "US",
});
const foo = new gcp.bigquery.Table("foo", {
    deletionProtection: false,
    datasetId: bar.datasetId,
    tableId: "job_load_table",
});
const job = new gcp.bigquery.Job("job", {
    jobId: "job_load",
    labels: {
        my_job: "load",
    },
    load: {
        sourceUris: ["gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv"],
        destinationTable: {
            projectId: foo.project,
            datasetId: foo.datasetId,
            tableId: foo.tableId,
        },
        skipLeadingRows: 1,
        schemaUpdateOptions: [
            "ALLOW_FIELD_RELAXATION",
            "ALLOW_FIELD_ADDITION",
        ],
        writeDisposition: "WRITE_APPEND",
        autodetect: true,
    },
});
import pulumi
import pulumi_gcp as gcp
bar = gcp.bigquery.Dataset("bar",
    dataset_id="job_load_dataset",
    friendly_name="test",
    description="This is a test description",
    location="US")
foo = gcp.bigquery.Table("foo",
    deletion_protection=False,
    dataset_id=bar.dataset_id,
    table_id="job_load_table")
job = gcp.bigquery.Job("job",
    job_id="job_load",
    labels={
        "my_job": "load",
    },
    load={
        "source_uris": ["gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv"],
        "destination_table": {
            "project_id": foo.project,
            "dataset_id": foo.dataset_id,
            "table_id": foo.table_id,
        },
        "skip_leading_rows": 1,
        "schema_update_options": [
            "ALLOW_FIELD_RELAXATION",
            "ALLOW_FIELD_ADDITION",
        ],
        "write_disposition": "WRITE_APPEND",
        "autodetect": True,
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		bar, err := bigquery.NewDataset(ctx, "bar", &bigquery.DatasetArgs{
			DatasetId:    pulumi.String("job_load_dataset"),
			FriendlyName: pulumi.String("test"),
			Description:  pulumi.String("This is a test description"),
			Location:     pulumi.String("US"),
		})
		if err != nil {
			return err
		}
		foo, err := bigquery.NewTable(ctx, "foo", &bigquery.TableArgs{
			DeletionProtection: pulumi.Bool(false),
			DatasetId:          bar.DatasetId,
			TableId:            pulumi.String("job_load_table"),
		})
		if err != nil {
			return err
		}
		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
			JobId: pulumi.String("job_load"),
			Labels: pulumi.StringMap{
				"my_job": pulumi.String("load"),
			},
			Load: &bigquery.JobLoadArgs{
				SourceUris: pulumi.StringArray{
					pulumi.String("gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv"),
				},
				DestinationTable: &bigquery.JobLoadDestinationTableArgs{
					ProjectId: foo.Project,
					DatasetId: foo.DatasetId,
					TableId:   foo.TableId,
				},
				SkipLeadingRows: pulumi.Int(1),
				SchemaUpdateOptions: pulumi.StringArray{
					pulumi.String("ALLOW_FIELD_RELAXATION"),
					pulumi.String("ALLOW_FIELD_ADDITION"),
				},
				WriteDisposition: pulumi.String("WRITE_APPEND"),
				Autodetect:       pulumi.Bool(true),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var bar = new Gcp.BigQuery.Dataset("bar", new()
    {
        DatasetId = "job_load_dataset",
        FriendlyName = "test",
        Description = "This is a test description",
        Location = "US",
    });
    var foo = new Gcp.BigQuery.Table("foo", new()
    {
        DeletionProtection = false,
        DatasetId = bar.DatasetId,
        TableId = "job_load_table",
    });
    var job = new Gcp.BigQuery.Job("job", new()
    {
        JobId = "job_load",
        Labels = 
        {
            { "my_job", "load" },
        },
        Load = new Gcp.BigQuery.Inputs.JobLoadArgs
        {
            SourceUris = new[]
            {
                "gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv",
            },
            DestinationTable = new Gcp.BigQuery.Inputs.JobLoadDestinationTableArgs
            {
                ProjectId = foo.Project,
                DatasetId = foo.DatasetId,
                TableId = foo.TableId,
            },
            SkipLeadingRows = 1,
            SchemaUpdateOptions = new[]
            {
                "ALLOW_FIELD_RELAXATION",
                "ALLOW_FIELD_ADDITION",
            },
            WriteDisposition = "WRITE_APPEND",
            Autodetect = true,
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadDestinationTableArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var bar = new Dataset("bar", DatasetArgs.builder()
            .datasetId("job_load_dataset")
            .friendlyName("test")
            .description("This is a test description")
            .location("US")
            .build());
        var foo = new Table("foo", TableArgs.builder()
            .deletionProtection(false)
            .datasetId(bar.datasetId())
            .tableId("job_load_table")
            .build());
        var job = new Job("job", JobArgs.builder()
            .jobId("job_load")
            .labels(Map.of("my_job", "load"))
            .load(JobLoadArgs.builder()
                .sourceUris("gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv")
                .destinationTable(JobLoadDestinationTableArgs.builder()
                    .projectId(foo.project())
                    .datasetId(foo.datasetId())
                    .tableId(foo.tableId())
                    .build())
                .skipLeadingRows(1)
                .schemaUpdateOptions(                
                    "ALLOW_FIELD_RELAXATION",
                    "ALLOW_FIELD_ADDITION")
                .writeDisposition("WRITE_APPEND")
                .autodetect(true)
                .build())
            .build());
    }
}
resources:
  foo:
    type: gcp:bigquery:Table
    properties:
      deletionProtection: false
      datasetId: ${bar.datasetId}
      tableId: job_load_table
  bar:
    type: gcp:bigquery:Dataset
    properties:
      datasetId: job_load_dataset
      friendlyName: test
      description: This is a test description
      location: US
  job:
    type: gcp:bigquery:Job
    properties:
      jobId: job_load
      labels:
        my_job: load
      load:
        sourceUris:
          - gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv
        destinationTable:
          projectId: ${foo.project}
          datasetId: ${foo.datasetId}
          tableId: ${foo.tableId}
        skipLeadingRows: 1
        schemaUpdateOptions:
          - ALLOW_FIELD_RELAXATION
          - ALLOW_FIELD_ADDITION
        writeDisposition: WRITE_APPEND
        autodetect: true
Bigquery Job Load Geojson
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const project = "my-project-name";
const bucket = new gcp.storage.Bucket("bucket", {
    name: `${project}-bq-geojson`,
    location: "US",
    uniformBucketLevelAccess: true,
});
const object = new gcp.storage.BucketObject("object", {
    name: "geojson-data.jsonl",
    bucket: bucket.name,
    content: `{"type":"Feature","properties":{"continent":"Europe","region":"Scandinavia"},"geometry":{"type":"Polygon","coordinates":[[[-30.94,53.33],[33.05,53.33],[33.05,71.86],[-30.94,71.86],[-30.94,53.33]]]}}
{"type":"Feature","properties":{"continent":"Africa","region":"West Africa"},"geometry":{"type":"Polygon","coordinates":[[[-23.91,0],[11.95,0],[11.95,18.98],[-23.91,18.98],[-23.91,0]]]}}
`,
});
const bar = new gcp.bigquery.Dataset("bar", {
    datasetId: "job_load_dataset",
    friendlyName: "test",
    description: "This is a test description",
    location: "US",
});
const foo = new gcp.bigquery.Table("foo", {
    deletionProtection: false,
    datasetId: bar.datasetId,
    tableId: "job_load_table",
});
const job = new gcp.bigquery.Job("job", {
    jobId: "job_load",
    labels: {
        my_job: "load",
    },
    load: {
        sourceUris: [pulumi.interpolate`gs://${object.bucket}/${object.name}`],
        destinationTable: {
            projectId: foo.project,
            datasetId: foo.datasetId,
            tableId: foo.tableId,
        },
        writeDisposition: "WRITE_TRUNCATE",
        autodetect: true,
        sourceFormat: "NEWLINE_DELIMITED_JSON",
        jsonExtension: "GEOJSON",
    },
}, {
    dependsOn: [object],
});
import pulumi
import pulumi_gcp as gcp
project = "my-project-name"
bucket = gcp.storage.Bucket("bucket",
    name=f"{project}-bq-geojson",
    location="US",
    uniform_bucket_level_access=True)
object = gcp.storage.BucketObject("object",
    name="geojson-data.jsonl",
    bucket=bucket.name,
    content="""{"type":"Feature","properties":{"continent":"Europe","region":"Scandinavia"},"geometry":{"type":"Polygon","coordinates":[[[-30.94,53.33],[33.05,53.33],[33.05,71.86],[-30.94,71.86],[-30.94,53.33]]]}}
{"type":"Feature","properties":{"continent":"Africa","region":"West Africa"},"geometry":{"type":"Polygon","coordinates":[[[-23.91,0],[11.95,0],[11.95,18.98],[-23.91,18.98],[-23.91,0]]]}}
""")
bar = gcp.bigquery.Dataset("bar",
    dataset_id="job_load_dataset",
    friendly_name="test",
    description="This is a test description",
    location="US")
foo = gcp.bigquery.Table("foo",
    deletion_protection=False,
    dataset_id=bar.dataset_id,
    table_id="job_load_table")
job = gcp.bigquery.Job("job",
    job_id="job_load",
    labels={
        "my_job": "load",
    },
    load={
        "source_uris": [pulumi.Output.all(
            bucket=object.bucket,
            name=object.name
).apply(lambda resolved_outputs: f"gs://{resolved_outputs['bucket']}/{resolved_outputs['name']}")
],
        "destination_table": {
            "project_id": foo.project,
            "dataset_id": foo.dataset_id,
            "table_id": foo.table_id,
        },
        "write_disposition": "WRITE_TRUNCATE",
        "autodetect": True,
        "source_format": "NEWLINE_DELIMITED_JSON",
        "json_extension": "GEOJSON",
    },
    opts = pulumi.ResourceOptions(depends_on=[object]))
package main
import (
	"fmt"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/storage"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		project := "my-project-name"
		bucket, err := storage.NewBucket(ctx, "bucket", &storage.BucketArgs{
			Name:                     pulumi.Sprintf("%v-bq-geojson", project),
			Location:                 pulumi.String("US"),
			UniformBucketLevelAccess: pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		object, err := storage.NewBucketObject(ctx, "object", &storage.BucketObjectArgs{
			Name:    pulumi.String("geojson-data.jsonl"),
			Bucket:  bucket.Name,
			Content: pulumi.String("{\"type\":\"Feature\",\"properties\":{\"continent\":\"Europe\",\"region\":\"Scandinavia\"},\"geometry\":{\"type\":\"Polygon\",\"coordinates\":[[[-30.94,53.33],[33.05,53.33],[33.05,71.86],[-30.94,71.86],[-30.94,53.33]]]}}\n{\"type\":\"Feature\",\"properties\":{\"continent\":\"Africa\",\"region\":\"West Africa\"},\"geometry\":{\"type\":\"Polygon\",\"coordinates\":[[[-23.91,0],[11.95,0],[11.95,18.98],[-23.91,18.98],[-23.91,0]]]}}\n"),
		})
		if err != nil {
			return err
		}
		bar, err := bigquery.NewDataset(ctx, "bar", &bigquery.DatasetArgs{
			DatasetId:    pulumi.String("job_load_dataset"),
			FriendlyName: pulumi.String("test"),
			Description:  pulumi.String("This is a test description"),
			Location:     pulumi.String("US"),
		})
		if err != nil {
			return err
		}
		foo, err := bigquery.NewTable(ctx, "foo", &bigquery.TableArgs{
			DeletionProtection: pulumi.Bool(false),
			DatasetId:          bar.DatasetId,
			TableId:            pulumi.String("job_load_table"),
		})
		if err != nil {
			return err
		}
		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
			JobId: pulumi.String("job_load"),
			Labels: pulumi.StringMap{
				"my_job": pulumi.String("load"),
			},
			Load: &bigquery.JobLoadArgs{
				SourceUris: pulumi.StringArray{
					pulumi.All(object.Bucket, object.Name).ApplyT(func(_args []interface{}) (string, error) {
						bucket := _args[0].(string)
						name := _args[1].(string)
						return fmt.Sprintf("gs://%v/%v", bucket, name), nil
					}).(pulumi.StringOutput),
				},
				DestinationTable: &bigquery.JobLoadDestinationTableArgs{
					ProjectId: foo.Project,
					DatasetId: foo.DatasetId,
					TableId:   foo.TableId,
				},
				WriteDisposition: pulumi.String("WRITE_TRUNCATE"),
				Autodetect:       pulumi.Bool(true),
				SourceFormat:     pulumi.String("NEWLINE_DELIMITED_JSON"),
				JsonExtension:    pulumi.String("GEOJSON"),
			},
		}, pulumi.DependsOn([]pulumi.Resource{
			object,
		}))
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var project = "my-project-name";
    var bucket = new Gcp.Storage.Bucket("bucket", new()
    {
        Name = $"{project}-bq-geojson",
        Location = "US",
        UniformBucketLevelAccess = true,
    });
    var @object = new Gcp.Storage.BucketObject("object", new()
    {
        Name = "geojson-data.jsonl",
        Bucket = bucket.Name,
        Content = @"{""type"":""Feature"",""properties"":{""continent"":""Europe"",""region"":""Scandinavia""},""geometry"":{""type"":""Polygon"",""coordinates"":[[[-30.94,53.33],[33.05,53.33],[33.05,71.86],[-30.94,71.86],[-30.94,53.33]]]}}
{""type"":""Feature"",""properties"":{""continent"":""Africa"",""region"":""West Africa""},""geometry"":{""type"":""Polygon"",""coordinates"":[[[-23.91,0],[11.95,0],[11.95,18.98],[-23.91,18.98],[-23.91,0]]]}}
",
    });
    var bar = new Gcp.BigQuery.Dataset("bar", new()
    {
        DatasetId = "job_load_dataset",
        FriendlyName = "test",
        Description = "This is a test description",
        Location = "US",
    });
    var foo = new Gcp.BigQuery.Table("foo", new()
    {
        DeletionProtection = false,
        DatasetId = bar.DatasetId,
        TableId = "job_load_table",
    });
    var job = new Gcp.BigQuery.Job("job", new()
    {
        JobId = "job_load",
        Labels = 
        {
            { "my_job", "load" },
        },
        Load = new Gcp.BigQuery.Inputs.JobLoadArgs
        {
            SourceUris = new[]
            {
                Output.Tuple(@object.Bucket, @object.Name).Apply(values =>
                {
                    var bucket = values.Item1;
                    var name = values.Item2;
                    return $"gs://{bucket}/{name}";
                }),
            },
            DestinationTable = new Gcp.BigQuery.Inputs.JobLoadDestinationTableArgs
            {
                ProjectId = foo.Project,
                DatasetId = foo.DatasetId,
                TableId = foo.TableId,
            },
            WriteDisposition = "WRITE_TRUNCATE",
            Autodetect = true,
            SourceFormat = "NEWLINE_DELIMITED_JSON",
            JsonExtension = "GEOJSON",
        },
    }, new CustomResourceOptions
    {
        DependsOn =
        {
            @object,
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.storage.Bucket;
import com.pulumi.gcp.storage.BucketArgs;
import com.pulumi.gcp.storage.BucketObject;
import com.pulumi.gcp.storage.BucketObjectArgs;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadDestinationTableArgs;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        final var project = "my-project-name";
        var bucket = new Bucket("bucket", BucketArgs.builder()
            .name(String.format("%s-bq-geojson", project))
            .location("US")
            .uniformBucketLevelAccess(true)
            .build());
        var object = new BucketObject("object", BucketObjectArgs.builder()
            .name("geojson-data.jsonl")
            .bucket(bucket.name())
            .content("""
{"type":"Feature","properties":{"continent":"Europe","region":"Scandinavia"},"geometry":{"type":"Polygon","coordinates":[[[-30.94,53.33],[33.05,53.33],[33.05,71.86],[-30.94,71.86],[-30.94,53.33]]]}}
{"type":"Feature","properties":{"continent":"Africa","region":"West Africa"},"geometry":{"type":"Polygon","coordinates":[[[-23.91,0],[11.95,0],[11.95,18.98],[-23.91,18.98],[-23.91,0]]]}}
            """)
            .build());
        var bar = new Dataset("bar", DatasetArgs.builder()
            .datasetId("job_load_dataset")
            .friendlyName("test")
            .description("This is a test description")
            .location("US")
            .build());
        var foo = new Table("foo", TableArgs.builder()
            .deletionProtection(false)
            .datasetId(bar.datasetId())
            .tableId("job_load_table")
            .build());
        var job = new Job("job", JobArgs.builder()
            .jobId("job_load")
            .labels(Map.of("my_job", "load"))
            .load(JobLoadArgs.builder()
                .sourceUris(Output.tuple(object.bucket(), object.name()).applyValue(values -> {
                    var bucket = values.t1;
                    var name = values.t2;
                    return String.format("gs://%s/%s", bucket,name);
                }))
                .destinationTable(JobLoadDestinationTableArgs.builder()
                    .projectId(foo.project())
                    .datasetId(foo.datasetId())
                    .tableId(foo.tableId())
                    .build())
                .writeDisposition("WRITE_TRUNCATE")
                .autodetect(true)
                .sourceFormat("NEWLINE_DELIMITED_JSON")
                .jsonExtension("GEOJSON")
                .build())
            .build(), CustomResourceOptions.builder()
                .dependsOn(object)
                .build());
    }
}
resources:
  bucket:
    type: gcp:storage:Bucket
    properties:
      name: ${project}-bq-geojson
      location: US
      uniformBucketLevelAccess: true
  object:
    type: gcp:storage:BucketObject
    properties:
      name: geojson-data.jsonl
      bucket: ${bucket.name}
      content: |
        {"type":"Feature","properties":{"continent":"Europe","region":"Scandinavia"},"geometry":{"type":"Polygon","coordinates":[[[-30.94,53.33],[33.05,53.33],[33.05,71.86],[-30.94,71.86],[-30.94,53.33]]]}}
        {"type":"Feature","properties":{"continent":"Africa","region":"West Africa"},"geometry":{"type":"Polygon","coordinates":[[[-23.91,0],[11.95,0],[11.95,18.98],[-23.91,18.98],[-23.91,0]]]}}        
  foo:
    type: gcp:bigquery:Table
    properties:
      deletionProtection: false
      datasetId: ${bar.datasetId}
      tableId: job_load_table
  bar:
    type: gcp:bigquery:Dataset
    properties:
      datasetId: job_load_dataset
      friendlyName: test
      description: This is a test description
      location: US
  job:
    type: gcp:bigquery:Job
    properties:
      jobId: job_load
      labels:
        my_job: load
      load:
        sourceUris:
          - gs://${object.bucket}/${object.name}
        destinationTable:
          projectId: ${foo.project}
          datasetId: ${foo.datasetId}
          tableId: ${foo.tableId}
        writeDisposition: WRITE_TRUNCATE
        autodetect: true
        sourceFormat: NEWLINE_DELIMITED_JSON
        jsonExtension: GEOJSON
    options:
      dependsOn:
        - ${object}
variables:
  project: my-project-name
Bigquery Job Load Parquet
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const test = new gcp.storage.Bucket("test", {
    name: "job_load_bucket",
    location: "US",
    uniformBucketLevelAccess: true,
});
const testBucketObject = new gcp.storage.BucketObject("test", {
    name: "job_load_bucket_object",
    source: new pulumi.asset.FileAsset("./test-fixtures/test.parquet.gzip"),
    bucket: test.name,
});
const testDataset = new gcp.bigquery.Dataset("test", {
    datasetId: "job_load_dataset",
    friendlyName: "test",
    description: "This is a test description",
    location: "US",
});
const testTable = new gcp.bigquery.Table("test", {
    deletionProtection: false,
    tableId: "job_load_table",
    datasetId: testDataset.datasetId,
});
const job = new gcp.bigquery.Job("job", {
    jobId: "job_load",
    labels: {
        my_job: "load",
    },
    load: {
        sourceUris: [pulumi.interpolate`gs://${testBucketObject.bucket}/${testBucketObject.name}`],
        destinationTable: {
            projectId: testTable.project,
            datasetId: testTable.datasetId,
            tableId: testTable.tableId,
        },
        schemaUpdateOptions: [
            "ALLOW_FIELD_RELAXATION",
            "ALLOW_FIELD_ADDITION",
        ],
        writeDisposition: "WRITE_APPEND",
        sourceFormat: "PARQUET",
        autodetect: true,
        parquetOptions: {
            enumAsString: true,
            enableListInference: true,
        },
    },
});
import pulumi
import pulumi_gcp as gcp
test = gcp.storage.Bucket("test",
    name="job_load_bucket",
    location="US",
    uniform_bucket_level_access=True)
test_bucket_object = gcp.storage.BucketObject("test",
    name="job_load_bucket_object",
    source=pulumi.FileAsset("./test-fixtures/test.parquet.gzip"),
    bucket=test.name)
test_dataset = gcp.bigquery.Dataset("test",
    dataset_id="job_load_dataset",
    friendly_name="test",
    description="This is a test description",
    location="US")
test_table = gcp.bigquery.Table("test",
    deletion_protection=False,
    table_id="job_load_table",
    dataset_id=test_dataset.dataset_id)
job = gcp.bigquery.Job("job",
    job_id="job_load",
    labels={
        "my_job": "load",
    },
    load={
        "source_uris": [pulumi.Output.all(
            bucket=test_bucket_object.bucket,
            name=test_bucket_object.name
).apply(lambda resolved_outputs: f"gs://{resolved_outputs['bucket']}/{resolved_outputs['name']}")
],
        "destination_table": {
            "project_id": test_table.project,
            "dataset_id": test_table.dataset_id,
            "table_id": test_table.table_id,
        },
        "schema_update_options": [
            "ALLOW_FIELD_RELAXATION",
            "ALLOW_FIELD_ADDITION",
        ],
        "write_disposition": "WRITE_APPEND",
        "source_format": "PARQUET",
        "autodetect": True,
        "parquet_options": {
            "enum_as_string": True,
            "enable_list_inference": True,
        },
    })
package main
import (
	"fmt"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/storage"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		test, err := storage.NewBucket(ctx, "test", &storage.BucketArgs{
			Name:                     pulumi.String("job_load_bucket"),
			Location:                 pulumi.String("US"),
			UniformBucketLevelAccess: pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		testBucketObject, err := storage.NewBucketObject(ctx, "test", &storage.BucketObjectArgs{
			Name:   pulumi.String("job_load_bucket_object"),
			Source: pulumi.NewFileAsset("./test-fixtures/test.parquet.gzip"),
			Bucket: test.Name,
		})
		if err != nil {
			return err
		}
		testDataset, err := bigquery.NewDataset(ctx, "test", &bigquery.DatasetArgs{
			DatasetId:    pulumi.String("job_load_dataset"),
			FriendlyName: pulumi.String("test"),
			Description:  pulumi.String("This is a test description"),
			Location:     pulumi.String("US"),
		})
		if err != nil {
			return err
		}
		testTable, err := bigquery.NewTable(ctx, "test", &bigquery.TableArgs{
			DeletionProtection: pulumi.Bool(false),
			TableId:            pulumi.String("job_load_table"),
			DatasetId:          testDataset.DatasetId,
		})
		if err != nil {
			return err
		}
		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
			JobId: pulumi.String("job_load"),
			Labels: pulumi.StringMap{
				"my_job": pulumi.String("load"),
			},
			Load: &bigquery.JobLoadArgs{
				SourceUris: pulumi.StringArray{
					pulumi.All(testBucketObject.Bucket, testBucketObject.Name).ApplyT(func(_args []interface{}) (string, error) {
						bucket := _args[0].(string)
						name := _args[1].(string)
						return fmt.Sprintf("gs://%v/%v", bucket, name), nil
					}).(pulumi.StringOutput),
				},
				DestinationTable: &bigquery.JobLoadDestinationTableArgs{
					ProjectId: testTable.Project,
					DatasetId: testTable.DatasetId,
					TableId:   testTable.TableId,
				},
				SchemaUpdateOptions: pulumi.StringArray{
					pulumi.String("ALLOW_FIELD_RELAXATION"),
					pulumi.String("ALLOW_FIELD_ADDITION"),
				},
				WriteDisposition: pulumi.String("WRITE_APPEND"),
				SourceFormat:     pulumi.String("PARQUET"),
				Autodetect:       pulumi.Bool(true),
				ParquetOptions: &bigquery.JobLoadParquetOptionsArgs{
					EnumAsString:        pulumi.Bool(true),
					EnableListInference: pulumi.Bool(true),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var test = new Gcp.Storage.Bucket("test", new()
    {
        Name = "job_load_bucket",
        Location = "US",
        UniformBucketLevelAccess = true,
    });
    var testBucketObject = new Gcp.Storage.BucketObject("test", new()
    {
        Name = "job_load_bucket_object",
        Source = new FileAsset("./test-fixtures/test.parquet.gzip"),
        Bucket = test.Name,
    });
    var testDataset = new Gcp.BigQuery.Dataset("test", new()
    {
        DatasetId = "job_load_dataset",
        FriendlyName = "test",
        Description = "This is a test description",
        Location = "US",
    });
    var testTable = new Gcp.BigQuery.Table("test", new()
    {
        DeletionProtection = false,
        TableId = "job_load_table",
        DatasetId = testDataset.DatasetId,
    });
    var job = new Gcp.BigQuery.Job("job", new()
    {
        JobId = "job_load",
        Labels = 
        {
            { "my_job", "load" },
        },
        Load = new Gcp.BigQuery.Inputs.JobLoadArgs
        {
            SourceUris = new[]
            {
                Output.Tuple(testBucketObject.Bucket, testBucketObject.Name).Apply(values =>
                {
                    var bucket = values.Item1;
                    var name = values.Item2;
                    return $"gs://{bucket}/{name}";
                }),
            },
            DestinationTable = new Gcp.BigQuery.Inputs.JobLoadDestinationTableArgs
            {
                ProjectId = testTable.Project,
                DatasetId = testTable.DatasetId,
                TableId = testTable.TableId,
            },
            SchemaUpdateOptions = new[]
            {
                "ALLOW_FIELD_RELAXATION",
                "ALLOW_FIELD_ADDITION",
            },
            WriteDisposition = "WRITE_APPEND",
            SourceFormat = "PARQUET",
            Autodetect = true,
            ParquetOptions = new Gcp.BigQuery.Inputs.JobLoadParquetOptionsArgs
            {
                EnumAsString = true,
                EnableListInference = true,
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.storage.Bucket;
import com.pulumi.gcp.storage.BucketArgs;
import com.pulumi.gcp.storage.BucketObject;
import com.pulumi.gcp.storage.BucketObjectArgs;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadDestinationTableArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadParquetOptionsArgs;
import com.pulumi.asset.FileAsset;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var test = new Bucket("test", BucketArgs.builder()
            .name("job_load_bucket")
            .location("US")
            .uniformBucketLevelAccess(true)
            .build());
        var testBucketObject = new BucketObject("testBucketObject", BucketObjectArgs.builder()
            .name("job_load_bucket_object")
            .source(new FileAsset("./test-fixtures/test.parquet.gzip"))
            .bucket(test.name())
            .build());
        var testDataset = new Dataset("testDataset", DatasetArgs.builder()
            .datasetId("job_load_dataset")
            .friendlyName("test")
            .description("This is a test description")
            .location("US")
            .build());
        var testTable = new Table("testTable", TableArgs.builder()
            .deletionProtection(false)
            .tableId("job_load_table")
            .datasetId(testDataset.datasetId())
            .build());
        var job = new Job("job", JobArgs.builder()
            .jobId("job_load")
            .labels(Map.of("my_job", "load"))
            .load(JobLoadArgs.builder()
                .sourceUris(Output.tuple(testBucketObject.bucket(), testBucketObject.name()).applyValue(values -> {
                    var bucket = values.t1;
                    var name = values.t2;
                    return String.format("gs://%s/%s", bucket,name);
                }))
                .destinationTable(JobLoadDestinationTableArgs.builder()
                    .projectId(testTable.project())
                    .datasetId(testTable.datasetId())
                    .tableId(testTable.tableId())
                    .build())
                .schemaUpdateOptions(                
                    "ALLOW_FIELD_RELAXATION",
                    "ALLOW_FIELD_ADDITION")
                .writeDisposition("WRITE_APPEND")
                .sourceFormat("PARQUET")
                .autodetect(true)
                .parquetOptions(JobLoadParquetOptionsArgs.builder()
                    .enumAsString(true)
                    .enableListInference(true)
                    .build())
                .build())
            .build());
    }
}
resources:
  test:
    type: gcp:storage:Bucket
    properties:
      name: job_load_bucket
      location: US
      uniformBucketLevelAccess: true
  testBucketObject:
    type: gcp:storage:BucketObject
    name: test
    properties:
      name: job_load_bucket_object
      source:
        fn::FileAsset: ./test-fixtures/test.parquet.gzip
      bucket: ${test.name}
  testDataset:
    type: gcp:bigquery:Dataset
    name: test
    properties:
      datasetId: job_load_dataset
      friendlyName: test
      description: This is a test description
      location: US
  testTable:
    type: gcp:bigquery:Table
    name: test
    properties:
      deletionProtection: false
      tableId: job_load_table
      datasetId: ${testDataset.datasetId}
  job:
    type: gcp:bigquery:Job
    properties:
      jobId: job_load
      labels:
        my_job: load
      load:
        sourceUris:
          - gs://${testBucketObject.bucket}/${testBucketObject.name}
        destinationTable:
          projectId: ${testTable.project}
          datasetId: ${testTable.datasetId}
          tableId: ${testTable.tableId}
        schemaUpdateOptions:
          - ALLOW_FIELD_RELAXATION
          - ALLOW_FIELD_ADDITION
        writeDisposition: WRITE_APPEND
        sourceFormat: PARQUET
        autodetect: true
        parquetOptions:
          enumAsString: true
          enableListInference: true
Bigquery Job Copy
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const count = 2;
const sourceDataset: gcp.bigquery.Dataset[] = [];
for (const range = {value: 0}; range.value < count; range.value++) {
    sourceDataset.push(new gcp.bigquery.Dataset(`source-${range.value}`, {
        datasetId: `job_copy_${range.value}_dataset`,
        friendlyName: "test",
        description: "This is a test description",
        location: "US",
    }));
}
const source: gcp.bigquery.Table[] = [];
for (const range = {value: 0}; range.value < count; range.value++) {
    source.push(new gcp.bigquery.Table(`source-${range.value}`, {
        datasetId: sourceDataset[range.value].datasetId,
        tableId: `job_copy_${range.value}_table`,
        deletionProtection: false,
        schema: `[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
`,
    }));
}
const destDataset = new gcp.bigquery.Dataset("dest", {
    datasetId: "job_copy_dest_dataset",
    friendlyName: "test",
    description: "This is a test description",
    location: "US",
});
const project = gcp.organizations.getProject({
    projectId: "my-project-name",
});
const encryptRole = new gcp.kms.CryptoKeyIAMMember("encrypt_role", {
    cryptoKeyId: "example-key",
    role: "roles/cloudkms.cryptoKeyEncrypterDecrypter",
    member: project.then(project => `serviceAccount:bq-${project.number}@bigquery-encryption.iam.gserviceaccount.com`),
});
const dest = new gcp.bigquery.Table("dest", {
    deletionProtection: false,
    datasetId: destDataset.datasetId,
    tableId: "job_copy_dest_table",
    schema: `[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
`,
    encryptionConfiguration: {
        kmsKeyName: "example-key",
    },
}, {
    dependsOn: [encryptRole],
});
const job = new gcp.bigquery.Job("job", {
    jobId: "job_copy",
    copy: {
        sourceTables: [
            {
                projectId: source[0].project,
                datasetId: source[0].datasetId,
                tableId: source[0].tableId,
            },
            {
                projectId: source[1].project,
                datasetId: source[1].datasetId,
                tableId: source[1].tableId,
            },
        ],
        destinationTable: {
            projectId: dest.project,
            datasetId: dest.datasetId,
            tableId: dest.tableId,
        },
        destinationEncryptionConfiguration: {
            kmsKeyName: "example-key",
        },
    },
}, {
    dependsOn: [encryptRole],
});
import pulumi
import pulumi_gcp as gcp
count = 2
source_dataset = []
for i in range(0, count):
    source_dataset.append(gcp.bigquery.Dataset(f"source-{i}",
        dataset_id=f"job_copy_{i}_dataset",
        friendly_name="test",
        description="This is a test description",
        location="US"))
source = []
for i in range(0, count):
    source.append(gcp.bigquery.Table(f"source-{i}",
        dataset_id=source_dataset[i].dataset_id,
        table_id=f"job_copy_{i}_table",
        deletion_protection=False,
        schema="""[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
"""))
dest_dataset = gcp.bigquery.Dataset("dest",
    dataset_id="job_copy_dest_dataset",
    friendly_name="test",
    description="This is a test description",
    location="US")
project = gcp.organizations.get_project(project_id="my-project-name")
encrypt_role = gcp.kms.CryptoKeyIAMMember("encrypt_role",
    crypto_key_id="example-key",
    role="roles/cloudkms.cryptoKeyEncrypterDecrypter",
    member=f"serviceAccount:bq-{project.number}@bigquery-encryption.iam.gserviceaccount.com")
dest = gcp.bigquery.Table("dest",
    deletion_protection=False,
    dataset_id=dest_dataset.dataset_id,
    table_id="job_copy_dest_table",
    schema="""[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
""",
    encryption_configuration={
        "kms_key_name": "example-key",
    },
    opts = pulumi.ResourceOptions(depends_on=[encrypt_role]))
job = gcp.bigquery.Job("job",
    job_id="job_copy",
    copy={
        "source_tables": [
            {
                "project_id": source[0].project,
                "dataset_id": source[0].dataset_id,
                "table_id": source[0].table_id,
            },
            {
                "project_id": source[1].project,
                "dataset_id": source[1].dataset_id,
                "table_id": source[1].table_id,
            },
        ],
        "destination_table": {
            "project_id": dest.project,
            "dataset_id": dest.dataset_id,
            "table_id": dest.table_id,
        },
        "destination_encryption_configuration": {
            "kms_key_name": "example-key",
        },
    },
    opts = pulumi.ResourceOptions(depends_on=[encrypt_role]))
package main
import (
	"fmt"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/kms"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		count := 2
		var sourceDataset []*bigquery.Dataset
		for index := 0; index < count; index++ {
			key0 := index
			val0 := index
			__res, err := bigquery.NewDataset(ctx, fmt.Sprintf("source-%v", key0), &bigquery.DatasetArgs{
				DatasetId:    pulumi.Sprintf("job_copy_%v_dataset", val0),
				FriendlyName: pulumi.String("test"),
				Description:  pulumi.String("This is a test description"),
				Location:     pulumi.String("US"),
			})
			if err != nil {
				return err
			}
			sourceDataset = append(sourceDataset, __res)
		}
		var source []*bigquery.Table
		for index := 0; index < count; index++ {
			key0 := index
			val0 := index
			__res, err := bigquery.NewTable(ctx, fmt.Sprintf("source-%v", key0), &bigquery.TableArgs{
				DatasetId:          sourceDataset[val0].DatasetId,
				TableId:            pulumi.Sprintf("job_copy_%v_table", val0),
				DeletionProtection: pulumi.Bool(false),
				Schema: pulumi.String(`[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
`),
			})
			if err != nil {
				return err
			}
			source = append(source, __res)
		}
		destDataset, err := bigquery.NewDataset(ctx, "dest", &bigquery.DatasetArgs{
			DatasetId:    pulumi.String("job_copy_dest_dataset"),
			FriendlyName: pulumi.String("test"),
			Description:  pulumi.String("This is a test description"),
			Location:     pulumi.String("US"),
		})
		if err != nil {
			return err
		}
		project, err := organizations.LookupProject(ctx, &organizations.LookupProjectArgs{
			ProjectId: pulumi.StringRef("my-project-name"),
		}, nil)
		if err != nil {
			return err
		}
		encryptRole, err := kms.NewCryptoKeyIAMMember(ctx, "encrypt_role", &kms.CryptoKeyIAMMemberArgs{
			CryptoKeyId: pulumi.String("example-key"),
			Role:        pulumi.String("roles/cloudkms.cryptoKeyEncrypterDecrypter"),
			Member:      pulumi.Sprintf("serviceAccount:bq-%v@bigquery-encryption.iam.gserviceaccount.com", project.Number),
		})
		if err != nil {
			return err
		}
		dest, err := bigquery.NewTable(ctx, "dest", &bigquery.TableArgs{
			DeletionProtection: pulumi.Bool(false),
			DatasetId:          destDataset.DatasetId,
			TableId:            pulumi.String("job_copy_dest_table"),
			Schema: pulumi.String(`[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
`),
			EncryptionConfiguration: &bigquery.TableEncryptionConfigurationArgs{
				KmsKeyName: pulumi.String("example-key"),
			},
		}, pulumi.DependsOn([]pulumi.Resource{
			encryptRole,
		}))
		if err != nil {
			return err
		}
		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
			JobId: pulumi.String("job_copy"),
			Copy: &bigquery.JobCopyArgs{
				SourceTables: bigquery.JobCopySourceTableArray{
					&bigquery.JobCopySourceTableArgs{
						ProjectId: source[0].Project,
						DatasetId: source[0].DatasetId,
						TableId:   source[0].TableId,
					},
					&bigquery.JobCopySourceTableArgs{
						ProjectId: source[1].Project,
						DatasetId: source[1].DatasetId,
						TableId:   source[1].TableId,
					},
				},
				DestinationTable: &bigquery.JobCopyDestinationTableArgs{
					ProjectId: dest.Project,
					DatasetId: dest.DatasetId,
					TableId:   dest.TableId,
				},
				DestinationEncryptionConfiguration: &bigquery.JobCopyDestinationEncryptionConfigurationArgs{
					KmsKeyName: pulumi.String("example-key"),
				},
			},
		}, pulumi.DependsOn([]pulumi.Resource{
			encryptRole,
		}))
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var count = 2;
    var sourceDataset = new List<Gcp.BigQuery.Dataset>();
    for (var rangeIndex = 0; rangeIndex < count; rangeIndex++)
    {
        var range = new { Value = rangeIndex };
        sourceDataset.Add(new Gcp.BigQuery.Dataset($"source-{range.Value}", new()
        {
            DatasetId = $"job_copy_{range.Value}_dataset",
            FriendlyName = "test",
            Description = "This is a test description",
            Location = "US",
        }));
    }
    var source = new List<Gcp.BigQuery.Table>();
    for (var rangeIndex = 0; rangeIndex < count; rangeIndex++)
    {
        var range = new { Value = rangeIndex };
        source.Add(new Gcp.BigQuery.Table($"source-{range.Value}", new()
        {
            DatasetId = sourceDataset[range.Value].DatasetId,
            TableId = $"job_copy_{range.Value}_table",
            DeletionProtection = false,
            Schema = @"[
  {
    ""name"": ""name"",
    ""type"": ""STRING"",
    ""mode"": ""NULLABLE""
  },
  {
    ""name"": ""post_abbr"",
    ""type"": ""STRING"",
    ""mode"": ""NULLABLE""
  },
  {
    ""name"": ""date"",
    ""type"": ""DATE"",
    ""mode"": ""NULLABLE""
  }
]
",
        }));
    }
    var destDataset = new Gcp.BigQuery.Dataset("dest", new()
    {
        DatasetId = "job_copy_dest_dataset",
        FriendlyName = "test",
        Description = "This is a test description",
        Location = "US",
    });
    var project = Gcp.Organizations.GetProject.Invoke(new()
    {
        ProjectId = "my-project-name",
    });
    var encryptRole = new Gcp.Kms.CryptoKeyIAMMember("encrypt_role", new()
    {
        CryptoKeyId = "example-key",
        Role = "roles/cloudkms.cryptoKeyEncrypterDecrypter",
        Member = $"serviceAccount:bq-{project.Apply(getProjectResult => getProjectResult.Number)}@bigquery-encryption.iam.gserviceaccount.com",
    });
    var dest = new Gcp.BigQuery.Table("dest", new()
    {
        DeletionProtection = false,
        DatasetId = destDataset.DatasetId,
        TableId = "job_copy_dest_table",
        Schema = @"[
  {
    ""name"": ""name"",
    ""type"": ""STRING"",
    ""mode"": ""NULLABLE""
  },
  {
    ""name"": ""post_abbr"",
    ""type"": ""STRING"",
    ""mode"": ""NULLABLE""
  },
  {
    ""name"": ""date"",
    ""type"": ""DATE"",
    ""mode"": ""NULLABLE""
  }
]
",
        EncryptionConfiguration = new Gcp.BigQuery.Inputs.TableEncryptionConfigurationArgs
        {
            KmsKeyName = "example-key",
        },
    }, new CustomResourceOptions
    {
        DependsOn =
        {
            encryptRole,
        },
    });
    var job = new Gcp.BigQuery.Job("job", new()
    {
        JobId = "job_copy",
        Copy = new Gcp.BigQuery.Inputs.JobCopyArgs
        {
            SourceTables = new[]
            {
                new Gcp.BigQuery.Inputs.JobCopySourceTableArgs
                {
                    ProjectId = source[0].Project,
                    DatasetId = source[0].DatasetId,
                    TableId = source[0].TableId,
                },
                new Gcp.BigQuery.Inputs.JobCopySourceTableArgs
                {
                    ProjectId = source[1].Project,
                    DatasetId = source[1].DatasetId,
                    TableId = source[1].TableId,
                },
            },
            DestinationTable = new Gcp.BigQuery.Inputs.JobCopyDestinationTableArgs
            {
                ProjectId = dest.Project,
                DatasetId = dest.DatasetId,
                TableId = dest.TableId,
            },
            DestinationEncryptionConfiguration = new Gcp.BigQuery.Inputs.JobCopyDestinationEncryptionConfigurationArgs
            {
                KmsKeyName = "example-key",
            },
        },
    }, new CustomResourceOptions
    {
        DependsOn =
        {
            encryptRole,
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.organizations.OrganizationsFunctions;
import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
import com.pulumi.gcp.kms.CryptoKeyIAMMember;
import com.pulumi.gcp.kms.CryptoKeyIAMMemberArgs;
import com.pulumi.gcp.bigquery.inputs.TableEncryptionConfigurationArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobCopyArgs;
import com.pulumi.gcp.bigquery.inputs.JobCopySourceTableArgs;
import com.pulumi.gcp.bigquery.inputs.JobCopyDestinationTableArgs;
import com.pulumi.gcp.bigquery.inputs.JobCopyDestinationEncryptionConfigurationArgs;
import com.pulumi.codegen.internal.KeyedValue;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        final var count = 2;
        final var sourceDataset = new ArrayList<Dataset>();
        for (var i = 0; i < count; i++) {
            sourceDataset.add(new Dataset("sourceDataset-" + i, DatasetArgs.builder()
                .datasetId(String.format("job_copy_%s_dataset", i))
                .friendlyName("test")
                .description("This is a test description")
                .location("US")
                .build()));
        }
        final var source = new ArrayList<Table>();
        for (var i = 0; i < count; i++) {
            source.add(new Table("source-" + i, TableArgs.builder()
                .datasetId(sourceDataset.get(i).datasetId())
                .tableId(String.format("job_copy_%s_table", i))
                .deletionProtection(false)
                .schema("""
[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
                """)
                .build()));
        }
        var destDataset = new Dataset("destDataset", DatasetArgs.builder()
            .datasetId("job_copy_dest_dataset")
            .friendlyName("test")
            .description("This is a test description")
            .location("US")
            .build());
        final var project = OrganizationsFunctions.getProject(GetProjectArgs.builder()
            .projectId("my-project-name")
            .build());
        var encryptRole = new CryptoKeyIAMMember("encryptRole", CryptoKeyIAMMemberArgs.builder()
            .cryptoKeyId("example-key")
            .role("roles/cloudkms.cryptoKeyEncrypterDecrypter")
            .member(String.format("serviceAccount:bq-%s@bigquery-encryption.iam.gserviceaccount.com", project.applyValue(getProjectResult -> getProjectResult.number())))
            .build());
        var dest = new Table("dest", TableArgs.builder()
            .deletionProtection(false)
            .datasetId(destDataset.datasetId())
            .tableId("job_copy_dest_table")
            .schema("""
[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
            """)
            .encryptionConfiguration(TableEncryptionConfigurationArgs.builder()
                .kmsKeyName("example-key")
                .build())
            .build(), CustomResourceOptions.builder()
                .dependsOn(encryptRole)
                .build());
        var job = new Job("job", JobArgs.builder()
            .jobId("job_copy")
            .copy(JobCopyArgs.builder()
                .sourceTables(
                    JobCopySourceTableArgs.builder()
                        .projectId(source.get(0).project())
                        .datasetId(source.get(0).datasetId())
                        .tableId(source.get(0).tableId())
                        .build(),
                    JobCopySourceTableArgs.builder()
                        .projectId(source.get(1).project())
                        .datasetId(source.get(1).datasetId())
                        .tableId(source.get(1).tableId())
                        .build())
                .destinationTable(JobCopyDestinationTableArgs.builder()
                    .projectId(dest.project())
                    .datasetId(dest.datasetId())
                    .tableId(dest.tableId())
                    .build())
                .destinationEncryptionConfiguration(JobCopyDestinationEncryptionConfigurationArgs.builder()
                    .kmsKeyName("example-key")
                    .build())
                .build())
            .build(), CustomResourceOptions.builder()
                .dependsOn(encryptRole)
                .build());
    }
}
Coming soon!
Bigquery Job Extract
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const source_oneDataset = new gcp.bigquery.Dataset("source-one", {
    datasetId: "job_extract_dataset",
    friendlyName: "test",
    description: "This is a test description",
    location: "US",
});
const source_one = new gcp.bigquery.Table("source-one", {
    deletionProtection: false,
    datasetId: source_oneDataset.datasetId,
    tableId: "job_extract_table",
    schema: `[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
`,
});
const dest = new gcp.storage.Bucket("dest", {
    name: "job_extract_bucket",
    location: "US",
    forceDestroy: true,
});
const job = new gcp.bigquery.Job("job", {
    jobId: "job_extract",
    extract: {
        destinationUris: [pulumi.interpolate`${dest.url}/extract`],
        sourceTable: {
            projectId: source_one.project,
            datasetId: source_one.datasetId,
            tableId: source_one.tableId,
        },
        destinationFormat: "NEWLINE_DELIMITED_JSON",
        compression: "GZIP",
    },
});
import pulumi
import pulumi_gcp as gcp
source_one_dataset = gcp.bigquery.Dataset("source-one",
    dataset_id="job_extract_dataset",
    friendly_name="test",
    description="This is a test description",
    location="US")
source_one = gcp.bigquery.Table("source-one",
    deletion_protection=False,
    dataset_id=source_one_dataset.dataset_id,
    table_id="job_extract_table",
    schema="""[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
""")
dest = gcp.storage.Bucket("dest",
    name="job_extract_bucket",
    location="US",
    force_destroy=True)
job = gcp.bigquery.Job("job",
    job_id="job_extract",
    extract={
        "destination_uris": [dest.url.apply(lambda url: f"{url}/extract")],
        "source_table": {
            "project_id": source_one.project,
            "dataset_id": source_one.dataset_id,
            "table_id": source_one.table_id,
        },
        "destination_format": "NEWLINE_DELIMITED_JSON",
        "compression": "GZIP",
    })
package main
import (
	"fmt"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/storage"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		source_oneDataset, err := bigquery.NewDataset(ctx, "source-one", &bigquery.DatasetArgs{
			DatasetId:    pulumi.String("job_extract_dataset"),
			FriendlyName: pulumi.String("test"),
			Description:  pulumi.String("This is a test description"),
			Location:     pulumi.String("US"),
		})
		if err != nil {
			return err
		}
		source_one, err := bigquery.NewTable(ctx, "source-one", &bigquery.TableArgs{
			DeletionProtection: pulumi.Bool(false),
			DatasetId:          source_oneDataset.DatasetId,
			TableId:            pulumi.String("job_extract_table"),
			Schema: pulumi.String(`[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
`),
		})
		if err != nil {
			return err
		}
		dest, err := storage.NewBucket(ctx, "dest", &storage.BucketArgs{
			Name:         pulumi.String("job_extract_bucket"),
			Location:     pulumi.String("US"),
			ForceDestroy: pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
			JobId: pulumi.String("job_extract"),
			Extract: &bigquery.JobExtractArgs{
				DestinationUris: pulumi.StringArray{
					dest.Url.ApplyT(func(url string) (string, error) {
						return fmt.Sprintf("%v/extract", url), nil
					}).(pulumi.StringOutput),
				},
				SourceTable: &bigquery.JobExtractSourceTableArgs{
					ProjectId: source_one.Project,
					DatasetId: source_one.DatasetId,
					TableId:   source_one.TableId,
				},
				DestinationFormat: pulumi.String("NEWLINE_DELIMITED_JSON"),
				Compression:       pulumi.String("GZIP"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var source_oneDataset = new Gcp.BigQuery.Dataset("source-one", new()
    {
        DatasetId = "job_extract_dataset",
        FriendlyName = "test",
        Description = "This is a test description",
        Location = "US",
    });
    var source_one = new Gcp.BigQuery.Table("source-one", new()
    {
        DeletionProtection = false,
        DatasetId = source_oneDataset.DatasetId,
        TableId = "job_extract_table",
        Schema = @"[
  {
    ""name"": ""name"",
    ""type"": ""STRING"",
    ""mode"": ""NULLABLE""
  },
  {
    ""name"": ""post_abbr"",
    ""type"": ""STRING"",
    ""mode"": ""NULLABLE""
  },
  {
    ""name"": ""date"",
    ""type"": ""DATE"",
    ""mode"": ""NULLABLE""
  }
]
",
    });
    var dest = new Gcp.Storage.Bucket("dest", new()
    {
        Name = "job_extract_bucket",
        Location = "US",
        ForceDestroy = true,
    });
    var job = new Gcp.BigQuery.Job("job", new()
    {
        JobId = "job_extract",
        Extract = new Gcp.BigQuery.Inputs.JobExtractArgs
        {
            DestinationUris = new[]
            {
                dest.Url.Apply(url => $"{url}/extract"),
            },
            SourceTable = new Gcp.BigQuery.Inputs.JobExtractSourceTableArgs
            {
                ProjectId = source_one.Project,
                DatasetId = source_one.DatasetId,
                TableId = source_one.TableId,
            },
            DestinationFormat = "NEWLINE_DELIMITED_JSON",
            Compression = "GZIP",
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.storage.Bucket;
import com.pulumi.gcp.storage.BucketArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobExtractArgs;
import com.pulumi.gcp.bigquery.inputs.JobExtractSourceTableArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var source_oneDataset = new Dataset("source-oneDataset", DatasetArgs.builder()
            .datasetId("job_extract_dataset")
            .friendlyName("test")
            .description("This is a test description")
            .location("US")
            .build());
        var source_one = new Table("source-one", TableArgs.builder()
            .deletionProtection(false)
            .datasetId(source_oneDataset.datasetId())
            .tableId("job_extract_table")
            .schema("""
[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
            """)
            .build());
        var dest = new Bucket("dest", BucketArgs.builder()
            .name("job_extract_bucket")
            .location("US")
            .forceDestroy(true)
            .build());
        var job = new Job("job", JobArgs.builder()
            .jobId("job_extract")
            .extract(JobExtractArgs.builder()
                .destinationUris(dest.url().applyValue(url -> String.format("%s/extract", url)))
                .sourceTable(JobExtractSourceTableArgs.builder()
                    .projectId(source_one.project())
                    .datasetId(source_one.datasetId())
                    .tableId(source_one.tableId())
                    .build())
                .destinationFormat("NEWLINE_DELIMITED_JSON")
                .compression("GZIP")
                .build())
            .build());
    }
}
resources:
  source-one:
    type: gcp:bigquery:Table
    properties:
      deletionProtection: false
      datasetId: ${["source-oneDataset"].datasetId}
      tableId: job_extract_table
      schema: |
        [
          {
            "name": "name",
            "type": "STRING",
            "mode": "NULLABLE"
          },
          {
            "name": "post_abbr",
            "type": "STRING",
            "mode": "NULLABLE"
          },
          {
            "name": "date",
            "type": "DATE",
            "mode": "NULLABLE"
          }
        ]        
  source-oneDataset:
    type: gcp:bigquery:Dataset
    name: source-one
    properties:
      datasetId: job_extract_dataset
      friendlyName: test
      description: This is a test description
      location: US
  dest:
    type: gcp:storage:Bucket
    properties:
      name: job_extract_bucket
      location: US
      forceDestroy: true
  job:
    type: gcp:bigquery:Job
    properties:
      jobId: job_extract
      extract:
        destinationUris:
          - ${dest.url}/extract
        sourceTable:
          projectId: ${["source-one"].project}
          datasetId: ${["source-one"].datasetId}
          tableId: ${["source-one"].tableId}
        destinationFormat: NEWLINE_DELIMITED_JSON
        compression: GZIP
Create Job Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Job(name: string, args: JobArgs, opts?: CustomResourceOptions);
@overload
def Job(resource_name: str,
        args: JobArgs,
        opts: Optional[ResourceOptions] = None)
@overload
def Job(resource_name: str,
        opts: Optional[ResourceOptions] = None,
        job_id: Optional[str] = None,
        copy: Optional[JobCopyArgs] = None,
        extract: Optional[JobExtractArgs] = None,
        job_timeout_ms: Optional[str] = None,
        labels: Optional[Mapping[str, str]] = None,
        load: Optional[JobLoadArgs] = None,
        location: Optional[str] = None,
        project: Optional[str] = None,
        query: Optional[JobQueryArgs] = None)
func NewJob(ctx *Context, name string, args JobArgs, opts ...ResourceOption) (*Job, error)
public Job(string name, JobArgs args, CustomResourceOptions? opts = null)
type: gcp:bigquery:Job
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args JobArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args JobArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args JobArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args JobArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args JobArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var jobResource = new Gcp.BigQuery.Job("jobResource", new()
{
    JobId = "string",
    Copy = new Gcp.BigQuery.Inputs.JobCopyArgs
    {
        SourceTables = new[]
        {
            new Gcp.BigQuery.Inputs.JobCopySourceTableArgs
            {
                TableId = "string",
                DatasetId = "string",
                ProjectId = "string",
            },
        },
        CreateDisposition = "string",
        DestinationEncryptionConfiguration = new Gcp.BigQuery.Inputs.JobCopyDestinationEncryptionConfigurationArgs
        {
            KmsKeyName = "string",
            KmsKeyVersion = "string",
        },
        DestinationTable = new Gcp.BigQuery.Inputs.JobCopyDestinationTableArgs
        {
            TableId = "string",
            DatasetId = "string",
            ProjectId = "string",
        },
        WriteDisposition = "string",
    },
    Extract = new Gcp.BigQuery.Inputs.JobExtractArgs
    {
        DestinationUris = new[]
        {
            "string",
        },
        Compression = "string",
        DestinationFormat = "string",
        FieldDelimiter = "string",
        PrintHeader = false,
        SourceModel = new Gcp.BigQuery.Inputs.JobExtractSourceModelArgs
        {
            DatasetId = "string",
            ModelId = "string",
            ProjectId = "string",
        },
        SourceTable = new Gcp.BigQuery.Inputs.JobExtractSourceTableArgs
        {
            TableId = "string",
            DatasetId = "string",
            ProjectId = "string",
        },
        UseAvroLogicalTypes = false,
    },
    JobTimeoutMs = "string",
    Labels = 
    {
        { "string", "string" },
    },
    Load = new Gcp.BigQuery.Inputs.JobLoadArgs
    {
        DestinationTable = new Gcp.BigQuery.Inputs.JobLoadDestinationTableArgs
        {
            TableId = "string",
            DatasetId = "string",
            ProjectId = "string",
        },
        SourceUris = new[]
        {
            "string",
        },
        MaxBadRecords = 0,
        NullMarker = "string",
        DestinationEncryptionConfiguration = new Gcp.BigQuery.Inputs.JobLoadDestinationEncryptionConfigurationArgs
        {
            KmsKeyName = "string",
            KmsKeyVersion = "string",
        },
        Autodetect = false,
        Encoding = "string",
        FieldDelimiter = "string",
        IgnoreUnknownValues = false,
        JsonExtension = "string",
        AllowJaggedRows = false,
        CreateDisposition = "string",
        ParquetOptions = new Gcp.BigQuery.Inputs.JobLoadParquetOptionsArgs
        {
            EnableListInference = false,
            EnumAsString = false,
        },
        ProjectionFields = new[]
        {
            "string",
        },
        Quote = "string",
        SchemaUpdateOptions = new[]
        {
            "string",
        },
        SkipLeadingRows = 0,
        SourceFormat = "string",
        AllowQuotedNewlines = false,
        TimePartitioning = new Gcp.BigQuery.Inputs.JobLoadTimePartitioningArgs
        {
            Type = "string",
            ExpirationMs = "string",
            Field = "string",
        },
        WriteDisposition = "string",
    },
    Location = "string",
    Project = "string",
    Query = new Gcp.BigQuery.Inputs.JobQueryArgs
    {
        Query = "string",
        ParameterMode = "string",
        MaximumBytesBilled = "string",
        DestinationEncryptionConfiguration = new Gcp.BigQuery.Inputs.JobQueryDestinationEncryptionConfigurationArgs
        {
            KmsKeyName = "string",
            KmsKeyVersion = "string",
        },
        DestinationTable = new Gcp.BigQuery.Inputs.JobQueryDestinationTableArgs
        {
            TableId = "string",
            DatasetId = "string",
            ProjectId = "string",
        },
        Priority = "string",
        MaximumBillingTier = 0,
        DefaultDataset = new Gcp.BigQuery.Inputs.JobQueryDefaultDatasetArgs
        {
            DatasetId = "string",
            ProjectId = "string",
        },
        AllowLargeResults = false,
        FlattenResults = false,
        CreateDisposition = "string",
        SchemaUpdateOptions = new[]
        {
            "string",
        },
        ScriptOptions = new Gcp.BigQuery.Inputs.JobQueryScriptOptionsArgs
        {
            KeyResultStatement = "string",
            StatementByteBudget = "string",
            StatementTimeoutMs = "string",
        },
        UseLegacySql = false,
        UseQueryCache = false,
        UserDefinedFunctionResources = new[]
        {
            new Gcp.BigQuery.Inputs.JobQueryUserDefinedFunctionResourceArgs
            {
                InlineCode = "string",
                ResourceUri = "string",
            },
        },
        WriteDisposition = "string",
    },
});
example, err := bigquery.NewJob(ctx, "jobResource", &bigquery.JobArgs{
	JobId: pulumi.String("string"),
	Copy: &bigquery.JobCopyArgs{
		SourceTables: bigquery.JobCopySourceTableArray{
			&bigquery.JobCopySourceTableArgs{
				TableId:   pulumi.String("string"),
				DatasetId: pulumi.String("string"),
				ProjectId: pulumi.String("string"),
			},
		},
		CreateDisposition: pulumi.String("string"),
		DestinationEncryptionConfiguration: &bigquery.JobCopyDestinationEncryptionConfigurationArgs{
			KmsKeyName:    pulumi.String("string"),
			KmsKeyVersion: pulumi.String("string"),
		},
		DestinationTable: &bigquery.JobCopyDestinationTableArgs{
			TableId:   pulumi.String("string"),
			DatasetId: pulumi.String("string"),
			ProjectId: pulumi.String("string"),
		},
		WriteDisposition: pulumi.String("string"),
	},
	Extract: &bigquery.JobExtractArgs{
		DestinationUris: pulumi.StringArray{
			pulumi.String("string"),
		},
		Compression:       pulumi.String("string"),
		DestinationFormat: pulumi.String("string"),
		FieldDelimiter:    pulumi.String("string"),
		PrintHeader:       pulumi.Bool(false),
		SourceModel: &bigquery.JobExtractSourceModelArgs{
			DatasetId: pulumi.String("string"),
			ModelId:   pulumi.String("string"),
			ProjectId: pulumi.String("string"),
		},
		SourceTable: &bigquery.JobExtractSourceTableArgs{
			TableId:   pulumi.String("string"),
			DatasetId: pulumi.String("string"),
			ProjectId: pulumi.String("string"),
		},
		UseAvroLogicalTypes: pulumi.Bool(false),
	},
	JobTimeoutMs: pulumi.String("string"),
	Labels: pulumi.StringMap{
		"string": pulumi.String("string"),
	},
	Load: &bigquery.JobLoadArgs{
		DestinationTable: &bigquery.JobLoadDestinationTableArgs{
			TableId:   pulumi.String("string"),
			DatasetId: pulumi.String("string"),
			ProjectId: pulumi.String("string"),
		},
		SourceUris: pulumi.StringArray{
			pulumi.String("string"),
		},
		MaxBadRecords: pulumi.Int(0),
		NullMarker:    pulumi.String("string"),
		DestinationEncryptionConfiguration: &bigquery.JobLoadDestinationEncryptionConfigurationArgs{
			KmsKeyName:    pulumi.String("string"),
			KmsKeyVersion: pulumi.String("string"),
		},
		Autodetect:          pulumi.Bool(false),
		Encoding:            pulumi.String("string"),
		FieldDelimiter:      pulumi.String("string"),
		IgnoreUnknownValues: pulumi.Bool(false),
		JsonExtension:       pulumi.String("string"),
		AllowJaggedRows:     pulumi.Bool(false),
		CreateDisposition:   pulumi.String("string"),
		ParquetOptions: &bigquery.JobLoadParquetOptionsArgs{
			EnableListInference: pulumi.Bool(false),
			EnumAsString:        pulumi.Bool(false),
		},
		ProjectionFields: pulumi.StringArray{
			pulumi.String("string"),
		},
		Quote: pulumi.String("string"),
		SchemaUpdateOptions: pulumi.StringArray{
			pulumi.String("string"),
		},
		SkipLeadingRows:     pulumi.Int(0),
		SourceFormat:        pulumi.String("string"),
		AllowQuotedNewlines: pulumi.Bool(false),
		TimePartitioning: &bigquery.JobLoadTimePartitioningArgs{
			Type:         pulumi.String("string"),
			ExpirationMs: pulumi.String("string"),
			Field:        pulumi.String("string"),
		},
		WriteDisposition: pulumi.String("string"),
	},
	Location: pulumi.String("string"),
	Project:  pulumi.String("string"),
	Query: &bigquery.JobQueryArgs{
		Query:              pulumi.String("string"),
		ParameterMode:      pulumi.String("string"),
		MaximumBytesBilled: pulumi.String("string"),
		DestinationEncryptionConfiguration: &bigquery.JobQueryDestinationEncryptionConfigurationArgs{
			KmsKeyName:    pulumi.String("string"),
			KmsKeyVersion: pulumi.String("string"),
		},
		DestinationTable: &bigquery.JobQueryDestinationTableArgs{
			TableId:   pulumi.String("string"),
			DatasetId: pulumi.String("string"),
			ProjectId: pulumi.String("string"),
		},
		Priority:           pulumi.String("string"),
		MaximumBillingTier: pulumi.Int(0),
		DefaultDataset: &bigquery.JobQueryDefaultDatasetArgs{
			DatasetId: pulumi.String("string"),
			ProjectId: pulumi.String("string"),
		},
		AllowLargeResults: pulumi.Bool(false),
		FlattenResults:    pulumi.Bool(false),
		CreateDisposition: pulumi.String("string"),
		SchemaUpdateOptions: pulumi.StringArray{
			pulumi.String("string"),
		},
		ScriptOptions: &bigquery.JobQueryScriptOptionsArgs{
			KeyResultStatement:  pulumi.String("string"),
			StatementByteBudget: pulumi.String("string"),
			StatementTimeoutMs:  pulumi.String("string"),
		},
		UseLegacySql:  pulumi.Bool(false),
		UseQueryCache: pulumi.Bool(false),
		UserDefinedFunctionResources: bigquery.JobQueryUserDefinedFunctionResourceArray{
			&bigquery.JobQueryUserDefinedFunctionResourceArgs{
				InlineCode:  pulumi.String("string"),
				ResourceUri: pulumi.String("string"),
			},
		},
		WriteDisposition: pulumi.String("string"),
	},
})
var jobResource = new Job("jobResource", JobArgs.builder()
    .jobId("string")
    .copy(JobCopyArgs.builder()
        .sourceTables(JobCopySourceTableArgs.builder()
            .tableId("string")
            .datasetId("string")
            .projectId("string")
            .build())
        .createDisposition("string")
        .destinationEncryptionConfiguration(JobCopyDestinationEncryptionConfigurationArgs.builder()
            .kmsKeyName("string")
            .kmsKeyVersion("string")
            .build())
        .destinationTable(JobCopyDestinationTableArgs.builder()
            .tableId("string")
            .datasetId("string")
            .projectId("string")
            .build())
        .writeDisposition("string")
        .build())
    .extract(JobExtractArgs.builder()
        .destinationUris("string")
        .compression("string")
        .destinationFormat("string")
        .fieldDelimiter("string")
        .printHeader(false)
        .sourceModel(JobExtractSourceModelArgs.builder()
            .datasetId("string")
            .modelId("string")
            .projectId("string")
            .build())
        .sourceTable(JobExtractSourceTableArgs.builder()
            .tableId("string")
            .datasetId("string")
            .projectId("string")
            .build())
        .useAvroLogicalTypes(false)
        .build())
    .jobTimeoutMs("string")
    .labels(Map.of("string", "string"))
    .load(JobLoadArgs.builder()
        .destinationTable(JobLoadDestinationTableArgs.builder()
            .tableId("string")
            .datasetId("string")
            .projectId("string")
            .build())
        .sourceUris("string")
        .maxBadRecords(0)
        .nullMarker("string")
        .destinationEncryptionConfiguration(JobLoadDestinationEncryptionConfigurationArgs.builder()
            .kmsKeyName("string")
            .kmsKeyVersion("string")
            .build())
        .autodetect(false)
        .encoding("string")
        .fieldDelimiter("string")
        .ignoreUnknownValues(false)
        .jsonExtension("string")
        .allowJaggedRows(false)
        .createDisposition("string")
        .parquetOptions(JobLoadParquetOptionsArgs.builder()
            .enableListInference(false)
            .enumAsString(false)
            .build())
        .projectionFields("string")
        .quote("string")
        .schemaUpdateOptions("string")
        .skipLeadingRows(0)
        .sourceFormat("string")
        .allowQuotedNewlines(false)
        .timePartitioning(JobLoadTimePartitioningArgs.builder()
            .type("string")
            .expirationMs("string")
            .field("string")
            .build())
        .writeDisposition("string")
        .build())
    .location("string")
    .project("string")
    .query(JobQueryArgs.builder()
        .query("string")
        .parameterMode("string")
        .maximumBytesBilled("string")
        .destinationEncryptionConfiguration(JobQueryDestinationEncryptionConfigurationArgs.builder()
            .kmsKeyName("string")
            .kmsKeyVersion("string")
            .build())
        .destinationTable(JobQueryDestinationTableArgs.builder()
            .tableId("string")
            .datasetId("string")
            .projectId("string")
            .build())
        .priority("string")
        .maximumBillingTier(0)
        .defaultDataset(JobQueryDefaultDatasetArgs.builder()
            .datasetId("string")
            .projectId("string")
            .build())
        .allowLargeResults(false)
        .flattenResults(false)
        .createDisposition("string")
        .schemaUpdateOptions("string")
        .scriptOptions(JobQueryScriptOptionsArgs.builder()
            .keyResultStatement("string")
            .statementByteBudget("string")
            .statementTimeoutMs("string")
            .build())
        .useLegacySql(false)
        .useQueryCache(false)
        .userDefinedFunctionResources(JobQueryUserDefinedFunctionResourceArgs.builder()
            .inlineCode("string")
            .resourceUri("string")
            .build())
        .writeDisposition("string")
        .build())
    .build());
job_resource = gcp.bigquery.Job("jobResource",
    job_id="string",
    copy={
        "source_tables": [{
            "table_id": "string",
            "dataset_id": "string",
            "project_id": "string",
        }],
        "create_disposition": "string",
        "destination_encryption_configuration": {
            "kms_key_name": "string",
            "kms_key_version": "string",
        },
        "destination_table": {
            "table_id": "string",
            "dataset_id": "string",
            "project_id": "string",
        },
        "write_disposition": "string",
    },
    extract={
        "destination_uris": ["string"],
        "compression": "string",
        "destination_format": "string",
        "field_delimiter": "string",
        "print_header": False,
        "source_model": {
            "dataset_id": "string",
            "model_id": "string",
            "project_id": "string",
        },
        "source_table": {
            "table_id": "string",
            "dataset_id": "string",
            "project_id": "string",
        },
        "use_avro_logical_types": False,
    },
    job_timeout_ms="string",
    labels={
        "string": "string",
    },
    load={
        "destination_table": {
            "table_id": "string",
            "dataset_id": "string",
            "project_id": "string",
        },
        "source_uris": ["string"],
        "max_bad_records": 0,
        "null_marker": "string",
        "destination_encryption_configuration": {
            "kms_key_name": "string",
            "kms_key_version": "string",
        },
        "autodetect": False,
        "encoding": "string",
        "field_delimiter": "string",
        "ignore_unknown_values": False,
        "json_extension": "string",
        "allow_jagged_rows": False,
        "create_disposition": "string",
        "parquet_options": {
            "enable_list_inference": False,
            "enum_as_string": False,
        },
        "projection_fields": ["string"],
        "quote": "string",
        "schema_update_options": ["string"],
        "skip_leading_rows": 0,
        "source_format": "string",
        "allow_quoted_newlines": False,
        "time_partitioning": {
            "type": "string",
            "expiration_ms": "string",
            "field": "string",
        },
        "write_disposition": "string",
    },
    location="string",
    project="string",
    query={
        "query": "string",
        "parameter_mode": "string",
        "maximum_bytes_billed": "string",
        "destination_encryption_configuration": {
            "kms_key_name": "string",
            "kms_key_version": "string",
        },
        "destination_table": {
            "table_id": "string",
            "dataset_id": "string",
            "project_id": "string",
        },
        "priority": "string",
        "maximum_billing_tier": 0,
        "default_dataset": {
            "dataset_id": "string",
            "project_id": "string",
        },
        "allow_large_results": False,
        "flatten_results": False,
        "create_disposition": "string",
        "schema_update_options": ["string"],
        "script_options": {
            "key_result_statement": "string",
            "statement_byte_budget": "string",
            "statement_timeout_ms": "string",
        },
        "use_legacy_sql": False,
        "use_query_cache": False,
        "user_defined_function_resources": [{
            "inline_code": "string",
            "resource_uri": "string",
        }],
        "write_disposition": "string",
    })
const jobResource = new gcp.bigquery.Job("jobResource", {
    jobId: "string",
    copy: {
        sourceTables: [{
            tableId: "string",
            datasetId: "string",
            projectId: "string",
        }],
        createDisposition: "string",
        destinationEncryptionConfiguration: {
            kmsKeyName: "string",
            kmsKeyVersion: "string",
        },
        destinationTable: {
            tableId: "string",
            datasetId: "string",
            projectId: "string",
        },
        writeDisposition: "string",
    },
    extract: {
        destinationUris: ["string"],
        compression: "string",
        destinationFormat: "string",
        fieldDelimiter: "string",
        printHeader: false,
        sourceModel: {
            datasetId: "string",
            modelId: "string",
            projectId: "string",
        },
        sourceTable: {
            tableId: "string",
            datasetId: "string",
            projectId: "string",
        },
        useAvroLogicalTypes: false,
    },
    jobTimeoutMs: "string",
    labels: {
        string: "string",
    },
    load: {
        destinationTable: {
            tableId: "string",
            datasetId: "string",
            projectId: "string",
        },
        sourceUris: ["string"],
        maxBadRecords: 0,
        nullMarker: "string",
        destinationEncryptionConfiguration: {
            kmsKeyName: "string",
            kmsKeyVersion: "string",
        },
        autodetect: false,
        encoding: "string",
        fieldDelimiter: "string",
        ignoreUnknownValues: false,
        jsonExtension: "string",
        allowJaggedRows: false,
        createDisposition: "string",
        parquetOptions: {
            enableListInference: false,
            enumAsString: false,
        },
        projectionFields: ["string"],
        quote: "string",
        schemaUpdateOptions: ["string"],
        skipLeadingRows: 0,
        sourceFormat: "string",
        allowQuotedNewlines: false,
        timePartitioning: {
            type: "string",
            expirationMs: "string",
            field: "string",
        },
        writeDisposition: "string",
    },
    location: "string",
    project: "string",
    query: {
        query: "string",
        parameterMode: "string",
        maximumBytesBilled: "string",
        destinationEncryptionConfiguration: {
            kmsKeyName: "string",
            kmsKeyVersion: "string",
        },
        destinationTable: {
            tableId: "string",
            datasetId: "string",
            projectId: "string",
        },
        priority: "string",
        maximumBillingTier: 0,
        defaultDataset: {
            datasetId: "string",
            projectId: "string",
        },
        allowLargeResults: false,
        flattenResults: false,
        createDisposition: "string",
        schemaUpdateOptions: ["string"],
        scriptOptions: {
            keyResultStatement: "string",
            statementByteBudget: "string",
            statementTimeoutMs: "string",
        },
        useLegacySql: false,
        useQueryCache: false,
        userDefinedFunctionResources: [{
            inlineCode: "string",
            resourceUri: "string",
        }],
        writeDisposition: "string",
    },
});
type: gcp:bigquery:Job
properties:
    copy:
        createDisposition: string
        destinationEncryptionConfiguration:
            kmsKeyName: string
            kmsKeyVersion: string
        destinationTable:
            datasetId: string
            projectId: string
            tableId: string
        sourceTables:
            - datasetId: string
              projectId: string
              tableId: string
        writeDisposition: string
    extract:
        compression: string
        destinationFormat: string
        destinationUris:
            - string
        fieldDelimiter: string
        printHeader: false
        sourceModel:
            datasetId: string
            modelId: string
            projectId: string
        sourceTable:
            datasetId: string
            projectId: string
            tableId: string
        useAvroLogicalTypes: false
    jobId: string
    jobTimeoutMs: string
    labels:
        string: string
    load:
        allowJaggedRows: false
        allowQuotedNewlines: false
        autodetect: false
        createDisposition: string
        destinationEncryptionConfiguration:
            kmsKeyName: string
            kmsKeyVersion: string
        destinationTable:
            datasetId: string
            projectId: string
            tableId: string
        encoding: string
        fieldDelimiter: string
        ignoreUnknownValues: false
        jsonExtension: string
        maxBadRecords: 0
        nullMarker: string
        parquetOptions:
            enableListInference: false
            enumAsString: false
        projectionFields:
            - string
        quote: string
        schemaUpdateOptions:
            - string
        skipLeadingRows: 0
        sourceFormat: string
        sourceUris:
            - string
        timePartitioning:
            expirationMs: string
            field: string
            type: string
        writeDisposition: string
    location: string
    project: string
    query:
        allowLargeResults: false
        createDisposition: string
        defaultDataset:
            datasetId: string
            projectId: string
        destinationEncryptionConfiguration:
            kmsKeyName: string
            kmsKeyVersion: string
        destinationTable:
            datasetId: string
            projectId: string
            tableId: string
        flattenResults: false
        maximumBillingTier: 0
        maximumBytesBilled: string
        parameterMode: string
        priority: string
        query: string
        schemaUpdateOptions:
            - string
        scriptOptions:
            keyResultStatement: string
            statementByteBudget: string
            statementTimeoutMs: string
        useLegacySql: false
        useQueryCache: false
        userDefinedFunctionResources:
            - inlineCode: string
              resourceUri: string
        writeDisposition: string
Job Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
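For example, a minimal Python sketch (the resource names and the trivial query below are illustrative placeholders, not taken from the examples above) shows the two equivalent forms:
import pulumi_gcp as gcp

# Inputs passed as a dictionary literal.
job_from_dict = gcp.bigquery.Job("job-from-dict",
    job_id="job_query_dict",
    query={
        "query": "SELECT 1",
        "use_legacy_sql": False,
    })

# The same inputs passed as an argument class.
job_from_args = gcp.bigquery.Job("job-from-args",
    job_id="job_query_args",
    query=gcp.bigquery.JobQueryArgs(
        query="SELECT 1",
        use_legacy_sql=False,
    ))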
The Job resource accepts the following input properties:
- JobId string
- The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
- Copy
JobCopy 
- Copies a table.
- Extract
JobExtract 
- Configures an extract job.
- JobTimeoutMs string
- Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
- Labels Dictionary<string, string>
- The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- Load
JobLoad 
- Configures a load job.
- Location string
- The geographic location of the job. The default value is US.
- Project string
- Query
JobQuery 
- Configures a query job.
- JobId string
- The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
- Copy
JobCopy Args 
- Copies a table.
- Extract
JobExtract Args 
- Configures an extract job.
- JobTimeoutMs string
- Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
- Labels map[string]string
- The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- Load
JobLoad Args 
- Configures a load job.
- Location string
- The geographic location of the job. The default value is US.
- Project string
- Query
JobQuery Args 
- Configures a query job.
- jobId String
- The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
- copy
JobCopy 
- Copies a table.
- extract
JobExtract 
- Configures an extract job.
- jobTimeoutMs String
- Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
- labels Map<String,String>
- The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- load
JobLoad 
- Configures a load job.
- location String
- The geographic location of the job. The default value is US.
- project String
- query
JobQuery 
- Configures a query job.
- jobId string
- The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
- copy
JobCopy 
- Copies a table.
- extract
JobExtract 
- Configures an extract job.
- jobTimeoutMs string
- Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
- labels {[key: string]: string}
- The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- load
JobLoad 
- Configures a load job.
- location string
- The geographic location of the job. The default value is US.
- project string
- query
JobQuery 
- Configures a query job.
- job_id str
- The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
- copy
JobCopy Args 
- Copies a table.
- extract
JobExtract Args 
- Configures an extract job.
- job_timeout_ms str
- Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
- labels Mapping[str, str]
- The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- load
JobLoad Args 
- Configures a load job.
- location str
- The geographic location of the job. The default value is US.
- project str
- query
JobQuery Args 
- Configures a query job.
- jobId String
- The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
- copy Property Map
- Copies a table.
- extract Property Map
- Configures an extract job.
- jobTimeoutMs String
- Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
- labels Map<String>
- The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- load Property Map
- Configures a load job.
- location String
- The geographic location of the job. The default value is US.
- project String
- query Property Map
- Configures a query job.
Outputs
All input properties are implicitly available as output properties. Additionally, the Job resource produces the following output properties:
- EffectiveLabels Dictionary<string, string>
- (Output) All labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
- Id string
- The provider-assigned unique ID for this managed resource.
- JobType string
- (Output) The type of the job.
- PulumiLabels Dictionary<string, string>
- (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
- Statuses
List<JobStatus> 
- The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
- UserEmail string
- Email address of the user who ran the job.
- EffectiveLabels map[string]string
- (Output) All labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
- Id string
- The provider-assigned unique ID for this managed resource.
- JobType string
- (Output) The type of the job.
- PulumiLabels map[string]string
- (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
- Statuses
[]JobStatus 
- The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
- UserEmail string
- Email address of the user who ran the job.
- effectiveLabels Map<String,String>
- (Output) All labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
- id String
- The provider-assigned unique ID for this managed resource.
- jobType String
- (Output) The type of the job.
- pulumiLabels Map<String,String>
- (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
- statuses
List<JobStatus> 
- The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
- userEmail String
- Email address of the user who ran the job.
- effectiveLabels {[key: string]: string}
- (Output) All labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
- id string
- The provider-assigned unique ID for this managed resource.
- jobType string
- (Output) The type of the job.
- pulumiLabels {[key: string]: string}
- (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
- statuses
JobStatus[] 
- The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
- userEmail string
- Email address of the user who ran the job.
- effective_labels Mapping[str, str]
- (Output) All labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
- id str
- The provider-assigned unique ID for this managed resource.
- job_type str
- (Output) The type of the job.
- pulumi_labels Mapping[str, str]
- (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
- statuses
Sequence[JobStatus] 
- The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
- user_email str
- Email address of the user who ran the job.
- effectiveLabels Map<String>
- (Output) All labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
- id String
- The provider-assigned unique ID for this managed resource.
- jobType String
- (Output) The type of the job.
- pulumiLabels Map<String>
- (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
- statuses List<Property Map>
- The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
- userEmail String
- Email address of the user who ran the job.
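As a minimal sketch of consuming these outputs in Python (the job configuration and export names here are illustrative), output properties can be exported as stack outputs once the resource is declared:
import pulumi
import pulumi_gcp as gcp

job = gcp.bigquery.Job("job",
    job_id="job_outputs_example",
    query=gcp.bigquery.JobQueryArgs(
        query="SELECT 1",
        use_legacy_sql=False,
    ))

# job_type and user_email are plain string outputs; statuses is a list of
# JobStatus objects whose state field can be inspected once available.
pulumi.export("jobType", job.job_type)
pulumi.export("userEmail", job.user_email)
pulumi.export("jobState", job.statuses.apply(lambda s: s[0].state if s else None))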
Look up Existing Job Resource
Get an existing Job resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
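For instance, a minimal Python sketch of such a lookup (the resource ID format and the concrete project, job, and location values are assumptions for illustration) looks like this; the full per-language signatures follow below:
import pulumi
import pulumi_gcp as gcp

# Look up an existing job by its provider ID instead of creating a new one.
existing = gcp.bigquery.Job.get("existing-job",
    id="projects/my-project-name/jobs/job_query/location/US")

pulumi.export("existingJobType", existing.job_type)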
public static get(name: string, id: Input<ID>, state?: JobState, opts?: CustomResourceOptions): Job
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        copy: Optional[JobCopyArgs] = None,
        effective_labels: Optional[Mapping[str, str]] = None,
        extract: Optional[JobExtractArgs] = None,
        job_id: Optional[str] = None,
        job_timeout_ms: Optional[str] = None,
        job_type: Optional[str] = None,
        labels: Optional[Mapping[str, str]] = None,
        load: Optional[JobLoadArgs] = None,
        location: Optional[str] = None,
        project: Optional[str] = None,
        pulumi_labels: Optional[Mapping[str, str]] = None,
        query: Optional[JobQueryArgs] = None,
        statuses: Optional[Sequence[JobStatusArgs]] = None,
        user_email: Optional[str] = None) -> Job
func GetJob(ctx *Context, name string, id IDInput, state *JobState, opts ...ResourceOption) (*Job, error)
public static Job Get(string name, Input<string> id, JobState? state, CustomResourceOptions? opts = null)
public static Job get(String name, Output<String> id, JobState state, CustomResourceOptions options)
resources:
  _:
    type: gcp:bigquery:Job
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
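As a concrete illustration, here is a minimal TypeScript sketch of such a lookup. The resource ID format used below is an assumption for illustration; use the ID reported for the job you want to look up, and treat the project, job, and location values as placeholders.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Look up an existing BigQuery job by its provider-assigned ID.
// The ID string below is a placeholder; substitute the real one.
const existing = gcp.bigquery.Job.get("existing-job",
    "projects/my-project/jobs/job_query/location/US");
// Outputs behave exactly like those of a job declared in the same program.
export const existingJobType = existing.jobType;
export const existingStatuses = existing.statuses;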
- Copy
JobCopy 
- Copies a table.
- EffectiveLabels Dictionary<string, string>
- (Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- Extract
JobExtract 
- Configures an extract job.
- JobId string
- The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
- JobTimeoutMs string
- Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
- JobType string
- (Output) The type of the job.
- Labels Dictionary<string, string>
- The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- Load
JobLoad 
- Configures a load job.
- Location string
- The geographic location of the job. The default value is US.
- Project string
- PulumiLabels Dictionary<string, string>
- (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
- Query
JobQuery 
- Configures a query job.
- Statuses
List<JobStatus> 
- The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
- UserEmail string
- Email address of the user who ran the job.
- Copy
JobCopy Args 
- Copies a table.
- EffectiveLabels map[string]string
- (Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- Extract
JobExtract Args 
- Configures an extract job.
- JobId string
- The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
- JobTimeoutMs string
- Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
- JobType string
- (Output) The type of the job.
- Labels map[string]string
- The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- Load
JobLoad Args 
- Configures a load job.
- Location string
- The geographic location of the job. The default value is US.
- Project string
- PulumiLabels map[string]string
- (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
- Query
JobQuery Args 
- Configures a query job.
- Statuses
[]JobStatus Args 
- The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
- UserEmail string
- Email address of the user who ran the job.
- copy
JobCopy 
- Copies a table.
- effectiveLabels Map<String,String>
- (Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- extract
JobExtract 
- Configures an extract job.
- jobId String
- The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
- jobTimeoutMs String
- Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
- jobType String
- (Output) The type of the job.
- labels Map<String,String>
- The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- load
JobLoad 
- Configures a load job.
- location String
- The geographic location of the job. The default value is US.
- project String
- pulumiLabels Map<String,String>
- (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
- query
JobQuery 
- Configures a query job.
- statuses
List<JobStatus> 
- The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
- userEmail String
- Email address of the user who ran the job.
- copy
JobCopy 
- Copies a table.
- effectiveLabels {[key: string]: string}
- (Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- extract
JobExtract 
- Configures an extract job.
- jobId string
- The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
- jobTimeoutMs string
- Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
- jobType string
- (Output) The type of the job.
- labels {[key: string]: string}
- The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- load
JobLoad 
- Configures a load job.
- location string
- The geographic location of the job. The default value is US.
- project string
- pulumiLabels {[key: string]: string}
- (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
- query
JobQuery 
- Configures a query job.
- statuses
JobStatus[] 
- The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
- userEmail string
- Email address of the user who ran the job.
- copy
JobCopy Args 
- Copies a table.
- effective_labels Mapping[str, str]
- (Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- extract
JobExtract Args 
- Configures an extract job.
- job_id str
- The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
- job_timeout_ms str
- Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
- job_type str
- (Output) The type of the job.
- labels Mapping[str, str]
- The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- load
JobLoad Args 
- Configures a load job.
- location str
- The geographic location of the job. The default value is US.
- project str
- pulumi_labels Mapping[str, str]
- (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
- query
JobQuery Args 
- Configures a query job.
- statuses
Sequence[JobStatus Args] 
- The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
- user_email str
- Email address of the user who ran the job.
- copy Property Map
- Copies a table.
- effectiveLabels Map<String>
- (Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- extract Property Map
- Configures an extract job.
- jobId String
- The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
- jobTimeoutMs String
- Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
- jobType String
- (Output) The type of the job.
- labels Map<String>
- The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- load Property Map
- Configures a load job.
- location String
- The geographic location of the job. The default value is US.
- project String
- pulumiLabels Map<String>
- (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
- query Property Map
- Configures a query job.
- statuses List<Property Map>
- The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
- userEmail String
- Email address of the user who ran the job.
Supporting Types
JobCopy, JobCopyArgs    
- SourceTables List<JobCopySourceTable>
- Source tables to copy. Structure is documented below.
- CreateDisposition string
- Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
- DestinationEncryptionConfiguration JobCopyDestinationEncryptionConfiguration
- Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.
- DestinationTable JobCopyDestinationTable
- The destination table. Structure is documented below.
- WriteDisposition string
- Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
- SourceTables []JobCopySourceTable
- Source tables to copy. Structure is documented below.
- CreateDisposition string
- Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
- DestinationEncryptionConfiguration JobCopyDestinationEncryptionConfiguration
- Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.
- DestinationTable JobCopyDestinationTable
- The destination table. Structure is documented below.
- WriteDisposition string
- Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
- sourceTables List<JobCopySourceTable>
- Source tables to copy. Structure is documented below.
- createDisposition String
- Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
- destinationEncryptionConfiguration JobCopyDestinationEncryptionConfiguration
- Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.
- destinationTable JobCopyDestinationTable
- The destination table. Structure is documented below.
- writeDisposition String
- Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
- sourceTables JobCopySourceTable[]
- Source tables to copy. Structure is documented below.
- createDisposition string
- Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
- destinationEncryptionConfiguration JobCopyDestinationEncryptionConfiguration
- Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.
- destinationTable JobCopyDestinationTable
- The destination table. Structure is documented below.
- writeDisposition string
- Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
- source_tables Sequence[JobCopySourceTable]
- Source tables to copy. Structure is documented below.
- create_disposition str
- Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
- destination_encryption_configuration JobCopyDestinationEncryptionConfiguration
- Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.
- destination_table JobCopyDestinationTable
- The destination table. Structure is documented below.
- write_disposition str
- Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
- sourceTables List<Property Map>
- Source tables to copy. Structure is documented below.
- createDisposition String
- Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
- destinationEncryptionConfiguration Property Map
- Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.
- destinationTable Property Map
- The destination table. Structure is documented below.
- writeDisposition String
- Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
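To tie these fields together, here is a hedged TypeScript sketch of a copy job. Every project, dataset, table, and KMS key name below is a placeholder, and the destinationEncryptionConfiguration block can be omitted if you do not need customer-managed encryption (the BigQuery service account needs access to the key if you do use it).
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Copy one table into another, optionally protecting the destination with Cloud KMS.
const copyJob = new gcp.bigquery.Job("copy-job", {
    jobId: "job_copy_example",
    copy: {
        sourceTables: [{
            projectId: "my-project",
            datasetId: "source_dataset",
            tableId: "source_table",
        }],
        destinationTable: {
            projectId: "my-project",
            datasetId: "dest_dataset",
            tableId: "dest_table",
        },
        createDisposition: "CREATE_IF_NEEDED",
        writeDisposition: "WRITE_EMPTY",
        destinationEncryptionConfiguration: {
            kmsKeyName: "projects/my-project/locations/us/keyRings/my-ring/cryptoKeys/my-key",
        },
    },
});
// The first statuses entry reports whether the copy has completed.
export const copyJobState = copyJob.statuses.apply(s => s[0]?.state);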
JobCopyDestinationEncryptionConfiguration, JobCopyDestinationEncryptionConfigurationArgs          
- KmsKeyName string
- Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
- KmsKeyVersion string
- (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
- KmsKeyName string
- Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
- KmsKeyVersion string
- (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
- kmsKeyName String
- Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
- kmsKeyVersion String
- (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
- kmsKeyName string
- Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
- kmsKeyVersion string
- (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
- kms_key_name str
- Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
- kms_key_version str
- (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
- kmsKeyName String
- Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
- kmsKeyVersion String
- (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
JobCopyDestinationTable, JobCopyDestinationTableArgs        
- TableId string
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- DatasetId string
- The ID of the dataset containing this table.
- ProjectId string
- The ID of the project containing this table.
- TableId string
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- DatasetId string
- The ID of the dataset containing this table.
- ProjectId string
- The ID of the project containing this table.
- tableId String
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- datasetId String
- The ID of the dataset containing this table.
- projectId String
- The ID of the project containing this table.
- tableId string
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- datasetId string
- The ID of the dataset containing this table.
- projectId string
- The ID of the project containing this table.
- table_id str
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- dataset_id str
- The ID of the dataset containing this table.
- project_id str
- The ID of the project containing this table.
- tableId String
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- datasetId String
- The ID of the dataset containing this table.
- projectId String
- The ID of the project containing this table.
JobCopySourceTable, JobCopySourceTableArgs        
- TableId string
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- DatasetId string
- The ID of the dataset containing this table.
- ProjectId string
- The ID of the project containing this table.
- TableId string
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- DatasetId string
- The ID of the dataset containing this table.
- ProjectId string
- The ID of the project containing this table.
- tableId String
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- datasetId String
- The ID of the dataset containing this table.
- projectId String
- The ID of the project containing this table.
- tableId string
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- datasetId string
- The ID of the dataset containing this table.
- projectId string
- The ID of the project containing this table.
- table_id str
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- dataset_id str
- The ID of the dataset containing this table.
- project_id str
- The ID of the project containing this table.
- tableId String
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- datasetId String
- The ID of the dataset containing this table.
- projectId String
- The ID of the project containing this table.
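Both the destination and source table references accept either spelling of the table ID described above. The following TypeScript fragment illustrates the two equivalent forms; the type path gcp.types.input.bigquery.JobCopyDestinationTable and all names are assumptions for illustration only.
import * as gcp from "@pulumi/gcp";
// Split form: project, dataset and table given separately.
const splitForm: gcp.types.input.bigquery.JobCopyDestinationTable = {
    projectId: "my-project",
    datasetId: "my_dataset",
    tableId: "my_table",
};
// Fully-qualified form: everything packed into tableId.
const qualifiedForm: gcp.types.input.bigquery.JobCopyDestinationTable = {
    tableId: "projects/my-project/datasets/my_dataset/tables/my_table",
};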
JobExtract, JobExtractArgs    
- DestinationUris List<string>
- A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.
- Compression string
- The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.
- DestinationFormat string
- The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL.
- FieldDelimiter string
- When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','
- PrintHeader bool
- Whether to print out a header row in the results. Default is true.
- SourceModel JobExtractSourceModel
- A reference to the model being exported. Structure is documented below.
- SourceTable JobExtractSourceTable
- A reference to the table being exported. Structure is documented below.
- UseAvroLogicalTypes bool
- Whether to use logical types when extracting to AVRO format.
- DestinationUris []string
- A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.
- Compression string
- The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.
- DestinationFormat string
- The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL.
- FieldDelimiter string
- When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','
- PrintHeader bool
- Whether to print out a header row in the results. Default is true.
- SourceModel JobExtractSourceModel
- A reference to the model being exported. Structure is documented below.
- SourceTable JobExtractSourceTable
- A reference to the table being exported. Structure is documented below.
- UseAvroLogicalTypes bool
- Whether to use logical types when extracting to AVRO format.
- destinationUris List<String>
- A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.
- compression String
- The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.
- destinationFormat String
- The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL.
- fieldDelimiter String
- When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','
- printHeader Boolean
- Whether to print out a header row in the results. Default is true.
- sourceModel JobExtractSourceModel
- A reference to the model being exported. Structure is documented below.
- sourceTable JobExtractSourceTable
- A reference to the table being exported. Structure is documented below.
- useAvroLogicalTypes Boolean
- Whether to use logical types when extracting to AVRO format.
- destinationUris string[]
- A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.
- compression string
- The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.
- destinationFormat string
- The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL.
- fieldDelimiter string
- When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','
- printHeader boolean
- Whether to print out a header row in the results. Default is true.
- sourceModel JobExtractSourceModel
- A reference to the model being exported. Structure is documented below.
- sourceTable JobExtractSourceTable
- A reference to the table being exported. Structure is documented below.
- useAvroLogicalTypes boolean
- Whether to use logical types when extracting to AVRO format.
- destination_uris Sequence[str]
- A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.
- compression str
- The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.
- destination_format str
- The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL.
- field_delimiter str
- When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','
- print_header bool
- Whether to print out a header row in the results. Default is true.
- source_model JobExtractSourceModel
- A reference to the model being exported. Structure is documented below.
- source_table JobExtractSourceTable
- A reference to the table being exported. Structure is documented below.
- use_avro_logical_types bool
- Whether to use logical types when extracting to AVRO format.
- destinationUris List<String>
- A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.
- compression String
- The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.
- destinationFormat String
- The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL.
- fieldDelimiter String
- When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','
- printHeader Boolean
- Whether to print out a header row in the results. Default is true.
- sourceModel Property Map
- A reference to the model being exported. Structure is documented below.
- sourceTable Property Map
- A reference to the table being exported. Structure is documented below.
- useAvroLogicalTypes Boolean
- Whether to use logical types when extracting to AVRO format.
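As a worked example of these fields, here is a hedged TypeScript sketch of an extract job that exports a table to Cloud Storage as gzip-compressed newline-delimited JSON. The bucket, dataset, and table names are placeholders.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Export a BigQuery table to Cloud Storage.
const extractJob = new gcp.bigquery.Job("extract-job", {
    jobId: "job_extract_example",
    extract: {
        sourceTable: {
            projectId: "my-project",
            datasetId: "my_dataset",
            tableId: "my_table",
        },
        destinationUris: ["gs://my-bucket/extract/my_table-*.json"],
        destinationFormat: "NEWLINE_DELIMITED_JSON",
        compression: "GZIP",
        printHeader: false,
    },
});
// The first statuses entry reports whether the extract has completed.
export const extractJobState = extractJob.statuses.apply(s => s[0]?.state);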
JobExtractSourceModel, JobExtractSourceModelArgs        
- dataset_id str
- The ID of the dataset containing this model.
- model_id str
- The ID of the model.
- project_id str
- The ID of the project containing this model.
JobExtractSourceTable, JobExtractSourceTableArgs        
- TableId string
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- DatasetId string
- The ID of the dataset containing this table.
- ProjectId string
- The ID of the project containing this table.
- TableId string
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- DatasetId string
- The ID of the dataset containing this table.
- ProjectId string
- The ID of the project containing this table.
- tableId String
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- datasetId String
- The ID of the dataset containing this table.
- projectId String
- The ID of the project containing this table.
- tableId string
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- datasetId string
- The ID of the dataset containing this table.
- projectId string
- The ID of the project containing this table.
- table_id str
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- dataset_id str
- The ID of the dataset containing this table.
- project_id str
- The ID of the project containing this table.
- tableId String
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- datasetId String
- The ID of the dataset containing this table.
- projectId String
- The ID of the project containing this table.
JobLoad, JobLoadArgs    
- DestinationTable JobLoadDestinationTable
- The destination table to load the data into. Structure is documented below.
- SourceUris List<string>
- The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.
- AllowJaggedRows bool
- Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.
- AllowQuotedNewlines bool
- Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
- Autodetect bool
- Indicates if we should automatically infer the options and schema for CSV and JSON sources.
- CreateDisposition string
- Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
- DestinationEncryptionConfiguration JobLoadDestinationEncryptionConfiguration
- Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.
- Encoding string
- The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
- FieldDelimiter string
- The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
- IgnoreUnknownValues bool
- Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names
- JsonExtension string
- If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: - for newline-delimited GeoJSON: set to GEOJSON.
- MaxBadRecords int
- The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
- NullMarker string
- Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.
- ParquetOptions JobLoadParquetOptions
- Parquet Options for load and make external tables. Structure is documented below.
- ProjectionFields List<string>
- If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.
- Quote string
- The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
- SchemaUpdateOptions List<string>
- Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
- SkipLeadingRows int
- The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.
- SourceFormat string
- The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.
- TimePartitioning JobLoadTimePartitioning
- Time-based partitioning specification for the destination table. Structure is documented below.
- WriteDisposition string
- Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
- DestinationTable JobLoadDestinationTable
- The destination table to load the data into. Structure is documented below.
- SourceUris []string
- The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.
- AllowJaggedRows bool
- Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.
- AllowQuotedNewlines bool
- Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
- Autodetect bool
- Indicates if we should automatically infer the options and schema for CSV and JSON sources.
- CreateDisposition string
- Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
- DestinationEncryptionConfiguration JobLoadDestinationEncryptionConfiguration
- Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.
- Encoding string
- The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
- FieldDelimiter string
- The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
- IgnoreUnknownValues bool
- Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names
- JsonExtension string
- If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: - for newline-delimited GeoJSON: set to GEOJSON.
- MaxBadRecords int
- The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
- NullMarker string
- Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.
- ParquetOptions JobLoadParquetOptions
- Parquet Options for load and make external tables. Structure is documented below.
- ProjectionFields []string
- If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.
- Quote string
- The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
- SchemaUpdateOptions []string
- Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
- SkipLeadingRows int
- The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.
- SourceFormat string
- The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.
- TimePartitioning JobLoadTimePartitioning
- Time-based partitioning specification for the destination table. Structure is documented below.
- WriteDisposition string
- Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
- destinationTable JobLoadDestinationTable
- The destination table to load the data into. Structure is documented below.
- sourceUris List<String>
- The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.
- allowJaggedRows Boolean
- Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.
- allowQuotedNewlines Boolean
- Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
- autodetect Boolean
- Indicates if we should automatically infer the options and schema for CSV and JSON sources.
- createDisposition String
- Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
- destinationEncryptionConfiguration JobLoadDestinationEncryptionConfiguration
- Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.
- encoding String
- The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
- fieldDelimiter String
- The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
- ignoreUnknownValues Boolean
- Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names
- jsonExtension String
- If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: - for newline-delimited GeoJSON: set to GEOJSON.
- maxBadRecords Integer
- The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
- nullMarker String
- Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.
- parquetOptions JobLoadParquetOptions
- Parquet Options for load and make external tables. Structure is documented below.
- projectionFields List<String>
- If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.
- quote String
- The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
- schemaUpdateOptions List<String>
- Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
- skipLeadingRows Integer
- The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.
- sourceFormat String
- The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.
- timePartitioning JobLoadTimePartitioning
- Time-based partitioning specification for the destination table. Structure is documented below.
- writeDisposition String
- Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are:WRITE_TRUNCATE,WRITE_APPEND,WRITE_EMPTY.
- destinationTable JobLoadDestinationTable
- The destination table to load the data into. Structure is documented below.
- sourceUris string[]
- The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.
- allowJaggedRows boolean
- Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.
- allowQuotedNewlines boolean
- Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
- autodetect boolean
- Indicates if we should automatically infer the options and schema for CSV and JSON sources.
- createDisposition string
- Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion
Default value is CREATE_IF_NEEDED. Possible values are:CREATE_IF_NEEDED,CREATE_NEVER.
- destinationEncryptionConfiguration JobLoadDestinationEncryptionConfiguration
- Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
- encoding string
- The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
- fieldDelimiter string
- The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
- ignoreUnknownValues boolean
- Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names
- jsonExtension string
- If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: - for newline-delimited GeoJSON: set to GEOJSON.
- maxBadRecords number
- The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
- nullMarker string
- Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.
- parquetOptions JobLoadParquetOptions
- Parquet Options for load and make external tables. Structure is documented below.
- projectionFields string[]
- If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.
- quote string
- The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
- schemaUpdateOptions string[]
- Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
- skipLeadingRows number
- The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.
- sourceFormat string
- The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.
- timePartitioning JobLoadTimePartitioning
- Time-based partitioning specification for the destination table. Structure is documented below.
- writeDisposition string
- Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are:WRITE_TRUNCATE,WRITE_APPEND,WRITE_EMPTY.
- destination_table JobLoadDestinationTable
- The destination table to load the data into. Structure is documented below.
- source_uris Sequence[str]
- The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.
- allow_jagged_rows bool
- Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.
- allow_quoted_newlines bool
- Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
- autodetect bool
- Indicates if we should automatically infer the options and schema for CSV and JSON sources.
- create_disposition str
- Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion
Default value is CREATE_IF_NEEDED. Possible values are:CREATE_IF_NEEDED,CREATE_NEVER.
- destination_encryption_configuration JobLoadDestinationEncryptionConfiguration
- Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
- encoding str
- The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
- field_delimiter str
- The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
- ignore_unknown_values bool
- Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names
- json_extension str
- If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: - for newline-delimited GeoJSON: set to GEOJSON.
- max_bad_records int
- The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
- null_marker str
- Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.
- parquet_options JobLoadParquetOptions
- Parquet Options for load and make external tables. Structure is documented below.
- projection_fields Sequence[str]
- If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.
- quote str
- The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
- schema_update_options Sequence[str]
- Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
- skip_leading_rows int
- The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.
- source_format str
- The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.
- time_partitioning JobLoadTimePartitioning
- Time-based partitioning specification for the destination table. Structure is documented below.
- write_disposition str
- Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are:WRITE_TRUNCATE,WRITE_APPEND,WRITE_EMPTY.
- destinationTable Property Map
- The destination table to load the data into. Structure is documented below.
- sourceUris List<String>
- The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.
- allowJaggedRows Boolean
- Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.
- allowQuotedNewlines Boolean
- Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
- autodetect Boolean
- Indicates if we should automatically infer the options and schema for CSV and JSON sources.
- createDisposition String
- Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion
Default value is CREATE_IF_NEEDED. Possible values are:CREATE_IF_NEEDED,CREATE_NEVER.
- destinationEncryptionConfiguration Property Map
- Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
- encoding String
- The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
- fieldDelimiter String
- The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
- ignoreUnknownValues Boolean
- Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names
- jsonExtension String
- If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: - for newline-delimited GeoJSON: set to GEOJSON.
- maxBadRecords Number
- The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
- nullMarker String
- Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.
- parquetOptions Property Map
- Parquet Options for load and make external tables. Structure is documented below.
- projectionFields List<String>
- If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.
- quote String
- The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
- schemaUpdateOptions List<String>
- Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
- skipLeadingRows Number
- The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.
- sourceFormat String
- The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.
- timePartitioning Property Map
- Time-based partitioning specification for the destination table. Structure is documented below.
- writeDisposition String
- Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are:WRITE_TRUNCATE,WRITE_APPEND,WRITE_EMPTY.
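As an illustrative sketch of how these load properties fit together (the bucket path, dataset, and table names below are placeholders, not values from this page), a CSV load job might look like:
import * as gcp from "@pulumi/gcp";
// Placeholder dataset and table; any existing destination works as well.
const loadDataset = new gcp.bigquery.Dataset("load_dataset", {
    datasetId: "job_load_dataset",
    location: "US",
});
const loadTable = new gcp.bigquery.Table("load_table", {
    deletionProtection: false,
    datasetId: loadDataset.datasetId,
    tableId: "job_load_table",
});
const loadJob = new gcp.bigquery.Job("load_job", {
    jobId: "job_load",
    load: {
        sourceUris: ["gs://my-example-bucket/data/us-states.csv"], // hypothetical object
        destinationTable: {
            projectId: loadTable.project,
            datasetId: loadTable.datasetId,
            tableId: loadTable.tableId,
        },
        sourceFormat: "CSV",
        skipLeadingRows: 1,            // skip the header row
        autodetect: true,              // infer the schema from the file
        writeDisposition: "WRITE_APPEND",
        schemaUpdateOptions: [
            "ALLOW_FIELD_ADDITION",
            "ALLOW_FIELD_RELAXATION",
        ],
    },
});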
JobLoadDestinationEncryptionConfiguration, JobLoadDestinationEncryptionConfigurationArgs          
- KmsKeyName string
- Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
- KmsKeyVersion string
- (Output) Describes the Cloud KMS encryption key version used to protect destination BigQuery table.
- KmsKeyName string
- Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
- KmsKeyVersion string
- (Output) Describes the Cloud KMS encryption key version used to protect destination BigQuery table.
- kmsKeyName String
- Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
- kmsKeyVersion String
- (Output) Describes the Cloud KMS encryption key version used to protect destination BigQuery table.
- kmsKeyName string
- Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
- kmsKeyVersion string
- (Output) Describes the Cloud KMS encryption key version used to protect destination BigQuery table.
- kms_key_name str
- Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
- kms_key_version str
- (Output) Describes the Cloud KMS encryption key version used to protect destination BigQuery table.
- kmsKeyName String
- Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
- kmsKeyVersion String
- (Output) Describes the Cloud KMS encryption key version used to protect destination BigQuery table.
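A minimal sketch of wiring a customer-managed key into a load job's destinationEncryptionConfiguration; the project, dataset, table, and KMS crypto key resource name are placeholders, and kmsKeyVersion is output-only so it is not set here:
import * as gcp from "@pulumi/gcp";
const encryptedLoad = new gcp.bigquery.Job("encrypted_load", {
    jobId: "job_load_cmek",
    load: {
        sourceUris: ["gs://my-example-bucket/data.csv"],
        destinationTable: {
            projectId: "my-project",
            datasetId: "my_dataset",
            tableId: "my_cmek_table",
        },
        destinationEncryptionConfiguration: {
            // Fully-qualified Cloud KMS crypto key name (placeholder).
            kmsKeyName: "projects/my-project/locations/us/keyRings/my-ring/cryptoKeys/my-key",
        },
        sourceFormat: "CSV",
        autodetect: true,
    },
});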
JobLoadDestinationTable, JobLoadDestinationTableArgs        
- TableId string
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- DatasetId string
- The ID of the dataset containing this table.
- ProjectId string
- The ID of the project containing this table.
- TableId string
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- DatasetId string
- The ID of the dataset containing this table.
- ProjectId string
- The ID of the project containing this table.
- tableId String
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- datasetId String
- The ID of the dataset containing this table.
- projectId String
- The ID of the project containing this table.
- tableId string
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- datasetId string
- The ID of the dataset containing this table.
- projectId string
- The ID of the project containing this table.
- table_id str
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- dataset_id str
- The ID of the dataset containing this table.
- project_id str
- The ID of the project containing this table.
- tableId String
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- datasetId String
- The ID of the dataset containing this table.
- projectId String
- The ID of the project containing this table.
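The tableId field accepts either a bare table ID (with projectId and datasetId set alongside it) or the fully qualified form described above. A rough sketch of both, with placeholder project, dataset, table, and bucket names:
import * as gcp from "@pulumi/gcp";
const byParts = new gcp.bigquery.Job("load_by_parts", {
    jobId: "job_load_by_parts",
    load: {
        sourceUris: ["gs://my-example-bucket/data.csv"],
        // Bare table ID plus explicit project and dataset.
        destinationTable: {
            projectId: "my-project",
            datasetId: "my_dataset",
            tableId: "my_table",
        },
        autodetect: true,
    },
});
const byFullName = new gcp.bigquery.Job("load_by_full_name", {
    jobId: "job_load_by_full_name",
    load: {
        sourceUris: ["gs://my-example-bucket/data.csv"],
        // Fully qualified form; projectId and datasetId may then be omitted.
        destinationTable: {
            tableId: "projects/my-project/datasets/my_dataset/tables/my_table",
        },
        autodetect: true,
    },
});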
JobLoadParquetOptions, JobLoadParquetOptionsArgs        
- EnableListInference bool
- If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.
- EnumAsString bool
- If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
- EnableListInference bool
- If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.
- EnumAsString bool
- If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
- enableListInference Boolean
- If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.
- enumAsString Boolean
- If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
- enableListInference boolean
- If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.
- enumAsString boolean
- If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
- enable_list_inference bool
- If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.
- enum_as_string bool
- If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
- enableListInference Boolean
- If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.
- enumAsString Boolean
- If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
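A sketch of a Parquet load that opts into both Parquet-specific behaviors; the bucket and table names are placeholders:
import * as gcp from "@pulumi/gcp";
const parquetLoad = new gcp.bigquery.Job("parquet_load", {
    jobId: "job_load_parquet",
    load: {
        sourceUris: ["gs://my-example-bucket/data/part-000.parquet"],
        destinationTable: {
            projectId: "my-project",
            datasetId: "my_dataset",
            tableId: "my_parquet_table",
        },
        sourceFormat: "PARQUET",
        parquetOptions: {
            enumAsString: true,        // infer Parquet ENUM as STRING
            enableListInference: true, // use schema inference for Parquet LIST
        },
        writeDisposition: "WRITE_TRUNCATE",
    },
});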
JobLoadTimePartitioning, JobLoadTimePartitioningArgs        
- Type string
- The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset.
- ExpirationMs string
- Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.
- Field string
- If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value.
- Type string
- The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset.
- ExpirationMs string
- Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.
- Field string
- If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value.
- type String
- The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset.
- expirationMs String
- Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.
- field String
- If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value.
- type string
- The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset.
- expirationMs string
- Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.
- field string
- If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value.
- type str
- The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset.
- expiration_ms str
- Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.
- field str
- If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value.
- type String
- The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset.
- expirationMs String
- Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.
- field String
- If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value.
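A sketch of loading into a day-partitioned destination, partitioned on a top-level DATE or TIMESTAMP column; the column name and retention value are illustrative:
import * as gcp from "@pulumi/gcp";
const partitionedLoad = new gcp.bigquery.Job("partitioned_load", {
    jobId: "job_load_partitioned",
    load: {
        sourceUris: ["gs://my-example-bucket/events.csv"],
        destinationTable: {
            projectId: "my-project",
            datasetId: "my_dataset",
            tableId: "events",
        },
        autodetect: true,
        timePartitioning: {
            type: "DAY",                // only DAY is supported
            field: "event_date",        // hypothetical top-level DATE column
            expirationMs: "7776000000", // keep partition storage for ~90 days
        },
    },
});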
JobQuery, JobQueryArgs    
- Query string
- SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL.
NOTE: queries containing DML language
(DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
- AllowLargeResults bool
- If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size.
- CreateDisposition string
- Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion
Default value is CREATE_IF_NEEDED. Possible values are:CREATE_IF_NEEDED,CREATE_NEVER.
- DefaultDataset JobQueryDefaultDataset
- Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below.
- DestinationEncryptionConfiguration JobQueryDestinationEncryptionConfiguration
- Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
- DestinationTable JobQueryDestinationTable
- Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below.
- FlattenResults bool
- If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.
- MaximumBillingTier int
- Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.
- MaximumBytesBilled string
- Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.
- ParameterMode string
- Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.
- Priority string
- Specifies a priority for the query.
Default value is INTERACTIVE. Possible values are:INTERACTIVE,BATCH.
- SchemaUpdateOptions List<string>
- Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
- ScriptOptions JobQueryScriptOptions
- Options controlling the execution of scripts. Structure is documented below.
- UseLegacySql bool
- Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL.
- UseQueryCache bool
- Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.
- UserDefinedFunctionResources List<JobQueryUserDefinedFunctionResource>
- Describes user-defined function resources used in the query. Structure is documented below.
- WriteDisposition string
- Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are:WRITE_TRUNCATE,WRITE_APPEND,WRITE_EMPTY.
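A sketch of a standard-SQL query job that writes into an explicit destination table and caps billed bytes; the SQL, project, dataset, and limit values are placeholders (for DML statements, remember to set createDisposition and writeDisposition to empty strings as noted above):
import * as gcp from "@pulumi/gcp";
const queryJob = new gcp.bigquery.Job("query_job", {
    jobId: "job_query_standard_sql",
    query: {
        query: "SELECT name, SUM(number) AS total FROM `bigquery-public-data.usa_names.usa_1910_2013` GROUP BY name",
        useLegacySql: false,              // run as standard SQL
        priority: "BATCH",                // queue instead of running interactively
        maximumBytesBilled: "1000000000", // fail the query past ~1 GB billed
        destinationTable: {
            projectId: "my-project",
            datasetId: "my_dataset",
            tableId: "name_totals",
        },
        writeDisposition: "WRITE_TRUNCATE",
    },
});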
- Query string
- SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL.
NOTE: queries containing DML language
(DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
- AllowLargeResults bool
- If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size.
- CreateDisposition string
- Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion
Default value is CREATE_IF_NEEDED. Possible values are:CREATE_IF_NEEDED,CREATE_NEVER.
- DefaultDataset JobQueryDefaultDataset
- Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below.
- DestinationEncryptionConfiguration JobQueryDestinationEncryptionConfiguration
- Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
- DestinationTable JobQueryDestinationTable
- Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below.
- FlattenResults bool
- If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.
- MaximumBillingTier int
- Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.
- MaximumBytesBilled string
- Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.
- ParameterMode string
- Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.
- Priority string
- Specifies a priority for the query.
Default value is INTERACTIVE. Possible values are:INTERACTIVE,BATCH.
- SchemaUpdateOptions []string
- Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
- ScriptOptions JobQueryScriptOptions
- Options controlling the execution of scripts. Structure is documented below.
- UseLegacySql bool
- Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL.
- UseQueryCache bool
- Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.
- UserDefinedFunctionResources []JobQueryUserDefinedFunctionResource
- Describes user-defined function resources used in the query. Structure is documented below.
- WriteDisposition string
- Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are:WRITE_TRUNCATE,WRITE_APPEND,WRITE_EMPTY.
- query String
- SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL.
NOTE: queries containing DML language
(DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
- allowLargeResults Boolean
- If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size.
- createDisposition String
- Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion
Default value is CREATE_IF_NEEDED. Possible values are:CREATE_IF_NEEDED,CREATE_NEVER.
- defaultDataset JobQueryDefaultDataset
- Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below.
- destinationEncryptionConfiguration JobQueryDestinationEncryptionConfiguration
- Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
- destinationTable JobQueryDestinationTable
- Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below.
- flattenResults Boolean
- If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.
- maximumBillingTier Integer
- Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.
- maximumBytesBilled String
- Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.
- parameterMode String
- Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.
- priority String
- Specifies a priority for the query.
Default value is INTERACTIVE. Possible values are:INTERACTIVE,BATCH.
- schemaUpdateOptions List<String>
- Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
- scriptOptions JobQueryScriptOptions
- Options controlling the execution of scripts. Structure is documented below.
- useLegacySql Boolean
- Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL.
- useQueryCache Boolean
- Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.
- userDefinedFunctionResources List<JobQueryUserDefinedFunctionResource>
- Describes user-defined function resources used in the query. Structure is documented below.
- writeDisposition String
- Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are:WRITE_TRUNCATE,WRITE_APPEND,WRITE_EMPTY.
- query string
- SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL.
NOTE: queries containing DML language
(DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
- allowLargeResults boolean
- If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size.
- createDisposition string
- Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion
Default value is CREATE_IF_NEEDED. Possible values are:CREATE_IF_NEEDED,CREATE_NEVER.
- defaultDataset JobQueryDefaultDataset
- Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below.
- destinationEncryptionConfiguration JobQueryDestinationEncryptionConfiguration
- Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
- destinationTable JobQueryDestinationTable
- Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below.
- flattenResults boolean
- If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.
- maximumBillingTier number
- Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.
- maximumBytesBilled string
- Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.
- parameterMode string
- Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.
- priority string
- Specifies a priority for the query.
Default value is INTERACTIVE. Possible values are:INTERACTIVE,BATCH.
- schemaUpdateOptions string[]
- Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
- scriptOptions JobQueryScriptOptions
- Options controlling the execution of scripts. Structure is documented below.
- useLegacySql boolean
- Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL.
- useQueryCache boolean
- Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.
- userDefinedFunctionResources JobQueryUserDefinedFunctionResource[]
- Describes user-defined function resources used in the query. Structure is documented below.
- writeDisposition string
- Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are:WRITE_TRUNCATE,WRITE_APPEND,WRITE_EMPTY.
- query str
- SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL.
NOTE: queries containing DML language
(DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
- allow_large_results bool
- If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size.
- create_disposition str
- Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion
Default value is CREATE_IF_NEEDED. Possible values are:CREATE_IF_NEEDED,CREATE_NEVER.
- default_dataset JobQueryDefaultDataset
- Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below.
- destination_encryption_configuration JobQueryDestinationEncryptionConfiguration
- Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
- destination_table JobQueryDestinationTable
- Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below.
- flatten_results bool
- If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.
- maximum_billing_tier int
- Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.
- maximum_bytes_billed str
- Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.
- parameter_mode str
- Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.
- priority str
- Specifies a priority for the query.
Default value is INTERACTIVE. Possible values are: INTERACTIVE, BATCH.
- schema_update_options Sequence[str]
- Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
- script_options JobQueryScriptOptions
- Options controlling the execution of scripts. Structure is documented below.
- use_legacy_sql bool
- Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL.
- use_query_cache bool
- Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.
- user_defined_function_resources Sequence[JobQueryUserDefinedFunctionResource]
- Describes user-defined function resources used in the query. Structure is documented below.
- write_disposition str
- Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
- query String
- SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL.
NOTE: queries containing DML language
(DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
- allowLargeResults Boolean
- If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size.
- createDisposition String
- Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
- defaultDataset Property Map
- Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below.
- destinationEncryptionConfiguration Property Map
- Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
- destinationTable Property Map
- Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below.
- flattenResults Boolean
- If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.
- maximumBillingTier Number
- Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.
- maximumBytesBilled String
- Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.
- parameterMode String
- Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.
- priority String
- Specifies a priority for the query.
Default value is INTERACTIVE. Possible values are: INTERACTIVE, BATCH.
- schemaUpdateOptions List<String>
- Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
- scriptOptions Property Map
- Options controlling the execution of scripts. Structure is documented below.
- useLegacySql Boolean
- Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL.
- useQueryCache Boolean
- Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.
- userDefinedFunctionResources List<Property Map>
- Describes user-defined function resources used in the query. Structure is documented below.
- writeDisposition String
- Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
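As a quick illustration of how createDisposition and writeDisposition combine on a query job, here is a minimal TypeScript sketch. It is not the provider's own example; the project, dataset, and table names are placeholders.
import * as gcp from "@pulumi/gcp";

// Sketch: a standard SQL query job that creates the destination table if needed
// and overwrites its contents on every run. All identifiers are placeholders.
const overwriteJob = new gcp.bigquery.Job("overwrite-job", {
    jobId: "job_query_overwrite",
    query: {
        query: "SELECT 1 AS id, 'example' AS name",
        useLegacySql: false,
        createDisposition: "CREATE_IF_NEEDED", // create the table if it does not exist
        writeDisposition: "WRITE_TRUNCATE",    // replace existing data and schema
        destinationTable: {
            projectId: "my-project",
            datasetId: "my_dataset",
            tableId: "disposition_results",
        },
    },
});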
JobQueryDefaultDataset, JobQueryDefaultDatasetArgs        
- dataset_id str
- The dataset. Can be specified {{dataset_id}} if project_id is also set, or of the form projects/{{project}}/datasets/{{dataset_id}} if not.
- project_id str
- The ID of the project containing this dataset.
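A hedged sketch of defaultDataset: when it is set, unqualified table names in the query resolve against the given dataset. The dataset, project, and table names below are placeholders.
import * as gcp from "@pulumi/gcp";

const defaultDatasetJob = new gcp.bigquery.Job("default-dataset-job", {
    jobId: "job_query_default_dataset",
    query: {
        // "events" is unqualified, so it resolves against the default dataset below.
        query: "SELECT COUNT(*) AS n FROM events",
        useLegacySql: false,
        defaultDataset: {
            // Either a bare dataset ID together with projectId, or the full
            // projects/{{project}}/datasets/{{dataset_id}} form on its own.
            datasetId: "my_dataset",
            projectId: "my-project",
        },
    },
});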
JobQueryDestinationEncryptionConfiguration, JobQueryDestinationEncryptionConfigurationArgs          
- KmsKeyName string
- Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
- KmsKeyVersion string
- (Output) Describes the Cloud KMS encryption key version used to protect destination BigQuery table.
- KmsKeyName string
- Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
- KmsKeyVersion string
- (Output) Describes the Cloud KMS encryption key version used to protect destination BigQuery table.
- kmsKeyName String
- Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
- kmsKeyVersion String
- (Output) Describes the Cloud KMS encryption key version used to protect destination BigQuery table.
- kmsKeyName string
- Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
- kmsKeyVersion string
- (Output) Describes the Cloud KMS encryption key version used to protect destination BigQuery table.
- kms_key_name str
- Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
- kms_key_version str
- (Output) Describes the Cloud KMS encryption key version used to protect destination BigQuery table.
- kmsKeyName String
- Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
- kmsKeyVersion String
- (Output) Describes the Cloud KMS encryption key version used to protect destination BigQuery table.
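A sketch of destinationEncryptionConfiguration, assuming a pre-existing Cloud KMS key that the BigQuery service account can already use; the key path and other identifiers are placeholders.
import * as gcp from "@pulumi/gcp";

const encryptedJob = new gcp.bigquery.Job("encrypted-job", {
    jobId: "job_query_cmek",
    query: {
        query: "SELECT 1 AS id",
        useLegacySql: false,
        destinationTable: {
            projectId: "my-project",
            datasetId: "my_dataset",
            tableId: "encrypted_results",
        },
        destinationEncryptionConfiguration: {
            // The BigQuery service account needs encrypt/decrypt access to this key.
            kmsKeyName: "projects/my-project/locations/us/keyRings/my-ring/cryptoKeys/my-key",
        },
    },
});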
JobQueryDestinationTable, JobQueryDestinationTableArgs        
- TableId string
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- DatasetId string
- The ID of the dataset containing this table.
- ProjectId string
- The ID of the project containing this table.
- TableId string
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- DatasetId string
- The ID of the dataset containing this table.
- ProjectId string
- The ID of the project containing this table.
- tableId String
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- datasetId String
- The ID of the dataset containing this table.
- projectId String
- The ID of the project containing this table.
- tableId string
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- datasetId string
- The ID of the dataset containing this table.
- projectId string
- The ID of the project containing this table.
- table_id str
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- dataset_id str
- The ID of the dataset containing this table.
- project_id str
- The ID of the project containing this table.
- tableId String
- The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- datasetId String
- The ID of the dataset containing this table.
- projectId String
- The ID of the project containing this table.
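As noted above, the destination table can also be given as a single fully qualified resource name instead of separate project, dataset, and table IDs. A sketch with placeholder names:
import * as gcp from "@pulumi/gcp";

const qualifiedDestinationJob = new gcp.bigquery.Job("qualified-destination-job", {
    jobId: "job_query_qualified_destination",
    query: {
        query: "SELECT 1 AS id",
        useLegacySql: false,
        destinationTable: {
            // Equivalent to setting projectId, datasetId, and tableId separately.
            tableId: "projects/my-project/datasets/my_dataset/tables/results",
        },
        writeDisposition: "WRITE_TRUNCATE",
    },
});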
JobQueryScriptOptions, JobQueryScriptOptionsArgs        
- KeyResultStatement string
- Determines which statement in the script represents the "key result",
used to populate the schema and query results of the script job.
Possible values are: LAST, FIRST_SELECT.
- StatementByteBudget string
- Limit on the number of bytes billed per statement. Exceeding this budget results in an error.
- StatementTimeoutMs string
- Timeout period for each statement in a script.
- KeyResultStatement string
- Determines which statement in the script represents the "key result",
used to populate the schema and query results of the script job.
Possible values are: LAST, FIRST_SELECT.
- StatementByteBudget string
- Limit on the number of bytes billed per statement. Exceeding this budget results in an error.
- StatementTimeoutMs string
- Timeout period for each statement in a script.
- keyResultStatement String
- Determines which statement in the script represents the "key result",
used to populate the schema and query results of the script job.
Possible values are: LAST, FIRST_SELECT.
- statementByteBudget String
- Limit on the number of bytes billed per statement. Exceeding this budget results in an error.
- statementTimeoutMs String
- Timeout period for each statement in a script.
- keyResultStatement string
- Determines which statement in the script represents the "key result",
used to populate the schema and query results of the script job.
Possible values are: LAST, FIRST_SELECT.
- statementByteBudget string
- Limit on the number of bytes billed per statement. Exceeding this budget results in an error.
- statementTimeoutMs string
- Timeout period for each statement in a script.
- key_result_statement str
- Determines which statement in the script represents the "key result",
used to populate the schema and query results of the script job.
Possible values are: LAST, FIRST_SELECT.
- statement_byte_budget str
- Limit on the number of bytes billed per statement. Exceeding this budget results in an error.
- statement_timeout_ms str
- Timeout period for each statement in a script.
- keyResultStatement String
- Determines which statement in the script represents the "key result",
used to populate the schema and query results of the script job.
Possible values are: LAST, FIRST_SELECT.
- statementByteBudget String
- Limit on the number of bytes billed per statement. Exceeding this budget results in an error.
- statementTimeoutMs String
- Timeout period for each statement in a script.
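A sketch of scriptOptions on a multi-statement script job. The budget and timeout values are illustrative placeholders, and the cleared dispositions follow the DML note above on the assumption that it also applies to scripts; verify against your provider version.
import * as gcp from "@pulumi/gcp";

const scriptJob = new gcp.bigquery.Job("script-job", {
    jobId: "job_query_script",
    query: {
        // A two-statement script; the final SELECT is the "key result".
        query: "DECLARE x INT64 DEFAULT 2; SELECT x * 21 AS answer;",
        useLegacySql: false,
        // Assumption of this sketch: dispositions are left empty, mirroring the DML note.
        createDisposition: "",
        writeDisposition: "",
        scriptOptions: {
            keyResultStatement: "LAST",        // the last statement populates the job result
            statementTimeoutMs: "60000",       // per-statement timeout in milliseconds (placeholder)
            statementByteBudget: "1073741824", // fail any statement billed beyond ~1 GiB (placeholder)
        },
    },
});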
JobQueryUserDefinedFunctionResource, JobQueryUserDefinedFunctionResourceArgs            
- InlineCode string
- An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.
- ResourceUri string
- A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
- InlineCode string
- An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.
- ResourceUri string
- A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
- inlineCode String
- An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.
- resourceUri String
- A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
- inlineCode string
- An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.
- resourceUri string
- A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
- inline_code str
- An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.
- resource_uri str
- A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
- inlineCode String
- An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.
- resourceUri String
- A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
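A sketch of user-defined function resources on a legacy SQL query. The GCS path, inline snippet, and query are placeholders; the referenced function would need to be defined by the UDF code itself.
import * as gcp from "@pulumi/gcp";

const udfJob = new gcp.bigquery.Job("udf-job", {
    jobId: "job_query_udf",
    query: {
        // User-defined function resources apply to the legacy SQL dialect.
        query: "SELECT doubled FROM doubleInput((SELECT 21 AS x))", // placeholder query
        useLegacySql: true,
        userDefinedFunctionResources: [
            { resourceUri: "gs://my-bucket/udfs/double_input.js" },             // code loaded from GCS (placeholder path)
            { inlineCode: "// additional helper code can be supplied inline" }, // or provided inline
        ],
    },
});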
JobStatus, JobStatusArgs    
- ErrorResults List<JobStatusErrorResult>
- (Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.
- Errors List<JobStatusError>
- (Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.
- State string
- (Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.
- ErrorResults []JobStatusErrorResult
- (Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.
- Errors []JobStatusError
- (Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.
- State string
- (Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.
- errorResults List<JobStatusErrorResult>
- (Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.
- errors List<JobStatusError>
- (Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.
- state String
- (Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.
- errorResults JobStatusErrorResult[]
- (Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.
- errors JobStatusError[]
- (Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.
- state string
- (Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.
- error_results Sequence[JobStatusErrorResult]
- (Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.
- errors Sequence[JobStatusError]
- (Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.
- state str
- (Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.
- errorResults List<Property Map>
- (Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.
- errors List<Property Map>
- (Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.
- state String
- (Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.
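Status fields are output-only. A sketch of surfacing them from a Pulumi program, assuming the resource exposes JobStatus values through a list-typed `statuses` output property (an assumption of this sketch, not stated above):
import * as gcp from "@pulumi/gcp";

const statusJob = new gcp.bigquery.Job("status-job", {
    jobId: "job_query_status",
    query: {
        query: "SELECT 1",
        useLegacySql: false,
    },
});

// `statuses` is assumed to be the output carrying the job's JobStatus values.
export const statusJobState = statusJob.statuses.apply(s => s[0]?.state);         // PENDING, RUNNING, or DONE
export const statusJobErrors = statusJob.statuses.apply(s => s[0]?.errorResults); // present only if the job failed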
JobStatusError, JobStatusErrorArgs      
JobStatusErrorResult, JobStatusErrorResultArgs        
Import
Job can be imported using any of these accepted formats:
- projects/{{project}}/jobs/{{job_id}}/location/{{location}}
- projects/{{project}}/jobs/{{job_id}}
- {{project}}/{{job_id}}/{{location}}
- {{job_id}}/{{location}}
- {{project}}/{{job_id}}
- {{job_id}}
When using the pulumi import command, Job can be imported using one of the formats above. For example:
$ pulumi import gcp:bigquery/job:Job default projects/{{project}}/jobs/{{job_id}}/location/{{location}}
$ pulumi import gcp:bigquery/job:Job default projects/{{project}}/jobs/{{job_id}}
$ pulumi import gcp:bigquery/job:Job default {{project}}/{{job_id}}/{{location}}
$ pulumi import gcp:bigquery/job:Job default {{job_id}}/{{location}}
$ pulumi import gcp:bigquery/job:Job default {{project}}/{{job_id}}
$ pulumi import gcp:bigquery/job:Job default {{job_id}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the google-beta Terraform Provider.