gcp.dataproc.Batch
Dataproc Serverless Batches lets you run Spark workloads without requiring you to provision and manage your own Dataproc cluster.
For more information about Batch, see:
- API documentation
- How-to Guides
Example Usage
Dataproc Batch Spark
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const exampleBatchSpark = new gcp.dataproc.Batch("example_batch_spark", {
    batchId: "tf-test-batch_88722",
    location: "us-central1",
    labels: {
        batch_test: "terraform",
    },
    runtimeConfig: {
        properties: {
            "spark.dynamicAllocation.enabled": "false",
            "spark.executor.instances": "2",
        },
    },
    environmentConfig: {
        executionConfig: {
            subnetworkUri: "default",
            ttl: "3600s",
            networkTags: ["tag1"],
        },
    },
    sparkBatch: {
        mainClass: "org.apache.spark.examples.SparkPi",
        args: ["10"],
        jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
    },
});
import pulumi
import pulumi_gcp as gcp
example_batch_spark = gcp.dataproc.Batch("example_batch_spark",
    batch_id="tf-test-batch_88722",
    location="us-central1",
    labels={
        "batch_test": "terraform",
    },
    runtime_config={
        "properties": {
            "spark.dynamicAllocation.enabled": "false",
            "spark.executor.instances": "2",
        },
    },
    environment_config={
        "execution_config": {
            "subnetwork_uri": "default",
            "ttl": "3600s",
            "network_tags": ["tag1"],
        },
    },
    spark_batch={
        "main_class": "org.apache.spark.examples.SparkPi",
        "args": ["10"],
        "jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewBatch(ctx, "example_batch_spark", &dataproc.BatchArgs{
			BatchId:  pulumi.String("tf-test-batch_88722"),
			Location: pulumi.String("us-central1"),
			Labels: pulumi.StringMap{
				"batch_test": pulumi.String("terraform"),
			},
			RuntimeConfig: &dataproc.BatchRuntimeConfigArgs{
				Properties: pulumi.StringMap{
					"spark.dynamicAllocation.enabled": pulumi.String("false"),
					"spark.executor.instances":        pulumi.String("2"),
				},
			},
			EnvironmentConfig: &dataproc.BatchEnvironmentConfigArgs{
				ExecutionConfig: &dataproc.BatchEnvironmentConfigExecutionConfigArgs{
					SubnetworkUri: pulumi.String("default"),
					Ttl:           pulumi.String("3600s"),
					NetworkTags: pulumi.StringArray{
						pulumi.String("tag1"),
					},
				},
			},
			SparkBatch: &dataproc.BatchSparkBatchArgs{
				MainClass: pulumi.String("org.apache.spark.examples.SparkPi"),
				Args: pulumi.StringArray{
					pulumi.String("10"),
				},
				JarFileUris: pulumi.StringArray{
					pulumi.String("file:///usr/lib/spark/examples/jars/spark-examples.jar"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var exampleBatchSpark = new Gcp.Dataproc.Batch("example_batch_spark", new()
    {
        BatchId = "tf-test-batch_88722",
        Location = "us-central1",
        Labels = 
        {
            { "batch_test", "terraform" },
        },
        RuntimeConfig = new Gcp.Dataproc.Inputs.BatchRuntimeConfigArgs
        {
            Properties = 
            {
                { "spark.dynamicAllocation.enabled", "false" },
                { "spark.executor.instances", "2" },
            },
        },
        EnvironmentConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigArgs
        {
            ExecutionConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigExecutionConfigArgs
            {
                SubnetworkUri = "default",
                Ttl = "3600s",
                NetworkTags = new[]
                {
                    "tag1",
                },
            },
        },
        SparkBatch = new Gcp.Dataproc.Inputs.BatchSparkBatchArgs
        {
            MainClass = "org.apache.spark.examples.SparkPi",
            Args = new[]
            {
                "10",
            },
            JarFileUris = new[]
            {
                "file:///usr/lib/spark/examples/jars/spark-examples.jar",
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataproc.Batch;
import com.pulumi.gcp.dataproc.BatchArgs;
import com.pulumi.gcp.dataproc.inputs.BatchRuntimeConfigArgs;
import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigArgs;
import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigExecutionConfigArgs;
import com.pulumi.gcp.dataproc.inputs.BatchSparkBatchArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var exampleBatchSpark = new Batch("exampleBatchSpark", BatchArgs.builder()
            .batchId("tf-test-batch_88722")
            .location("us-central1")
            .labels(Map.of("batch_test", "terraform"))
            .runtimeConfig(BatchRuntimeConfigArgs.builder()
                .properties(Map.ofEntries(
                    Map.entry("spark.dynamicAllocation.enabled", "false"),
                    Map.entry("spark.executor.instances", "2")
                ))
                .build())
            .environmentConfig(BatchEnvironmentConfigArgs.builder()
                .executionConfig(BatchEnvironmentConfigExecutionConfigArgs.builder()
                    .subnetworkUri("default")
                    .ttl("3600s")
                    .networkTags("tag1")
                    .build())
                .build())
            .sparkBatch(BatchSparkBatchArgs.builder()
                .mainClass("org.apache.spark.examples.SparkPi")
                .args("10")
                .jarFileUris("file:///usr/lib/spark/examples/jars/spark-examples.jar")
                .build())
            .build());
    }
}
resources:
  exampleBatchSpark:
    type: gcp:dataproc:Batch
    name: example_batch_spark
    properties:
      batchId: tf-test-batch_88722
      location: us-central1
      labels:
        batch_test: terraform
      runtimeConfig:
        properties:
          spark.dynamicAllocation.enabled: 'false'
          spark.executor.instances: '2'
      environmentConfig:
        executionConfig:
          subnetworkUri: default
          ttl: 3600s
          networkTags:
            - tag1
      sparkBatch:
        mainClass: org.apache.spark.examples.SparkPi
        args:
          - '10'
        jarFileUris:
          - file:///usr/lib/spark/examples/jars/spark-examples.jar
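Continuing the TypeScript example above, the batch's server-computed attributes can also be exported as stack outputs. A minimal sketch, assuming the resource exposes state and uuid outputs (named after the corresponding fields of the Dataproc Batches API):
// Export computed attributes of the batch for inspection with `pulumi stack output`.
export const batchState = exampleBatchSpark.state;
export const batchUuid = exampleBatchSpark.uuid;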
Dataproc Batch Spark Full
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const project = gcp.organizations.getProject({});
const gcsAccount = gcp.storage.getProjectServiceAccount({});
const bucket = new gcp.storage.Bucket("bucket", {
    uniformBucketLevelAccess: true,
    name: "dataproc-bucket",
    location: "US",
    forceDestroy: true,
});
const cryptoKeyMember1 = new gcp.kms.CryptoKeyIAMMember("crypto_key_member_1", {
    cryptoKeyId: "example-key",
    role: "roles/cloudkms.cryptoKeyEncrypterDecrypter",
    member: project.then(project => `serviceAccount:service-${project.number}@dataproc-accounts.iam.gserviceaccount.com`),
});
const ms = new gcp.dataproc.MetastoreService("ms", {
    serviceId: "dataproc-batch",
    location: "us-central1",
    port: 9080,
    tier: "DEVELOPER",
    maintenanceWindow: {
        hourOfDay: 2,
        dayOfWeek: "SUNDAY",
    },
    hiveMetastoreConfig: {
        version: "3.1.2",
    },
});
const basic = new gcp.dataproc.Cluster("basic", {
    name: "dataproc-batch",
    region: "us-central1",
    clusterConfig: {
        softwareConfig: {
            overrideProperties: {
                "dataproc:dataproc.allow.zero.workers": "true",
                "spark:spark.history.fs.logDirectory": pulumi.interpolate`gs://${bucket.name}/*/spark-job-history`,
            },
        },
        endpointConfig: {
            enableHttpPortAccess: true,
        },
        masterConfig: {
            numInstances: 1,
            machineType: "e2-standard-2",
            diskConfig: {
                bootDiskSizeGb: 35,
            },
        },
        metastoreConfig: {
            dataprocMetastoreService: ms.name,
        },
    },
});
const exampleBatchSpark = new gcp.dataproc.Batch("example_batch_spark", {
    batchId: "dataproc-batch",
    location: "us-central1",
    labels: {
        batch_test: "terraform",
    },
    runtimeConfig: {
        properties: {
            "spark.dynamicAllocation.enabled": "false",
            "spark.executor.instances": "2",
        },
        version: "2.2",
    },
    environmentConfig: {
        executionConfig: {
            ttl: "3600s",
            networkTags: ["tag1"],
            kmsKey: "example-key",
            networkUri: "default",
            serviceAccount: project.then(project => `${project.number}-compute@developer.gserviceaccount.com`),
            stagingBucket: bucket.name,
        },
        peripheralsConfig: {
            metastoreService: ms.name,
            sparkHistoryServerConfig: {
                dataprocCluster: basic.id,
            },
        },
    },
    sparkBatch: {
        mainClass: "org.apache.spark.examples.SparkPi",
        args: ["10"],
        jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
    },
}, {
    dependsOn: [cryptoKeyMember1],
});
import pulumi
import pulumi_gcp as gcp
project = gcp.organizations.get_project()
gcs_account = gcp.storage.get_project_service_account()
bucket = gcp.storage.Bucket("bucket",
    uniform_bucket_level_access=True,
    name="dataproc-bucket",
    location="US",
    force_destroy=True)
crypto_key_member1 = gcp.kms.CryptoKeyIAMMember("crypto_key_member_1",
    crypto_key_id="example-key",
    role="roles/cloudkms.cryptoKeyEncrypterDecrypter",
    member=f"serviceAccount:service-{project.number}@dataproc-accounts.iam.gserviceaccount.com")
ms = gcp.dataproc.MetastoreService("ms",
    service_id="dataproc-batch",
    location="us-central1",
    port=9080,
    tier="DEVELOPER",
    maintenance_window={
        "hour_of_day": 2,
        "day_of_week": "SUNDAY",
    },
    hive_metastore_config={
        "version": "3.1.2",
    })
basic = gcp.dataproc.Cluster("basic",
    name="dataproc-batch",
    region="us-central1",
    cluster_config={
        "software_config": {
            "override_properties": {
                "dataproc:dataproc.allow.zero.workers": "true",
                "spark:spark.history.fs.logDirectory": bucket.name.apply(lambda name: f"gs://{name}/*/spark-job-history"),
            },
        },
        "endpoint_config": {
            "enable_http_port_access": True,
        },
        "master_config": {
            "num_instances": 1,
            "machine_type": "e2-standard-2",
            "disk_config": {
                "boot_disk_size_gb": 35,
            },
        },
        "metastore_config": {
            "dataproc_metastore_service": ms.name,
        },
    })
example_batch_spark = gcp.dataproc.Batch("example_batch_spark",
    batch_id="dataproc-batch",
    location="us-central1",
    labels={
        "batch_test": "terraform",
    },
    runtime_config={
        "properties": {
            "spark.dynamicAllocation.enabled": "false",
            "spark.executor.instances": "2",
        },
        "version": "2.2",
    },
    environment_config={
        "execution_config": {
            "ttl": "3600s",
            "network_tags": ["tag1"],
            "kms_key": "example-key",
            "network_uri": "default",
            "service_account": f"{project.number}-compute@developer.gserviceaccount.com",
            "staging_bucket": bucket.name,
        },
        "peripherals_config": {
            "metastore_service": ms.name,
            "spark_history_server_config": {
                "dataproc_cluster": basic.id,
            },
        },
    },
    spark_batch={
        "main_class": "org.apache.spark.examples.SparkPi",
        "args": ["10"],
        "jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
    },
    opts=pulumi.ResourceOptions(depends_on=[crypto_key_member1]))
package main
import (
	"fmt"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/kms"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/storage"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		project, err := organizations.LookupProject(ctx, &organizations.LookupProjectArgs{}, nil)
		if err != nil {
			return err
		}
		_, err = storage.GetProjectServiceAccount(ctx, &storage.GetProjectServiceAccountArgs{}, nil)
		if err != nil {
			return err
		}
		bucket, err := storage.NewBucket(ctx, "bucket", &storage.BucketArgs{
			UniformBucketLevelAccess: pulumi.Bool(true),
			Name:                     pulumi.String("dataproc-bucket"),
			Location:                 pulumi.String("US"),
			ForceDestroy:             pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		cryptoKeyMember1, err := kms.NewCryptoKeyIAMMember(ctx, "crypto_key_member_1", &kms.CryptoKeyIAMMemberArgs{
			CryptoKeyId: pulumi.String("example-key"),
			Role:        pulumi.String("roles/cloudkms.cryptoKeyEncrypterDecrypter"),
			Member:      pulumi.Sprintf("serviceAccount:service-%v@dataproc-accounts.iam.gserviceaccount.com", project.Number),
		})
		if err != nil {
			return err
		}
		ms, err := dataproc.NewMetastoreService(ctx, "ms", &dataproc.MetastoreServiceArgs{
			ServiceId: pulumi.String("dataproc-batch"),
			Location:  pulumi.String("us-central1"),
			Port:      pulumi.Int(9080),
			Tier:      pulumi.String("DEVELOPER"),
			MaintenanceWindow: &dataproc.MetastoreServiceMaintenanceWindowArgs{
				HourOfDay: pulumi.Int(2),
				DayOfWeek: pulumi.String("SUNDAY"),
			},
			HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
				Version: pulumi.String("3.1.2"),
			},
		})
		if err != nil {
			return err
		}
		basic, err := dataproc.NewCluster(ctx, "basic", &dataproc.ClusterArgs{
			Name:   pulumi.String("dataproc-batch"),
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				SoftwareConfig: &dataproc.ClusterClusterConfigSoftwareConfigArgs{
					OverrideProperties: pulumi.StringMap{
						"dataproc:dataproc.allow.zero.workers": pulumi.String("true"),
						"spark:spark.history.fs.logDirectory": bucket.Name.ApplyT(func(name string) (string, error) {
							return fmt.Sprintf("gs://%v/*/spark-job-history", name), nil
						}).(pulumi.StringOutput),
					},
				},
				EndpointConfig: &dataproc.ClusterClusterConfigEndpointConfigArgs{
					EnableHttpPortAccess: pulumi.Bool(true),
				},
				MasterConfig: &dataproc.ClusterClusterConfigMasterConfigArgs{
					NumInstances: pulumi.Int(1),
					MachineType:  pulumi.String("e2-standard-2"),
					DiskConfig: &dataproc.ClusterClusterConfigMasterConfigDiskConfigArgs{
						BootDiskSizeGb: pulumi.Int(35),
					},
				},
				MetastoreConfig: &dataproc.ClusterClusterConfigMetastoreConfigArgs{
					DataprocMetastoreService: ms.Name,
				},
			},
		})
		if err != nil {
			return err
		}
		_, err = dataproc.NewBatch(ctx, "example_batch_spark", &dataproc.BatchArgs{
			BatchId:  pulumi.String("dataproc-batch"),
			Location: pulumi.String("us-central1"),
			Labels: pulumi.StringMap{
				"batch_test": pulumi.String("terraform"),
			},
			RuntimeConfig: &dataproc.BatchRuntimeConfigArgs{
				Properties: pulumi.StringMap{
					"spark.dynamicAllocation.enabled": pulumi.String("false"),
					"spark.executor.instances":        pulumi.String("2"),
				},
				Version: pulumi.String("2.2"),
			},
			EnvironmentConfig: &dataproc.BatchEnvironmentConfigArgs{
				ExecutionConfig: &dataproc.BatchEnvironmentConfigExecutionConfigArgs{
					Ttl: pulumi.String("3600s"),
					NetworkTags: pulumi.StringArray{
						pulumi.String("tag1"),
					},
					KmsKey:         pulumi.String("example-key"),
					NetworkUri:     pulumi.String("default"),
					ServiceAccount: pulumi.Sprintf("%v-compute@developer.gserviceaccount.com", project.Number),
					StagingBucket:  bucket.Name,
				},
				PeripheralsConfig: &dataproc.BatchEnvironmentConfigPeripheralsConfigArgs{
					MetastoreService: ms.Name,
					SparkHistoryServerConfig: &dataproc.BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigArgs{
						DataprocCluster: basic.ID(),
					},
				},
			},
			SparkBatch: &dataproc.BatchSparkBatchArgs{
				MainClass: pulumi.String("org.apache.spark.examples.SparkPi"),
				Args: pulumi.StringArray{
					pulumi.String("10"),
				},
				JarFileUris: pulumi.StringArray{
					pulumi.String("file:///usr/lib/spark/examples/jars/spark-examples.jar"),
				},
			},
		}, pulumi.DependsOn([]pulumi.Resource{
			cryptoKeyMember1,
		}))
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var project = Gcp.Organizations.GetProject.Invoke();
    var gcsAccount = Gcp.Storage.GetProjectServiceAccount.Invoke();
    var bucket = new Gcp.Storage.Bucket("bucket", new()
    {
        UniformBucketLevelAccess = true,
        Name = "dataproc-bucket",
        Location = "US",
        ForceDestroy = true,
    });
    var cryptoKeyMember1 = new Gcp.Kms.CryptoKeyIAMMember("crypto_key_member_1", new()
    {
        CryptoKeyId = "example-key",
        Role = "roles/cloudkms.cryptoKeyEncrypterDecrypter",
        Member = $"serviceAccount:service-{project.Apply(getProjectResult => getProjectResult.Number)}@dataproc-accounts.iam.gserviceaccount.com",
    });
    var ms = new Gcp.Dataproc.MetastoreService("ms", new()
    {
        ServiceId = "dataproc-batch",
        Location = "us-central1",
        Port = 9080,
        Tier = "DEVELOPER",
        MaintenanceWindow = new Gcp.Dataproc.Inputs.MetastoreServiceMaintenanceWindowArgs
        {
            HourOfDay = 2,
            DayOfWeek = "SUNDAY",
        },
        HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
        {
            Version = "3.1.2",
        },
    });
    var basic = new Gcp.Dataproc.Cluster("basic", new()
    {
        Name = "dataproc-batch",
        Region = "us-central1",
        ClusterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigArgs
        {
            SoftwareConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigSoftwareConfigArgs
            {
                OverrideProperties = 
                {
                    { "dataproc:dataproc.allow.zero.workers", "true" },
                    { "spark:spark.history.fs.logDirectory", bucket.Name.Apply(name => $"gs://{name}/*/spark-job-history") },
                },
            },
            EndpointConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigEndpointConfigArgs
            {
                EnableHttpPortAccess = true,
            },
            MasterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigArgs
            {
                NumInstances = 1,
                MachineType = "e2-standard-2",
                DiskConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigDiskConfigArgs
                {
                    BootDiskSizeGb = 35,
                },
            },
            MetastoreConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMetastoreConfigArgs
            {
                DataprocMetastoreService = ms.Name,
            },
        },
    });
    var exampleBatchSpark = new Gcp.Dataproc.Batch("example_batch_spark", new()
    {
        BatchId = "dataproc-batch",
        Location = "us-central1",
        Labels = 
        {
            { "batch_test", "terraform" },
        },
        RuntimeConfig = new Gcp.Dataproc.Inputs.BatchRuntimeConfigArgs
        {
            Properties = 
            {
                { "spark.dynamicAllocation.enabled", "false" },
                { "spark.executor.instances", "2" },
            },
            Version = "2.2",
        },
        EnvironmentConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigArgs
        {
            ExecutionConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigExecutionConfigArgs
            {
                Ttl = "3600s",
                NetworkTags = new[]
                {
                    "tag1",
                },
                KmsKey = "example-key",
                NetworkUri = "default",
                ServiceAccount = $"{project.Apply(getProjectResult => getProjectResult.Number)}-compute@developer.gserviceaccount.com",
                StagingBucket = bucket.Name,
            },
            PeripheralsConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigPeripheralsConfigArgs
            {
                MetastoreService = ms.Name,
                SparkHistoryServerConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigArgs
                {
                    DataprocCluster = basic.Id,
                },
            },
        },
        SparkBatch = new Gcp.Dataproc.Inputs.BatchSparkBatchArgs
        {
            MainClass = "org.apache.spark.examples.SparkPi",
            Args = new[]
            {
                "10",
            },
            JarFileUris = new[]
            {
                "file:///usr/lib/spark/examples/jars/spark-examples.jar",
            },
        },
    }, new CustomResourceOptions
    {
        DependsOn =
        {
            cryptoKeyMember1,
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.organizations.OrganizationsFunctions;
import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
import com.pulumi.gcp.storage.StorageFunctions;
import com.pulumi.gcp.storage.inputs.GetProjectServiceAccountArgs;
import com.pulumi.gcp.storage.Bucket;
import com.pulumi.gcp.storage.BucketArgs;
import com.pulumi.gcp.kms.CryptoKeyIAMMember;
import com.pulumi.gcp.kms.CryptoKeyIAMMemberArgs;
import com.pulumi.gcp.dataproc.MetastoreService;
import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceMaintenanceWindowArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
import com.pulumi.gcp.dataproc.Cluster;
import com.pulumi.gcp.dataproc.ClusterArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigSoftwareConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigEndpointConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigMasterConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigMasterConfigDiskConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigMetastoreConfigArgs;
import com.pulumi.gcp.dataproc.Batch;
import com.pulumi.gcp.dataproc.BatchArgs;
import com.pulumi.gcp.dataproc.inputs.BatchRuntimeConfigArgs;
import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigArgs;
import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigExecutionConfigArgs;
import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigPeripheralsConfigArgs;
import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigArgs;
import com.pulumi.gcp.dataproc.inputs.BatchSparkBatchArgs;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        final var project = OrganizationsFunctions.getProject();
        final var gcsAccount = StorageFunctions.getProjectServiceAccount();
        var bucket = new Bucket("bucket", BucketArgs.builder()
            .uniformBucketLevelAccess(true)
            .name("dataproc-bucket")
            .location("US")
            .forceDestroy(true)
            .build());
        var cryptoKeyMember1 = new CryptoKeyIAMMember("cryptoKeyMember1", CryptoKeyIAMMemberArgs.builder()
            .cryptoKeyId("example-key")
            .role("roles/cloudkms.cryptoKeyEncrypterDecrypter")
            .member(String.format("serviceAccount:service-%s@dataproc-accounts.iam.gserviceaccount.com", project.applyValue(getProjectResult -> getProjectResult.number())))
            .build());
        var ms = new MetastoreService("ms", MetastoreServiceArgs.builder()
            .serviceId("dataproc-batch")
            .location("us-central1")
            .port(9080)
            .tier("DEVELOPER")
            .maintenanceWindow(MetastoreServiceMaintenanceWindowArgs.builder()
                .hourOfDay(2)
                .dayOfWeek("SUNDAY")
                .build())
            .hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
                .version("3.1.2")
                .build())
            .build());
        var basic = new Cluster("basic", ClusterArgs.builder()
            .name("dataproc-batch")
            .region("us-central1")
            .clusterConfig(ClusterClusterConfigArgs.builder()
                .softwareConfig(ClusterClusterConfigSoftwareConfigArgs.builder()
                    .overrideProperties(bucket.name().applyValue(name -> Map.ofEntries(
                        Map.entry("dataproc:dataproc.allow.zero.workers", "true"),
                        Map.entry("spark:spark.history.fs.logDirectory", String.format("gs://%s/*/spark-job-history", name))
                    )))
                    .build())
                .endpointConfig(ClusterClusterConfigEndpointConfigArgs.builder()
                    .enableHttpPortAccess(true)
                    .build())
                .masterConfig(ClusterClusterConfigMasterConfigArgs.builder()
                    .numInstances(1)
                    .machineType("e2-standard-2")
                    .diskConfig(ClusterClusterConfigMasterConfigDiskConfigArgs.builder()
                        .bootDiskSizeGb(35)
                        .build())
                    .build())
                .metastoreConfig(ClusterClusterConfigMetastoreConfigArgs.builder()
                    .dataprocMetastoreService(ms.name())
                    .build())
                .build())
            .build());
        var exampleBatchSpark = new Batch("exampleBatchSpark", BatchArgs.builder()
            .batchId("dataproc-batch")
            .location("us-central1")
            .labels(Map.of("batch_test", "terraform"))
            .runtimeConfig(BatchRuntimeConfigArgs.builder()
                .properties(Map.ofEntries(
                    Map.entry("spark.dynamicAllocation.enabled", "false"),
                    Map.entry("spark.executor.instances", "2")
                ))
                .version("2.2")
                .build())
            .environmentConfig(BatchEnvironmentConfigArgs.builder()
                .executionConfig(BatchEnvironmentConfigExecutionConfigArgs.builder()
                    .ttl("3600s")
                    .networkTags("tag1")
                    .kmsKey("example-key")
                    .networkUri("default")
                    .serviceAccount(project.applyValue(getProjectResult -> String.format("%s-compute@developer.gserviceaccount.com", getProjectResult.number())))
                    .stagingBucket(bucket.name())
                    .build())
                .peripheralsConfig(BatchEnvironmentConfigPeripheralsConfigArgs.builder()
                    .metastoreService(ms.name())
                    .sparkHistoryServerConfig(BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigArgs.builder()
                        .dataprocCluster(basic.id())
                        .build())
                    .build())
                .build())
            .sparkBatch(BatchSparkBatchArgs.builder()
                .mainClass("org.apache.spark.examples.SparkPi")
                .args("10")
                .jarFileUris("file:///usr/lib/spark/examples/jars/spark-examples.jar")
                .build())
            .build(), CustomResourceOptions.builder()
                .dependsOn(cryptoKeyMember1)
                .build());
    }
}
resources:
  exampleBatchSpark:
    type: gcp:dataproc:Batch
    name: example_batch_spark
    properties:
      batchId: dataproc-batch
      location: us-central1
      labels:
        batch_test: terraform
      runtimeConfig:
        properties:
          spark.dynamicAllocation.enabled: 'false'
          spark.executor.instances: '2'
        version: '2.2'
      environmentConfig:
        executionConfig:
          ttl: 3600s
          networkTags:
            - tag1
          kmsKey: example-key
          networkUri: default
          serviceAccount: ${project.number}-compute@developer.gserviceaccount.com
          stagingBucket: ${bucket.name}
        peripheralsConfig:
          metastoreService: ${ms.name}
          sparkHistoryServerConfig:
            dataprocCluster: ${basic.id}
      sparkBatch:
        mainClass: org.apache.spark.examples.SparkPi
        args:
          - '10'
        jarFileUris:
          - file:///usr/lib/spark/examples/jars/spark-examples.jar
    options:
      dependsOn:
        - ${cryptoKeyMember1}
  bucket:
    type: gcp:storage:Bucket
    properties:
      uniformBucketLevelAccess: true
      name: dataproc-bucket
      location: US
      forceDestroy: true
  cryptoKeyMember1:
    type: gcp:kms:CryptoKeyIAMMember
    name: crypto_key_member_1
    properties:
      cryptoKeyId: example-key
      role: roles/cloudkms.cryptoKeyEncrypterDecrypter
      member: serviceAccount:service-${project.number}@dataproc-accounts.iam.gserviceaccount.com
  basic:
    type: gcp:dataproc:Cluster
    properties:
      name: dataproc-batch
      region: us-central1
      clusterConfig:
        softwareConfig:
          overrideProperties:
            dataproc:dataproc.allow.zero.workers: 'true'
            spark:spark.history.fs.logDirectory: gs://${bucket.name}/*/spark-job-history
        endpointConfig:
          enableHttpPortAccess: true
        masterConfig:
          numInstances: 1
          machineType: e2-standard-2
          diskConfig:
            bootDiskSizeGb: 35
        metastoreConfig:
          dataprocMetastoreService: ${ms.name}
  ms:
    type: gcp:dataproc:MetastoreService
    properties:
      serviceId: dataproc-batch
      location: us-central1
      port: 9080
      tier: DEVELOPER
      maintenanceWindow:
        hourOfDay: 2
        dayOfWeek: SUNDAY
      hiveMetastoreConfig:
        version: 3.1.2
variables:
  project:
    fn::invoke:
      function: gcp:organizations:getProject
      arguments: {}
  gcsAccount:
    fn::invoke:
      function: gcp:storage:getProjectServiceAccount
      arguments: {}
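The dependsOn on cryptoKeyMember1 ensures the Dataproc service agent can use the KMS key before the batch is created. The gcsAccount lookup (the project's Cloud Storage service agent) is not referenced above; it becomes relevant when the staging bucket itself is CMEK-encrypted with the same key, in which case that agent needs access too. A hedged TypeScript sketch continuing the example, where the emailAddress field of the lookup result is an assumption:
// Hypothetical: also grant the Cloud Storage service agent use of the key,
// needed only if the staging bucket is CMEK-encrypted with it.
const cryptoKeyMember2 = new gcp.kms.CryptoKeyIAMMember("crypto_key_member_2", {
    cryptoKeyId: "example-key",
    role: "roles/cloudkms.cryptoKeyEncrypterDecrypter",
    member: gcsAccount.then(account => `serviceAccount:${account.emailAddress}`),
});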
Dataproc Batch Sparksql
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const exampleBatchSparsql = new gcp.dataproc.Batch("example_batch_sparsql", {
    batchId: "tf-test-batch_39249",
    location: "us-central1",
    runtimeConfig: {
        properties: {
            "spark.dynamicAllocation.enabled": "false",
            "spark.executor.instances": "2",
        },
    },
    environmentConfig: {
        executionConfig: {
            subnetworkUri: "default",
        },
    },
    sparkSqlBatch: {
        queryFileUri: "gs://dataproc-examples/spark-sql/natality/cigarette_correlations.sql",
        jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
        queryVariables: {
            name: "value",
        },
    },
});
import pulumi
import pulumi_gcp as gcp
example_batch_sparsql = gcp.dataproc.Batch("example_batch_sparsql",
    batch_id="tf-test-batch_39249",
    location="us-central1",
    runtime_config={
        "properties": {
            "spark.dynamicAllocation.enabled": "false",
            "spark.executor.instances": "2",
        },
    },
    environment_config={
        "execution_config": {
            "subnetwork_uri": "default",
        },
    },
    spark_sql_batch={
        "query_file_uri": "gs://dataproc-examples/spark-sql/natality/cigarette_correlations.sql",
        "jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
        "query_variables": {
            "name": "value",
        },
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewBatch(ctx, "example_batch_sparsql", &dataproc.BatchArgs{
			BatchId:  pulumi.String("tf-test-batch_39249"),
			Location: pulumi.String("us-central1"),
			RuntimeConfig: &dataproc.BatchRuntimeConfigArgs{
				Properties: pulumi.StringMap{
					"spark.dynamicAllocation.enabled": pulumi.String("false"),
					"spark.executor.instances":        pulumi.String("2"),
				},
			},
			EnvironmentConfig: &dataproc.BatchEnvironmentConfigArgs{
				ExecutionConfig: &dataproc.BatchEnvironmentConfigExecutionConfigArgs{
					SubnetworkUri: pulumi.String("default"),
				},
			},
			SparkSqlBatch: &dataproc.BatchSparkSqlBatchArgs{
				QueryFileUri: pulumi.String("gs://dataproc-examples/spark-sql/natality/cigarette_correlations.sql"),
				JarFileUris: pulumi.StringArray{
					pulumi.String("file:///usr/lib/spark/examples/jars/spark-examples.jar"),
				},
				QueryVariables: pulumi.StringMap{
					"name": pulumi.String("value"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var exampleBatchSparsql = new Gcp.Dataproc.Batch("example_batch_sparsql", new()
    {
        BatchId = "tf-test-batch_39249",
        Location = "us-central1",
        RuntimeConfig = new Gcp.Dataproc.Inputs.BatchRuntimeConfigArgs
        {
            Properties = 
            {
                { "spark.dynamicAllocation.enabled", "false" },
                { "spark.executor.instances", "2" },
            },
        },
        EnvironmentConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigArgs
        {
            ExecutionConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigExecutionConfigArgs
            {
                SubnetworkUri = "default",
            },
        },
        SparkSqlBatch = new Gcp.Dataproc.Inputs.BatchSparkSqlBatchArgs
        {
            QueryFileUri = "gs://dataproc-examples/spark-sql/natality/cigarette_correlations.sql",
            JarFileUris = new[]
            {
                "file:///usr/lib/spark/examples/jars/spark-examples.jar",
            },
            QueryVariables = 
            {
                { "name", "value" },
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataproc.Batch;
import com.pulumi.gcp.dataproc.BatchArgs;
import com.pulumi.gcp.dataproc.inputs.BatchRuntimeConfigArgs;
import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigArgs;
import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigExecutionConfigArgs;
import com.pulumi.gcp.dataproc.inputs.BatchSparkSqlBatchArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var exampleBatchSparsql = new Batch("exampleBatchSparsql", BatchArgs.builder()
            .batchId("tf-test-batch_39249")
            .location("us-central1")
            .runtimeConfig(BatchRuntimeConfigArgs.builder()
                .properties(Map.ofEntries(
                    Map.entry("spark.dynamicAllocation.enabled", "false"),
                    Map.entry("spark.executor.instances", "2")
                ))
                .build())
            .environmentConfig(BatchEnvironmentConfigArgs.builder()
                .executionConfig(BatchEnvironmentConfigExecutionConfigArgs.builder()
                    .subnetworkUri("default")
                    .build())
                .build())
            .sparkSqlBatch(BatchSparkSqlBatchArgs.builder()
                .queryFileUri("gs://dataproc-examples/spark-sql/natality/cigarette_correlations.sql")
                .jarFileUris("file:///usr/lib/spark/examples/jars/spark-examples.jar")
                .queryVariables(Map.of("name", "value"))
                .build())
            .build());
    }
}
resources:
  exampleBatchSparsql:
    type: gcp:dataproc:Batch
    name: example_batch_sparsql
    properties:
      batchId: tf-test-batch_39249
      location: us-central1
      runtimeConfig:
        properties:
          spark.dynamicAllocation.enabled: 'false'
          spark.executor.instances: '2'
      environmentConfig:
        executionConfig:
          subnetworkUri: default
      sparkSqlBatch:
        queryFileUri: gs://dataproc-examples/spark-sql/natality/cigarette_correlations.sql
        jarFileUris:
          - file:///usr/lib/spark/examples/jars/spark-examples.jar
        queryVariables:
          name: value
Dataproc Batch Pyspark
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const exampleBatchPyspark = new gcp.dataproc.Batch("example_batch_pyspark", {
    batchId: "tf-test-batch_74391",
    location: "us-central1",
    runtimeConfig: {
        properties: {
            "spark.dynamicAllocation.enabled": "false",
            "spark.executor.instances": "2",
        },
    },
    environmentConfig: {
        executionConfig: {
            subnetworkUri: "default",
        },
    },
    pysparkBatch: {
        mainPythonFileUri: "https://storage.googleapis.com/terraform-batches/test_util.py",
        args: ["10"],
        jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
        pythonFileUris: ["gs://dataproc-examples/pyspark/hello-world/hello-world.py"],
        archiveUris: [
            "https://storage.googleapis.com/terraform-batches/animals.txt.tar.gz#unpacked",
            "https://storage.googleapis.com/terraform-batches/animals.txt.jar",
            "https://storage.googleapis.com/terraform-batches/animals.txt",
        ],
        fileUris: ["https://storage.googleapis.com/terraform-batches/people.txt"],
    },
});
import pulumi
import pulumi_gcp as gcp
example_batch_pyspark = gcp.dataproc.Batch("example_batch_pyspark",
    batch_id="tf-test-batch_74391",
    location="us-central1",
    runtime_config={
        "properties": {
            "spark.dynamicAllocation.enabled": "false",
            "spark.executor.instances": "2",
        },
    },
    environment_config={
        "execution_config": {
            "subnetwork_uri": "default",
        },
    },
    pyspark_batch={
        "main_python_file_uri": "https://storage.googleapis.com/terraform-batches/test_util.py",
        "args": ["10"],
        "jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
        "python_file_uris": ["gs://dataproc-examples/pyspark/hello-world/hello-world.py"],
        "archive_uris": [
            "https://storage.googleapis.com/terraform-batches/animals.txt.tar.gz#unpacked",
            "https://storage.googleapis.com/terraform-batches/animals.txt.jar",
            "https://storage.googleapis.com/terraform-batches/animals.txt",
        ],
        "file_uris": ["https://storage.googleapis.com/terraform-batches/people.txt"],
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewBatch(ctx, "example_batch_pyspark", &dataproc.BatchArgs{
			BatchId:  pulumi.String("tf-test-batch_74391"),
			Location: pulumi.String("us-central1"),
			RuntimeConfig: &dataproc.BatchRuntimeConfigArgs{
				Properties: pulumi.StringMap{
					"spark.dynamicAllocation.enabled": pulumi.String("false"),
					"spark.executor.instances":        pulumi.String("2"),
				},
			},
			EnvironmentConfig: &dataproc.BatchEnvironmentConfigArgs{
				ExecutionConfig: &dataproc.BatchEnvironmentConfigExecutionConfigArgs{
					SubnetworkUri: pulumi.String("default"),
				},
			},
			PysparkBatch: &dataproc.BatchPysparkBatchArgs{
				MainPythonFileUri: pulumi.String("https://storage.googleapis.com/terraform-batches/test_util.py"),
				Args: pulumi.StringArray{
					pulumi.String("10"),
				},
				JarFileUris: pulumi.StringArray{
					pulumi.String("file:///usr/lib/spark/examples/jars/spark-examples.jar"),
				},
				PythonFileUris: pulumi.StringArray{
					pulumi.String("gs://dataproc-examples/pyspark/hello-world/hello-world.py"),
				},
				ArchiveUris: pulumi.StringArray{
					pulumi.String("https://storage.googleapis.com/terraform-batches/animals.txt.tar.gz#unpacked"),
					pulumi.String("https://storage.googleapis.com/terraform-batches/animals.txt.jar"),
					pulumi.String("https://storage.googleapis.com/terraform-batches/animals.txt"),
				},
				FileUris: pulumi.StringArray{
					pulumi.String("https://storage.googleapis.com/terraform-batches/people.txt"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var exampleBatchPyspark = new Gcp.Dataproc.Batch("example_batch_pyspark", new()
    {
        BatchId = "tf-test-batch_74391",
        Location = "us-central1",
        RuntimeConfig = new Gcp.Dataproc.Inputs.BatchRuntimeConfigArgs
        {
            Properties = 
            {
                { "spark.dynamicAllocation.enabled", "false" },
                { "spark.executor.instances", "2" },
            },
        },
        EnvironmentConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigArgs
        {
            ExecutionConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigExecutionConfigArgs
            {
                SubnetworkUri = "default",
            },
        },
        PysparkBatch = new Gcp.Dataproc.Inputs.BatchPysparkBatchArgs
        {
            MainPythonFileUri = "https://storage.googleapis.com/terraform-batches/test_util.py",
            Args = new[]
            {
                "10",
            },
            JarFileUris = new[]
            {
                "file:///usr/lib/spark/examples/jars/spark-examples.jar",
            },
            PythonFileUris = new[]
            {
                "gs://dataproc-examples/pyspark/hello-world/hello-world.py",
            },
            ArchiveUris = new[]
            {
                "https://storage.googleapis.com/terraform-batches/animals.txt.tar.gz#unpacked",
                "https://storage.googleapis.com/terraform-batches/animals.txt.jar",
                "https://storage.googleapis.com/terraform-batches/animals.txt",
            },
            FileUris = new[]
            {
                "https://storage.googleapis.com/terraform-batches/people.txt",
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataproc.Batch;
import com.pulumi.gcp.dataproc.BatchArgs;
import com.pulumi.gcp.dataproc.inputs.BatchRuntimeConfigArgs;
import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigArgs;
import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigExecutionConfigArgs;
import com.pulumi.gcp.dataproc.inputs.BatchPysparkBatchArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var exampleBatchPyspark = new Batch("exampleBatchPyspark", BatchArgs.builder()
            .batchId("tf-test-batch_74391")
            .location("us-central1")
            .runtimeConfig(BatchRuntimeConfigArgs.builder()
                .properties(Map.ofEntries(
                    Map.entry("spark.dynamicAllocation.enabled", "false"),
                    Map.entry("spark.executor.instances", "2")
                ))
                .build())
            .environmentConfig(BatchEnvironmentConfigArgs.builder()
                .executionConfig(BatchEnvironmentConfigExecutionConfigArgs.builder()
                    .subnetworkUri("default")
                    .build())
                .build())
            .pysparkBatch(BatchPysparkBatchArgs.builder()
                .mainPythonFileUri("https://storage.googleapis.com/terraform-batches/test_util.py")
                .args("10")
                .jarFileUris("file:///usr/lib/spark/examples/jars/spark-examples.jar")
                .pythonFileUris("gs://dataproc-examples/pyspark/hello-world/hello-world.py")
                .archiveUris(
                    "https://storage.googleapis.com/terraform-batches/animals.txt.tar.gz#unpacked",
                    "https://storage.googleapis.com/terraform-batches/animals.txt.jar",
                    "https://storage.googleapis.com/terraform-batches/animals.txt")
                .fileUris("https://storage.googleapis.com/terraform-batches/people.txt")
                .build())
            .build());
    }
}
resources:
  exampleBatchPyspark:
    type: gcp:dataproc:Batch
    name: example_batch_pyspark
    properties:
      batchId: tf-test-batch_74391
      location: us-central1
      runtimeConfig:
        properties:
          spark.dynamicAllocation.enabled: 'false'
          spark.executor.instances: '2'
      environmentConfig:
        executionConfig:
          subnetworkUri: default
      pysparkBatch:
        mainPythonFileUri: https://storage.googleapis.com/terraform-batches/test_util.py
        args:
          - '10'
        jarFileUris:
          - file:///usr/lib/spark/examples/jars/spark-examples.jar
        pythonFileUris:
          - gs://dataproc-examples/pyspark/hello-world/hello-world.py
        archiveUris:
          - https://storage.googleapis.com/terraform-batches/animals.txt.tar.gz#unpacked
          - https://storage.googleapis.com/terraform-batches/animals.txt.jar
          - https://storage.googleapis.com/terraform-batches/animals.txt
        fileUris:
          - https://storage.googleapis.com/terraform-batches/people.txt
Dataproc Batch Sparkr
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const exampleBatchSparkr = new gcp.dataproc.Batch("example_batch_sparkr", {
    batchId: "tf-test-batch_16511",
    location: "us-central1",
    labels: {
        batch_test: "terraform",
    },
    runtimeConfig: {
        properties: {
            "spark.dynamicAllocation.enabled": "false",
            "spark.executor.instances": "2",
        },
    },
    environmentConfig: {
        executionConfig: {
            subnetworkUri: "default",
            ttl: "3600s",
            networkTags: ["tag1"],
        },
    },
    sparkRBatch: {
        mainRFileUri: "https://storage.googleapis.com/terraform-batches/spark-r-flights.r",
        args: ["https://storage.googleapis.com/terraform-batches/flights.csv"],
    },
});
import pulumi
import pulumi_gcp as gcp
example_batch_sparkr = gcp.dataproc.Batch("example_batch_sparkr",
    batch_id="tf-test-batch_16511",
    location="us-central1",
    labels={
        "batch_test": "terraform",
    },
    runtime_config={
        "properties": {
            "spark.dynamicAllocation.enabled": "false",
            "spark.executor.instances": "2",
        },
    },
    environment_config={
        "execution_config": {
            "subnetwork_uri": "default",
            "ttl": "3600s",
            "network_tags": ["tag1"],
        },
    },
    spark_r_batch={
        "main_r_file_uri": "https://storage.googleapis.com/terraform-batches/spark-r-flights.r",
        "args": ["https://storage.googleapis.com/terraform-batches/flights.csv"],
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewBatch(ctx, "example_batch_sparkr", &dataproc.BatchArgs{
			BatchId:  pulumi.String("tf-test-batch_16511"),
			Location: pulumi.String("us-central1"),
			Labels: pulumi.StringMap{
				"batch_test": pulumi.String("terraform"),
			},
			RuntimeConfig: &dataproc.BatchRuntimeConfigArgs{
				Properties: pulumi.StringMap{
					"spark.dynamicAllocation.enabled": pulumi.String("false"),
					"spark.executor.instances":        pulumi.String("2"),
				},
			},
			EnvironmentConfig: &dataproc.BatchEnvironmentConfigArgs{
				ExecutionConfig: &dataproc.BatchEnvironmentConfigExecutionConfigArgs{
					SubnetworkUri: pulumi.String("default"),
					Ttl:           pulumi.String("3600s"),
					NetworkTags: pulumi.StringArray{
						pulumi.String("tag1"),
					},
				},
			},
			SparkRBatch: &dataproc.BatchSparkRBatchArgs{
				MainRFileUri: pulumi.String("https://storage.googleapis.com/terraform-batches/spark-r-flights.r"),
				Args: pulumi.StringArray{
					pulumi.String("https://storage.googleapis.com/terraform-batches/flights.csv"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var exampleBatchSparkr = new Gcp.Dataproc.Batch("example_batch_sparkr", new()
    {
        BatchId = "tf-test-batch_16511",
        Location = "us-central1",
        Labels = 
        {
            { "batch_test", "terraform" },
        },
        RuntimeConfig = new Gcp.Dataproc.Inputs.BatchRuntimeConfigArgs
        {
            Properties = 
            {
                { "spark.dynamicAllocation.enabled", "false" },
                { "spark.executor.instances", "2" },
            },
        },
        EnvironmentConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigArgs
        {
            ExecutionConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigExecutionConfigArgs
            {
                SubnetworkUri = "default",
                Ttl = "3600s",
                NetworkTags = new[]
                {
                    "tag1",
                },
            },
        },
        SparkRBatch = new Gcp.Dataproc.Inputs.BatchSparkRBatchArgs
        {
            MainRFileUri = "https://storage.googleapis.com/terraform-batches/spark-r-flights.r",
            Args = new[]
            {
                "https://storage.googleapis.com/terraform-batches/flights.csv",
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataproc.Batch;
import com.pulumi.gcp.dataproc.BatchArgs;
import com.pulumi.gcp.dataproc.inputs.BatchRuntimeConfigArgs;
import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigArgs;
import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigExecutionConfigArgs;
import com.pulumi.gcp.dataproc.inputs.BatchSparkRBatchArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var exampleBatchSparkr = new Batch("exampleBatchSparkr", BatchArgs.builder()
            .batchId("tf-test-batch_16511")
            .location("us-central1")
            .labels(Map.of("batch_test", "terraform"))
            .runtimeConfig(BatchRuntimeConfigArgs.builder()
                .properties(Map.ofEntries(
                    Map.entry("spark.dynamicAllocation.enabled", "false"),
                    Map.entry("spark.executor.instances", "2")
                ))
                .build())
            .environmentConfig(BatchEnvironmentConfigArgs.builder()
                .executionConfig(BatchEnvironmentConfigExecutionConfigArgs.builder()
                    .subnetworkUri("default")
                    .ttl("3600s")
                    .networkTags("tag1")
                    .build())
                .build())
            .sparkRBatch(BatchSparkRBatchArgs.builder()
                .mainRFileUri("https://storage.googleapis.com/terraform-batches/spark-r-flights.r")
                .args("https://storage.googleapis.com/terraform-batches/flights.csv")
                .build())
            .build());
    }
}
resources:
  exampleBatchSparkr:
    type: gcp:dataproc:Batch
    name: example_batch_sparkr
    properties:
      batchId: tf-test-batch_16511
      location: us-central1
      labels:
        batch_test: terraform
      runtimeConfig:
        properties:
          spark.dynamicAllocation.enabled: 'false'
          spark.executor.instances: '2'
      environmentConfig:
        executionConfig:
          subnetworkUri: default
          ttl: 3600s
          networkTags:
            - tag1
      sparkRBatch:
        mainRFileUri: https://storage.googleapis.com/terraform-batches/spark-r-flights.r
        args:
          - https://storage.googleapis.com/terraform-batches/flights.csv
Dataproc Batch Autotuning
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const exampleBatchAutotuning = new gcp.dataproc.Batch("example_batch_autotuning", {
    batchId: "tf-test-batch_8493",
    location: "us-central1",
    labels: {
        batch_test: "terraform",
    },
    runtimeConfig: {
        version: "2.2",
        properties: {
            "spark.dynamicAllocation.enabled": "false",
            "spark.executor.instances": "2",
        },
        cohort: "tf-dataproc-batch-example",
        autotuningConfig: {
            scenarios: [
                "SCALING",
                "MEMORY",
            ],
        },
    },
    environmentConfig: {
        executionConfig: {
            subnetworkUri: "default",
            ttl: "3600s",
        },
    },
    sparkBatch: {
        mainClass: "org.apache.spark.examples.SparkPi",
        args: ["10"],
        jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
    },
});
import pulumi
import pulumi_gcp as gcp
example_batch_autotuning = gcp.dataproc.Batch("example_batch_autotuning",
    batch_id="tf-test-batch_8493",
    location="us-central1",
    labels={
        "batch_test": "terraform",
    },
    runtime_config={
        "version": "2.2",
        "properties": {
            "spark.dynamicAllocation.enabled": "false",
            "spark.executor.instances": "2",
        },
        "cohort": "tf-dataproc-batch-example",
        "autotuning_config": {
            "scenarios": [
                "SCALING",
                "MEMORY",
            ],
        },
    },
    environment_config={
        "execution_config": {
            "subnetwork_uri": "default",
            "ttl": "3600s",
        },
    },
    spark_batch={
        "main_class": "org.apache.spark.examples.SparkPi",
        "args": ["10"],
        "jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewBatch(ctx, "example_batch_autotuning", &dataproc.BatchArgs{
			BatchId:  pulumi.String("tf-test-batch_8493"),
			Location: pulumi.String("us-central1"),
			Labels: pulumi.StringMap{
				"batch_test": pulumi.String("terraform"),
			},
			RuntimeConfig: &dataproc.BatchRuntimeConfigArgs{
				Version: pulumi.String("2.2"),
				Properties: pulumi.StringMap{
					"spark.dynamicAllocation.enabled": pulumi.String("false"),
					"spark.executor.instances":        pulumi.String("2"),
				},
				Cohort: pulumi.String("tf-dataproc-batch-example"),
				AutotuningConfig: &dataproc.BatchRuntimeConfigAutotuningConfigArgs{
					Scenarios: pulumi.StringArray{
						pulumi.String("SCALING"),
						pulumi.String("MEMORY"),
					},
				},
			},
			EnvironmentConfig: &dataproc.BatchEnvironmentConfigArgs{
				ExecutionConfig: &dataproc.BatchEnvironmentConfigExecutionConfigArgs{
					SubnetworkUri: pulumi.String("default"),
					Ttl:           pulumi.String("3600s"),
				},
			},
			SparkBatch: &dataproc.BatchSparkBatchArgs{
				MainClass: pulumi.String("org.apache.spark.examples.SparkPi"),
				Args: pulumi.StringArray{
					pulumi.String("10"),
				},
				JarFileUris: pulumi.StringArray{
					pulumi.String("file:///usr/lib/spark/examples/jars/spark-examples.jar"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var exampleBatchAutotuning = new Gcp.Dataproc.Batch("example_batch_autotuning", new()
    {
        BatchId = "tf-test-batch_8493",
        Location = "us-central1",
        Labels = 
        {
            { "batch_test", "terraform" },
        },
        RuntimeConfig = new Gcp.Dataproc.Inputs.BatchRuntimeConfigArgs
        {
            Version = "2.2",
            Properties = 
            {
                { "spark.dynamicAllocation.enabled", "false" },
                { "spark.executor.instances", "2" },
            },
            Cohort = "tf-dataproc-batch-example",
            AutotuningConfig = new Gcp.Dataproc.Inputs.BatchRuntimeConfigAutotuningConfigArgs
            {
                Scenarios = new[]
                {
                    "SCALING",
                    "MEMORY",
                },
            },
        },
        EnvironmentConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigArgs
        {
            ExecutionConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigExecutionConfigArgs
            {
                SubnetworkUri = "default",
                Ttl = "3600s",
            },
        },
        SparkBatch = new Gcp.Dataproc.Inputs.BatchSparkBatchArgs
        {
            MainClass = "org.apache.spark.examples.SparkPi",
            Args = new[]
            {
                "10",
            },
            JarFileUris = new[]
            {
                "file:///usr/lib/spark/examples/jars/spark-examples.jar",
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataproc.Batch;
import com.pulumi.gcp.dataproc.BatchArgs;
import com.pulumi.gcp.dataproc.inputs.BatchRuntimeConfigArgs;
import com.pulumi.gcp.dataproc.inputs.BatchRuntimeConfigAutotuningConfigArgs;
import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigArgs;
import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigExecutionConfigArgs;
import com.pulumi.gcp.dataproc.inputs.BatchSparkBatchArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var exampleBatchAutotuning = new Batch("exampleBatchAutotuning", BatchArgs.builder()
            .batchId("tf-test-batch_8493")
            .location("us-central1")
            .labels(Map.of("batch_test", "terraform"))
            .runtimeConfig(BatchRuntimeConfigArgs.builder()
                .version("2.2")
                .properties(Map.ofEntries(
                    Map.entry("spark.dynamicAllocation.enabled", "false"),
                    Map.entry("spark.executor.instances", "2")
                ))
                .cohort("tf-dataproc-batch-example")
                .autotuningConfig(BatchRuntimeConfigAutotuningConfigArgs.builder()
                    .scenarios("SCALING", "MEMORY")
                    .build())
                .build())
            .environmentConfig(BatchEnvironmentConfigArgs.builder()
                .executionConfig(BatchEnvironmentConfigExecutionConfigArgs.builder()
                    .subnetworkUri("default")
                    .ttl("3600s")
                    .build())
                .build())
            .sparkBatch(BatchSparkBatchArgs.builder()
                .mainClass("org.apache.spark.examples.SparkPi")
                .args("10")
                .jarFileUris("file:///usr/lib/spark/examples/jars/spark-examples.jar")
                .build())
            .build());
    }
}
resources:
  exampleBatchAutotuning:
    type: gcp:dataproc:Batch
    name: example_batch_autotuning
    properties:
      batchId: tf-test-batch_8493
      location: us-central1
      labels:
        batch_test: terraform
      runtimeConfig:
        version: '2.2'
        properties:
          spark.dynamicAllocation.enabled: 'false'
          spark.executor.instances: '2'
        cohort: tf-dataproc-batch-example
        autotuningConfig:
          scenarios:
            - SCALING
            - MEMORY
      environmentConfig:
        executionConfig:
          subnetworkUri: default
          ttl: 3600s
      sparkBatch:
        mainClass: org.apache.spark.examples.SparkPi
        args:
          - '10'
        jarFileUris:
          - file:///usr/lib/spark/examples/jars/spark-examples.jar
Create Batch Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Batch(name: string, args?: BatchArgs, opts?: CustomResourceOptions);
@overload
def Batch(resource_name: str,
          args: Optional[BatchArgs] = None,
          opts: Optional[ResourceOptions] = None)
@overload
def Batch(resource_name: str,
          opts: Optional[ResourceOptions] = None,
          batch_id: Optional[str] = None,
          environment_config: Optional[BatchEnvironmentConfigArgs] = None,
          labels: Optional[Mapping[str, str]] = None,
          location: Optional[str] = None,
          project: Optional[str] = None,
          pyspark_batch: Optional[BatchPysparkBatchArgs] = None,
          runtime_config: Optional[BatchRuntimeConfigArgs] = None,
          spark_batch: Optional[BatchSparkBatchArgs] = None,
          spark_r_batch: Optional[BatchSparkRBatchArgs] = None,
          spark_sql_batch: Optional[BatchSparkSqlBatchArgs] = None)
func NewBatch(ctx *Context, name string, args *BatchArgs, opts ...ResourceOption) (*Batch, error)
public Batch(string name, BatchArgs? args = null, CustomResourceOptions? opts = null)
type: gcp:dataproc:Batch
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args BatchArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args BatchArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args BatchArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args BatchArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args BatchArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var batchResource = new Gcp.Dataproc.Batch("batchResource", new()
{
    BatchId = "string",
    EnvironmentConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigArgs
    {
        ExecutionConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigExecutionConfigArgs
        {
            KmsKey = "string",
            NetworkTags = new[]
            {
                "string",
            },
            NetworkUri = "string",
            ServiceAccount = "string",
            StagingBucket = "string",
            SubnetworkUri = "string",
            Ttl = "string",
        },
        PeripheralsConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigPeripheralsConfigArgs
        {
            MetastoreService = "string",
            SparkHistoryServerConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigArgs
            {
                DataprocCluster = "string",
            },
        },
    },
    Labels = 
    {
        { "string", "string" },
    },
    Location = "string",
    Project = "string",
    PysparkBatch = new Gcp.Dataproc.Inputs.BatchPysparkBatchArgs
    {
        ArchiveUris = new[]
        {
            "string",
        },
        Args = new[]
        {
            "string",
        },
        FileUris = new[]
        {
            "string",
        },
        JarFileUris = new[]
        {
            "string",
        },
        MainPythonFileUri = "string",
        PythonFileUris = new[]
        {
            "string",
        },
    },
    RuntimeConfig = new Gcp.Dataproc.Inputs.BatchRuntimeConfigArgs
    {
        AutotuningConfig = new Gcp.Dataproc.Inputs.BatchRuntimeConfigAutotuningConfigArgs
        {
            Scenarios = new[]
            {
                "string",
            },
        },
        Cohort = "string",
        ContainerImage = "string",
        EffectiveProperties = 
        {
            { "string", "string" },
        },
        Properties = 
        {
            { "string", "string" },
        },
        Version = "string",
    },
    SparkBatch = new Gcp.Dataproc.Inputs.BatchSparkBatchArgs
    {
        ArchiveUris = new[]
        {
            "string",
        },
        Args = new[]
        {
            "string",
        },
        FileUris = new[]
        {
            "string",
        },
        JarFileUris = new[]
        {
            "string",
        },
        MainClass = "string",
        MainJarFileUri = "string",
    },
    SparkRBatch = new Gcp.Dataproc.Inputs.BatchSparkRBatchArgs
    {
        ArchiveUris = new[]
        {
            "string",
        },
        Args = new[]
        {
            "string",
        },
        FileUris = new[]
        {
            "string",
        },
        MainRFileUri = "string",
    },
    SparkSqlBatch = new Gcp.Dataproc.Inputs.BatchSparkSqlBatchArgs
    {
        JarFileUris = new[]
        {
            "string",
        },
        QueryFileUri = "string",
        QueryVariables = 
        {
            { "string", "string" },
        },
    },
});
example, err := dataproc.NewBatch(ctx, "batchResource", &dataproc.BatchArgs{
	BatchId: pulumi.String("string"),
	EnvironmentConfig: &dataproc.BatchEnvironmentConfigArgs{
		ExecutionConfig: &dataproc.BatchEnvironmentConfigExecutionConfigArgs{
			KmsKey: pulumi.String("string"),
			NetworkTags: pulumi.StringArray{
				pulumi.String("string"),
			},
			NetworkUri:     pulumi.String("string"),
			ServiceAccount: pulumi.String("string"),
			StagingBucket:  pulumi.String("string"),
			SubnetworkUri:  pulumi.String("string"),
			Ttl:            pulumi.String("string"),
		},
		PeripheralsConfig: &dataproc.BatchEnvironmentConfigPeripheralsConfigArgs{
			MetastoreService: pulumi.String("string"),
			SparkHistoryServerConfig: &dataproc.BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigArgs{
				DataprocCluster: pulumi.String("string"),
			},
		},
	},
	Labels: pulumi.StringMap{
		"string": pulumi.String("string"),
	},
	Location: pulumi.String("string"),
	Project:  pulumi.String("string"),
	PysparkBatch: &dataproc.BatchPysparkBatchArgs{
		ArchiveUris: pulumi.StringArray{
			pulumi.String("string"),
		},
		Args: pulumi.StringArray{
			pulumi.String("string"),
		},
		FileUris: pulumi.StringArray{
			pulumi.String("string"),
		},
		JarFileUris: pulumi.StringArray{
			pulumi.String("string"),
		},
		MainPythonFileUri: pulumi.String("string"),
		PythonFileUris: pulumi.StringArray{
			pulumi.String("string"),
		},
	},
	RuntimeConfig: &dataproc.BatchRuntimeConfigArgs{
		AutotuningConfig: &dataproc.BatchRuntimeConfigAutotuningConfigArgs{
			Scenarios: pulumi.StringArray{
				pulumi.String("string"),
			},
		},
		Cohort:         pulumi.String("string"),
		ContainerImage: pulumi.String("string"),
		EffectiveProperties: pulumi.StringMap{
			"string": pulumi.String("string"),
		},
		Properties: pulumi.StringMap{
			"string": pulumi.String("string"),
		},
		Version: pulumi.String("string"),
	},
	SparkBatch: &dataproc.BatchSparkBatchArgs{
		ArchiveUris: pulumi.StringArray{
			pulumi.String("string"),
		},
		Args: pulumi.StringArray{
			pulumi.String("string"),
		},
		FileUris: pulumi.StringArray{
			pulumi.String("string"),
		},
		JarFileUris: pulumi.StringArray{
			pulumi.String("string"),
		},
		MainClass:      pulumi.String("string"),
		MainJarFileUri: pulumi.String("string"),
	},
	SparkRBatch: &dataproc.BatchSparkRBatchArgs{
		ArchiveUris: pulumi.StringArray{
			pulumi.String("string"),
		},
		Args: pulumi.StringArray{
			pulumi.String("string"),
		},
		FileUris: pulumi.StringArray{
			pulumi.String("string"),
		},
		MainRFileUri: pulumi.String("string"),
	},
	SparkSqlBatch: &dataproc.BatchSparkSqlBatchArgs{
		JarFileUris: pulumi.StringArray{
			pulumi.String("string"),
		},
		QueryFileUri: pulumi.String("string"),
		QueryVariables: pulumi.StringMap{
			"string": pulumi.String("string"),
		},
	},
})
var batchResource = new Batch("batchResource", BatchArgs.builder()
    .batchId("string")
    .environmentConfig(BatchEnvironmentConfigArgs.builder()
        .executionConfig(BatchEnvironmentConfigExecutionConfigArgs.builder()
            .kmsKey("string")
            .networkTags("string")
            .networkUri("string")
            .serviceAccount("string")
            .stagingBucket("string")
            .subnetworkUri("string")
            .ttl("string")
            .build())
        .peripheralsConfig(BatchEnvironmentConfigPeripheralsConfigArgs.builder()
            .metastoreService("string")
            .sparkHistoryServerConfig(BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigArgs.builder()
                .dataprocCluster("string")
                .build())
            .build())
        .build())
    .labels(Map.of("string", "string"))
    .location("string")
    .project("string")
    .pysparkBatch(BatchPysparkBatchArgs.builder()
        .archiveUris("string")
        .args("string")
        .fileUris("string")
        .jarFileUris("string")
        .mainPythonFileUri("string")
        .pythonFileUris("string")
        .build())
    .runtimeConfig(BatchRuntimeConfigArgs.builder()
        .autotuningConfig(BatchRuntimeConfigAutotuningConfigArgs.builder()
            .scenarios("string")
            .build())
        .cohort("string")
        .containerImage("string")
        .effectiveProperties(Map.of("string", "string"))
        .properties(Map.of("string", "string"))
        .version("string")
        .build())
    .sparkBatch(BatchSparkBatchArgs.builder()
        .archiveUris("string")
        .args("string")
        .fileUris("string")
        .jarFileUris("string")
        .mainClass("string")
        .mainJarFileUri("string")
        .build())
    .sparkRBatch(BatchSparkRBatchArgs.builder()
        .archiveUris("string")
        .args("string")
        .fileUris("string")
        .mainRFileUri("string")
        .build())
    .sparkSqlBatch(BatchSparkSqlBatchArgs.builder()
        .jarFileUris("string")
        .queryFileUri("string")
        .queryVariables(Map.of("string", "string"))
        .build())
    .build());
batch_resource = gcp.dataproc.Batch("batchResource",
    batch_id="string",
    environment_config={
        "execution_config": {
            "kms_key": "string",
            "network_tags": ["string"],
            "network_uri": "string",
            "service_account": "string",
            "staging_bucket": "string",
            "subnetwork_uri": "string",
            "ttl": "string",
        },
        "peripherals_config": {
            "metastore_service": "string",
            "spark_history_server_config": {
                "dataproc_cluster": "string",
            },
        },
    },
    labels={
        "string": "string",
    },
    location="string",
    project="string",
    pyspark_batch={
        "archive_uris": ["string"],
        "args": ["string"],
        "file_uris": ["string"],
        "jar_file_uris": ["string"],
        "main_python_file_uri": "string",
        "python_file_uris": ["string"],
    },
    runtime_config={
        "autotuning_config": {
            "scenarios": ["string"],
        },
        "cohort": "string",
        "container_image": "string",
        "effective_properties": {
            "string": "string",
        },
        "properties": {
            "string": "string",
        },
        "version": "string",
    },
    spark_batch={
        "archive_uris": ["string"],
        "args": ["string"],
        "file_uris": ["string"],
        "jar_file_uris": ["string"],
        "main_class": "string",
        "main_jar_file_uri": "string",
    },
    spark_r_batch={
        "archive_uris": ["string"],
        "args": ["string"],
        "file_uris": ["string"],
        "main_r_file_uri": "string",
    },
    spark_sql_batch={
        "jar_file_uris": ["string"],
        "query_file_uri": "string",
        "query_variables": {
            "string": "string",
        },
    })
const batchResource = new gcp.dataproc.Batch("batchResource", {
    batchId: "string",
    environmentConfig: {
        executionConfig: {
            kmsKey: "string",
            networkTags: ["string"],
            networkUri: "string",
            serviceAccount: "string",
            stagingBucket: "string",
            subnetworkUri: "string",
            ttl: "string",
        },
        peripheralsConfig: {
            metastoreService: "string",
            sparkHistoryServerConfig: {
                dataprocCluster: "string",
            },
        },
    },
    labels: {
        string: "string",
    },
    location: "string",
    project: "string",
    pysparkBatch: {
        archiveUris: ["string"],
        args: ["string"],
        fileUris: ["string"],
        jarFileUris: ["string"],
        mainPythonFileUri: "string",
        pythonFileUris: ["string"],
    },
    runtimeConfig: {
        autotuningConfig: {
            scenarios: ["string"],
        },
        cohort: "string",
        containerImage: "string",
        effectiveProperties: {
            string: "string",
        },
        properties: {
            string: "string",
        },
        version: "string",
    },
    sparkBatch: {
        archiveUris: ["string"],
        args: ["string"],
        fileUris: ["string"],
        jarFileUris: ["string"],
        mainClass: "string",
        mainJarFileUri: "string",
    },
    sparkRBatch: {
        archiveUris: ["string"],
        args: ["string"],
        fileUris: ["string"],
        mainRFileUri: "string",
    },
    sparkSqlBatch: {
        jarFileUris: ["string"],
        queryFileUri: "string",
        queryVariables: {
            string: "string",
        },
    },
});
type: gcp:dataproc:Batch
properties:
    batchId: string
    environmentConfig:
        executionConfig:
            kmsKey: string
            networkTags:
                - string
            networkUri: string
            serviceAccount: string
            stagingBucket: string
            subnetworkUri: string
            ttl: string
        peripheralsConfig:
            metastoreService: string
            sparkHistoryServerConfig:
                dataprocCluster: string
    labels:
        string: string
    location: string
    project: string
    pysparkBatch:
        archiveUris:
            - string
        args:
            - string
        fileUris:
            - string
        jarFileUris:
            - string
        mainPythonFileUri: string
        pythonFileUris:
            - string
    runtimeConfig:
        autotuningConfig:
            scenarios:
                - string
        cohort: string
        containerImage: string
        effectiveProperties:
            string: string
        properties:
            string: string
        version: string
    sparkBatch:
        archiveUris:
            - string
        args:
            - string
        fileUris:
            - string
        jarFileUris:
            - string
        mainClass: string
        mainJarFileUri: string
    sparkRBatch:
        archiveUris:
            - string
        args:
            - string
        fileUris:
            - string
        mainRFileUri: string
    sparkSqlBatch:
        jarFileUris:
            - string
        queryFileUri: string
        queryVariables:
            string: string
Batch Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
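For example, a minimal sketch (resource names and values below are illustrative placeholders, not taken from the examples above) that passes the same spark_batch input first as an argument class and then as a dictionary literal; both forms are equivalent:
import pulumi_gcp as gcp
# Argument-class form of the nested spark_batch input.
batch_with_args_class = gcp.dataproc.Batch("batch_with_args_class",
    location="us-central1",
    spark_batch=gcp.dataproc.BatchSparkBatchArgs(
        main_class="org.apache.spark.examples.SparkPi",
        jar_file_uris=["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
    ))
# Dictionary-literal form of the same input.
batch_with_dict = gcp.dataproc.Batch("batch_with_dict",
    location="us-central1",
    spark_batch={
        "main_class": "org.apache.spark.examples.SparkPi",
        "jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
    })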
The Batch resource accepts the following input properties:
- BatchId string
- The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
- EnvironmentConfig BatchEnvironment Config 
- Environment configuration for the batch execution. Structure is documented below.
- Labels Dictionary<string, string>
- The labels to associate with this batch. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
- Location string
- The location in which the batch will be created.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- PysparkBatch BatchPyspark Batch 
- PySpark batch config. Structure is documented below.
- RuntimeConfig BatchRuntime Config 
- Runtime configuration for the batch execution. Structure is documented below.
- SparkBatch BatchSpark Batch 
- Spark batch config. Structure is documented below.
- SparkRBatch BatchSpark RBatch 
- SparkR batch config. Structure is documented below.
- SparkSqlBatch BatchSparkSqlBatch
- Spark SQL batch config. Structure is documented below.
- BatchId string
- The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
- EnvironmentConfig BatchEnvironment Config Args 
- Environment configuration for the batch execution. Structure is documented below.
- Labels map[string]string
- The labels to associate with this batch. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
- Location string
- The location in which the batch will be created.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- PysparkBatch BatchPyspark Batch Args 
- PySpark batch config. Structure is documented below.
- RuntimeConfig BatchRuntime Config Args 
- Runtime configuration for the batch execution. Structure is documented below.
- SparkBatch BatchSpark Batch Args 
- Spark batch config. Structure is documented below.
- SparkRBatch BatchSpark RBatch Args 
- SparkR batch config. Structure is documented below.
- SparkSqlBatch BatchSparkSqlBatchArgs
- Spark SQL batch config. Structure is documented below.
- batchId String
- The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
- environmentConfig BatchEnvironment Config 
- Environment configuration for the batch execution. Structure is documented below.
- labels Map<String,String>
- The labels to associate with this batch. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
- location String
- The location in which the batch will be created.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- pysparkBatch BatchPyspark Batch 
- PySpark batch config. Structure is documented below.
- runtimeConfig BatchRuntime Config 
- Runtime configuration for the batch execution. Structure is documented below.
- sparkBatch BatchSpark Batch 
- Spark batch config. Structure is documented below.
- sparkRBatch BatchSpark RBatch 
- SparkR batch config. Structure is documented below.
- sparkSqlBatch BatchSparkSqlBatch
- Spark SQL batch config. Structure is documented below.
- batchId string
- The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
- environmentConfig BatchEnvironment Config 
- Environment configuration for the batch execution. Structure is documented below.
- labels {[key: string]: string}
- The labels to associate with this batch. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
- location string
- The location in which the batch will be created.
- project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- pysparkBatch BatchPyspark Batch 
- PySpark batch config. Structure is documented below.
- runtimeConfig BatchRuntime Config 
- Runtime configuration for the batch execution. Structure is documented below.
- sparkBatch BatchSpark Batch 
- Spark batch config. Structure is documented below.
- sparkRBatch BatchSpark RBatch 
- SparkR batch config. Structure is documented below.
- sparkSqlBatch BatchSparkSqlBatch
- Spark SQL batch config. Structure is documented below.
- batch_id str
- The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
- environment_config BatchEnvironment Config Args 
- Environment configuration for the batch execution. Structure is documented below.
- labels Mapping[str, str]
- The labels to associate with this batch. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
- location str
- The location in which the batch will be created.
- project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- pyspark_batch BatchPyspark Batch Args 
- PySpark batch config. Structure is documented below.
- runtime_config BatchRuntime Config Args 
- Runtime configuration for the batch execution. Structure is documented below.
- spark_batch BatchSpark Batch Args 
- Spark batch config. Structure is documented below.
- spark_r_batch BatchSparkRBatchArgs
- SparkR batch config. Structure is documented below.
- spark_sql_batch BatchSparkSqlBatchArgs
- Spark SQL batch config. Structure is documented below.
- batchId String
- The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
- environmentConfig Property Map
- Environment configuration for the batch execution. Structure is documented below.
- labels Map<String>
- The labels to associate with this batch. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
- location String
- The location in which the batch will be created.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- pysparkBatch Property Map
- PySpark batch config. Structure is documented below.
- runtimeConfig Property Map
- Runtime configuration for the batch execution. Structure is documented below.
- sparkBatch Property Map
- Spark batch config. Structure is documented below.
- sparkRBatch Property Map
- SparkR batch config. Structure is documented below.
- sparkSqlBatch Property Map
- Spark SQL batch config. Structure is documented below.
Outputs
All input properties are implicitly available as output properties. Additionally, the Batch resource produces the following output properties:
- CreateTime string
- The time when the batch was created.
- Creator string
- The email address of the user who created the batch.
- EffectiveLabels Dictionary<string, string>
- All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- Id string
- The provider-assigned unique ID for this managed resource.
- Name string
- The resource name of the batch.
- Operation string
- The resource name of the operation associated with this batch.
- PulumiLabels Dictionary<string, string>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- RuntimeInfos List<BatchRuntime Info> 
- Runtime information about batch execution. Structure is documented below.
- State string
- (Output) The state of the batch at this point in history. For possible values, see the API documentation.
- StateHistories List<BatchState History> 
- Historical state information for the batch. Structure is documented below.
- StateMessage string
- (Output) Details about the state at this point in history.
- StateTime string
- The time when the batch entered its current state.
- Uuid string
- A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.
- CreateTime string
- The time when the batch was created.
- Creator string
- The email address of the user who created the batch.
- EffectiveLabels map[string]string
- All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- Id string
- The provider-assigned unique ID for this managed resource.
- Name string
- The resource name of the batch.
- Operation string
- The resource name of the operation associated with this batch.
- PulumiLabels map[string]string
- The combination of labels configured directly on the resource and default labels configured on the provider.
- RuntimeInfos []BatchRuntime Info 
- Runtime information about batch execution. Structure is documented below.
- State string
- (Output) The state of the batch at this point in history. For possible values, see the API documentation.
- StateHistories []BatchState History 
- Historical state information for the batch. Structure is documented below.
- StateMessage string
- (Output) Details about the state at this point in history.
- StateTime string
- The time when the batch entered its current state.
- Uuid string
- A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.
- createTime String
- The time when the batch was created.
- creator String
- The email address of the user who created the batch.
- effectiveLabels Map<String,String>
- All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- id String
- The provider-assigned unique ID for this managed resource.
- name String
- The resource name of the batch.
- operation String
- The resource name of the operation associated with this batch.
- pulumiLabels Map<String,String>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- runtimeInfos List<BatchRuntime Info> 
- Runtime information about batch execution. Structure is documented below.
- state String
- (Output) The state of the batch at this point in history. For possible values, see the API documentation.
- stateHistories List<BatchState History> 
- Historical state information for the batch. Structure is documented below.
- stateMessage String
- (Output) Details about the state at this point in history.
- stateTime String
- The time when the batch entered its current state.
- uuid String
- A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.
- createTime string
- The time when the batch was created.
- creator string
- The email address of the user who created the batch.
- effectiveLabels {[key: string]: string}
- All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- id string
- The provider-assigned unique ID for this managed resource.
- name string
- The resource name of the batch.
- operation string
- The resource name of the operation associated with this batch.
- pulumiLabels {[key: string]: string}
- The combination of labels configured directly on the resource and default labels configured on the provider.
- runtimeInfos BatchRuntime Info[] 
- Runtime information about batch execution. Structure is documented below.
- state string
- (Output) The state of the batch at this point in history. For possible values, see the API documentation.
- stateHistories BatchState History[] 
- Historical state information for the batch. Structure is documented below.
- stateMessage string
- (Output) Details about the state at this point in history.
- stateTime string
- The time when the batch entered its current state.
- uuid string
- A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.
- create_time str
- The time when the batch was created.
- creator str
- The email address of the user who created the batch.
- effective_labels Mapping[str, str]
- All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- id str
- The provider-assigned unique ID for this managed resource.
- name str
- The resource name of the batch.
- operation str
- The resource name of the operation associated with this batch.
- pulumi_labels Mapping[str, str]
- The combination of labels configured directly on the resource and default labels configured on the provider.
- runtime_infos Sequence[BatchRuntime Info] 
- Runtime information about batch execution. Structure is documented below.
- state str
- (Output) The state of the batch at this point in history. For possible values, see the API documentation.
- state_histories Sequence[BatchState History] 
- Historical state information for the batch. Structure is documented below.
- state_message str
- (Output) Details about the state at this point in history.
- state_time str
- The time when the batch entered its current state.
- uuid str
- A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.
- createTime String
- The time when the batch was created.
- creator String
- The email address of the user who created the batch.
- effectiveLabels Map<String>
- All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- id String
- The provider-assigned unique ID for this managed resource.
- name String
- The resource name of the batch.
- operation String
- The resource name of the operation associated with this batch.
- pulumiLabels Map<String>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- runtimeInfos List<Property Map>
- Runtime information about batch execution. Structure is documented below.
- state String
- (Output) The state of the batch at this point in history. For possible values, see the API documentation.
- stateHistories List<Property Map>
- Historical state information for the batch. Structure is documented below.
- stateMessage String
- (Output) Details about the state at this point in history.
- stateTime String
- The time when the batch entered its current state.
- uuid String
- A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.
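As a sketch of how these outputs can be consumed (the batch definition below uses placeholder values and is not one of the examples above), output properties such as state, uuid, and create_time can be exported as stack outputs:
import pulumi
import pulumi_gcp as gcp
batch = gcp.dataproc.Batch("output_example",
    location="us-central1",
    spark_batch={
        "main_class": "org.apache.spark.examples.SparkPi",
        "jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
    })
# Output properties resolve once the batch has been created.
pulumi.export("batchState", batch.state)
pulumi.export("batchUuid", batch.uuid)
pulumi.export("batchCreateTime", batch.create_time)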
Look up Existing Batch Resource
Get an existing Batch resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: BatchState, opts?: CustomResourceOptions): Batch
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        batch_id: Optional[str] = None,
        create_time: Optional[str] = None,
        creator: Optional[str] = None,
        effective_labels: Optional[Mapping[str, str]] = None,
        environment_config: Optional[BatchEnvironmentConfigArgs] = None,
        labels: Optional[Mapping[str, str]] = None,
        location: Optional[str] = None,
        name: Optional[str] = None,
        operation: Optional[str] = None,
        project: Optional[str] = None,
        pulumi_labels: Optional[Mapping[str, str]] = None,
        pyspark_batch: Optional[BatchPysparkBatchArgs] = None,
        runtime_config: Optional[BatchRuntimeConfigArgs] = None,
        runtime_infos: Optional[Sequence[BatchRuntimeInfoArgs]] = None,
        spark_batch: Optional[BatchSparkBatchArgs] = None,
        spark_r_batch: Optional[BatchSparkRBatchArgs] = None,
        spark_sql_batch: Optional[BatchSparkSqlBatchArgs] = None,
        state: Optional[str] = None,
        state_histories: Optional[Sequence[BatchStateHistoryArgs]] = None,
        state_message: Optional[str] = None,
        state_time: Optional[str] = None,
        uuid: Optional[str] = None) -> Batch
func GetBatch(ctx *Context, name string, id IDInput, state *BatchState, opts ...ResourceOption) (*Batch, error)
public static Batch Get(string name, Input<string> id, BatchState? state, CustomResourceOptions? opts = null)
public static Batch get(String name, Output<String> id, BatchState state, CustomResourceOptions options)
resources:
  _:
    type: gcp:dataproc:Batch
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- BatchId string
- The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
- CreateTime string
- The time when the batch was created.
- Creator string
- The email address of the user who created the batch.
- EffectiveLabels Dictionary<string, string>
- All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- EnvironmentConfig BatchEnvironment Config
- Environment configuration for the batch execution. Structure is documented below.
- Labels Dictionary<string, string>
- The labels to associate with this batch. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
- Location string
- The location in which the batch will be created.
- Name string
- The resource name of the batch.
- Operation string
- The resource name of the operation associated with this batch.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- PulumiLabels Dictionary<string, string>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- PysparkBatch BatchPyspark Batch 
- PySpark batch config. Structure is documented below.
- RuntimeConfig BatchRuntime Config 
- Runtime configuration for the batch execution. Structure is documented below.
- RuntimeInfos List<BatchRuntime Info> 
- Runtime information about batch execution. Structure is documented below.
- SparkBatch BatchSpark Batch 
- Spark batch config. Structure is documented below.
- SparkRBatch BatchSpark RBatch 
- SparkR batch config. Structure is documented below.
- SparkSqlBatch BatchSparkSqlBatch
- Spark SQL batch config. Structure is documented below.
- State string
- (Output) The state of the batch at this point in history. For possible values, see the API documentation.
- StateHistories List<BatchState History> 
- Historical state information for the batch. Structure is documented below.
- StateMessage string
- (Output) Details about the state at this point in history.
- StateTime string
- The time when the batch entered its current state.
- Uuid string
- A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.
- BatchId string
- The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
- CreateTime string
- The time when the batch was created.
- Creator string
- The email address of the user who created the batch.
- EffectiveLabels map[string]string
- All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- EnvironmentConfig BatchEnvironment Config Args
- Environment configuration for the batch execution. Structure is documented below.
- Labels map[string]string
- The labels to associate with this batch. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
- Location string
- The location in which the batch will be created.
- Name string
- The resource name of the batch.
- Operation string
- The resource name of the operation associated with this batch.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- PulumiLabels map[string]string
- The combination of labels configured directly on the resource and default labels configured on the provider.
- PysparkBatch BatchPyspark Batch Args 
- PySpark batch config. Structure is documented below.
- RuntimeConfig BatchRuntime Config Args 
- Runtime configuration for the batch execution. Structure is documented below.
- RuntimeInfos []BatchRuntime Info Args 
- Runtime information about batch execution. Structure is documented below.
- SparkBatch BatchSpark Batch Args 
- Spark batch config. Structure is documented below.
- SparkRBatch BatchSpark RBatch Args 
- SparkR batch config. Structure is documented below.
- SparkSqlBatch BatchSparkSqlBatchArgs
- Spark SQL batch config. Structure is documented below.
- State string
- (Output) The state of the batch at this point in history. For possible values, see the API documentation.
- StateHistories []BatchState History Args 
- Historical state information for the batch. Structure is documented below.
- StateMessage string
- (Output) Details about the state at this point in history.
- StateTime string
- The time when the batch entered its current state.
- Uuid string
- A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.
- batchId String
- The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
- createTime String
- The time when the batch was created.
- creator String
- The email address of the user who created the batch.
- effectiveLabels Map<String,String>
- All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- environmentConfig BatchEnvironment Config
- Environment configuration for the batch execution. Structure is documented below.
- labels Map<String,String>
- The labels to associate with this batch. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
- location String
- The location in which the batch will be created.
- name String
- The resource name of the batch.
- operation String
- The resource name of the operation associated with this batch.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- pulumiLabels Map<String,String>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- pysparkBatch BatchPyspark Batch 
- PySpark batch config. Structure is documented below.
- runtimeConfig BatchRuntime Config 
- Runtime configuration for the batch execution. Structure is documented below.
- runtimeInfos List<BatchRuntime Info> 
- Runtime information about batch execution. Structure is documented below.
- sparkBatch BatchSpark Batch 
- Spark batch config. Structure is documented below.
- sparkRBatch BatchSpark RBatch 
- SparkR batch config. Structure is documented below.
- sparkSqlBatch BatchSparkSqlBatch
- Spark SQL batch config. Structure is documented below.
- state String
- (Output) The state of the batch at this point in history. For possible values, see the API documentation.
- stateHistories List<BatchState History> 
- Historical state information for the batch. Structure is documented below.
- stateMessage String
- (Output) Details about the state at this point in history.
- stateTime String
- The time when the batch entered its current state.
- uuid String
- A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.
- batchId string
- The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
- createTime string
- The time when the batch was created.
- creator string
- The email address of the user who created the batch.
- effectiveLabels {[key: string]: string}
- All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- environmentConfig BatchEnvironment Config
- Environment configuration for the batch execution. Structure is documented below.
- labels {[key: string]: string}
- The labels to associate with this batch. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
- location string
- The location in which the batch will be created.
- name string
- The resource name of the batch.
- operation string
- The resource name of the operation associated with this batch.
- project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- pulumiLabels {[key: string]: string}
- The combination of labels configured directly on the resource and default labels configured on the provider.
- pysparkBatch BatchPyspark Batch 
- PySpark batch config. Structure is documented below.
- runtimeConfig BatchRuntime Config 
- Runtime configuration for the batch execution. Structure is documented below.
- runtimeInfos BatchRuntime Info[] 
- Runtime information about batch execution. Structure is documented below.
- sparkBatch BatchSpark Batch 
- Spark batch config. Structure is documented below.
- sparkRBatch BatchSpark RBatch 
- SparkR batch config. Structure is documented below.
- sparkSqlBatch BatchSparkSqlBatch
- Spark SQL batch config. Structure is documented below.
- state string
- (Output) The state of the batch at this point in history. For possible values, see the API documentation.
- stateHistories BatchState History[] 
- Historical state information for the batch. Structure is documented below.
- stateMessage string
- (Output) Details about the state at this point in history.
- stateTime string
- The time when the batch entered its current state.
- uuid string
- A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.
- batch_id str
- The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
- create_time str
- The time when the batch was created.
- creator str
- The email address of the user who created the batch.
- effective_labels Mapping[str, str]
- All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- environment_config BatchEnvironment Config Args
- Environment configuration for the batch execution. Structure is documented below.
- labels Mapping[str, str]
- The labels to associate with this batch. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
- location str
- The location in which the batch will be created.
- name str
- The resource name of the batch.
- operation str
- The resource name of the operation associated with this batch.
- project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- pulumi_labels Mapping[str, str]
- The combination of labels configured directly on the resource and default labels configured on the provider.
- pyspark_batch BatchPyspark Batch Args 
- PySpark batch config. Structure is documented below.
- runtime_config BatchRuntime Config Args 
- Runtime configuration for the batch execution. Structure is documented below.
- runtime_infos Sequence[BatchRuntime Info Args] 
- Runtime information about batch execution. Structure is documented below.
- spark_batch BatchSpark Batch Args 
- Spark batch config. Structure is documented below.
- spark_r_batch BatchSparkRBatchArgs
- SparkR batch config. Structure is documented below.
- spark_sql_batch BatchSparkSqlBatchArgs
- Spark SQL batch config. Structure is documented below.
- state str
- (Output) The state of the batch at this point in history. For possible values, see the API documentation.
- state_histories Sequence[BatchState History Args] 
- Historical state information for the batch. Structure is documented below.
- state_message str
- (Output) Details about the state at this point in history.
- state_time str
- The time when the batch entered its current state.
- uuid str
- A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.
- batchId String
- The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
- createTime String
- The time when the batch was created.
- creator String
- The email address of the user who created the batch.
- effectiveLabels Map<String>
- All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- environmentConfig Property Map
- Environment configuration for the batch execution. Structure is documented below.
- labels Map<String>
- The labels to associate with this batch. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
- location String
- The location in which the batch will be created.
- name String
- The resource name of the batch.
- operation String
- The resource name of the operation associated with this batch.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- pulumiLabels Map<String>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- pysparkBatch Property Map
- PySpark batch config. Structure is documented below.
- runtimeConfig Property Map
- Runtime configuration for the batch execution. Structure is documented below.
- runtimeInfos List<Property Map>
- Runtime information about batch execution. Structure is documented below.
- sparkBatch Property Map
- Spark batch config. Structure is documented below.
- sparkRBatch Property Map
- SparkR batch config. Structure is documented below.
- sparkSqlBatch Property Map
- Spark SQL batch config. Structure is documented below.
- state String
- (Output) The state of the batch at this point in history. For possible values, see the API documentation.
- stateHistories List<Property Map>
- Historical state information for the batch. Structure is documented below.
- stateMessage String
- (Output) Details about the state at this point in history.
- stateTime String
- The time when the batch entered its current state.
- uuid String
- A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.
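A minimal lookup sketch, assuming the batch's full resource ID follows the projects/{project}/locations/{location}/batches/{batch_id} pattern of the name output; the project, location, and batch ID below are placeholders:
import pulumi_gcp as gcp
# Adopt an existing batch into the program by its provider ID; no new batch is created.
existing_batch = gcp.dataproc.Batch.get("existing_batch",
    id="projects/my-project/locations/us-central1/batches/my-batch-id")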
Supporting Types
BatchEnvironmentConfig, BatchEnvironmentConfigArgs      
- ExecutionConfig BatchEnvironment Config Execution Config 
- Execution configuration for a workload. Structure is documented below.
- PeripheralsConfig BatchEnvironment Config Peripherals Config 
- Peripherals configuration that workload has access to. Structure is documented below.
- ExecutionConfig BatchEnvironment Config Execution Config 
- Execution configuration for a workload. Structure is documented below.
- PeripheralsConfig BatchEnvironment Config Peripherals Config 
- Peripherals configuration that workload has access to. Structure is documented below.
- executionConfig BatchEnvironment Config Execution Config 
- Execution configuration for a workload. Structure is documented below.
- peripheralsConfig BatchEnvironment Config Peripherals Config 
- Peripherals configuration that workload has access to. Structure is documented below.
- executionConfig BatchEnvironment Config Execution Config 
- Execution configuration for a workload. Structure is documented below.
- peripheralsConfig BatchEnvironment Config Peripherals Config 
- Peripherals configuration that workload has access to. Structure is documented below.
- execution_config BatchEnvironment Config Execution Config 
- Execution configuration for a workload. Structure is documented below.
- peripherals_config BatchEnvironment Config Peripherals Config 
- Peripherals configuration that workload has access to. Structure is documented below.
- executionConfig Property Map
- Execution configuration for a workload. Structure is documented below.
- peripheralsConfig Property Map
- Peripherals configuration that workload has access to. Structure is documented below.
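For instance, an environment_config combining both nested blocks might look like the following sketch; the service account, staging bucket, and Dataproc Metastore service names are placeholders:
import pulumi_gcp as gcp
environment_config = gcp.dataproc.BatchEnvironmentConfigArgs(
    execution_config=gcp.dataproc.BatchEnvironmentConfigExecutionConfigArgs(
        subnetwork_uri="default",
        service_account="dataproc-batches@my-project.iam.gserviceaccount.com",
        staging_bucket="my-staging-bucket",  # bucket name, not a gs:// URI
        ttl="3600s",
    ),
    peripherals_config=gcp.dataproc.BatchEnvironmentConfigPeripheralsConfigArgs(
        metastore_service="projects/my-project/locations/us-central1/services/my-metastore",
    ),
)
This value can then be passed as the environment_config input of a Batch.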
BatchEnvironmentConfigExecutionConfig, BatchEnvironmentConfigExecutionConfigArgs          
- KmsKey string
- The Cloud KMS key to use for encryption.
- NetworkTags List<string>
- Tags used for network traffic control.
- NetworkUri string
- Network configuration for workload execution.
- ServiceAccount string
- Service account used to execute the workload.
- StagingBucket string
- A Cloud Storage bucket used to stage workload dependencies, config files, and store workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, and then create and manage project-level, per-location staging and temporary buckets. This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
- SubnetworkUri string
- Subnetwork configuration for workload execution.
- Ttl string
- The duration after which the workload will be terminated. When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idleTtl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idleTtl or when ttl has been exceeded, whichever occurs first.
- KmsKey string
- The Cloud KMS key to use for encryption.
- NetworkTags []string
- Tags used for network traffic control.
- NetworkUri string
- Network configuration for workload execution.
- ServiceAccount string
- Service account used to execute the workload.
- StagingBucket string
- A Cloud Storage bucket used to stage workload dependencies, config files, and store workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, and then create and manage project-level, per-location staging and temporary buckets. This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
- SubnetworkUri string
- Subnetwork configuration for workload execution.
- Ttl string
- The duration after which the workload will be terminated. When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idleTtl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idleTtl or when ttl has been exceeded, whichever occurs first.
- kmsKey String
- The Cloud KMS key to use for encryption.
- networkTags List<String>
- Tags used for network traffic control.
- networkUri String
- Network configuration for workload execution.
- serviceAccount String
- Service account used to execute the workload.
- stagingBucket String
- A Cloud Storage bucket used to stage workload dependencies, config files, and store workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, and then create and manage project-level, per-location staging and temporary buckets. This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
- subnetworkUri String
- Subnetwork configuration for workload execution.
- ttl String
- The duration after which the workload will be terminated. When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idleTtl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idleTtl or when ttl has been exceeded, whichever occurs first.
- kmsKey string
- The Cloud KMS key to use for encryption.
- networkTags string[]
- Tags used for network traffic control.
- networkUri string
- Network configuration for workload execution.
- serviceAccount string
- Service account used to execute the workload.
- stagingBucket string
- A Cloud Storage bucket used to stage workload dependencies, config files, and store workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, and then create and manage project-level, per-location staging and temporary buckets. This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
- subnetworkUri string
- Subnetwork configuration for workload execution.
- ttl string
- The duration after which the workload will be terminated. When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idleTtl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idleTtl or when ttl has been exceeded, whichever occurs first.
- kms_key str
- The Cloud KMS key to use for encryption.
- network_tags Sequence[str]
- Tags used for network traffic control.
- network_uri str
- Network configuration for workload execution.
- service_account str
- Service account used to execute the workload.
- staging_bucket str
- A Cloud Storage bucket used to stage workload dependencies, config files, and store workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, and then create and manage project-level, per-location staging and temporary buckets. This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
- subnetwork_uri str
- Subnetwork configuration for workload execution.
- ttl str
- The duration after which the workload will be terminated. When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idleTtl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idleTtl or when ttl has been exceeded, whichever occurs first.
- kmsKey String
- The Cloud KMS key to use for encryption.
- networkTags List<String>
- Tags used for network traffic control.
- networkUri String
- Network configuration for workload execution.
- serviceAccount String
- Service account used to execute the workload.
- stagingBucket String
- A Cloud Storage bucket used to stage workload dependencies, config files, and store workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, and then create and manage project-level, per-location staging and temporary buckets. This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
- subnetworkUri String
- Subnetwork configuration for workload execution.
- ttl String
- The duration after which the workload will be terminated. When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idleTtl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idleTtl or when ttl has been exceeded, whichever occurs first.
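For orientation, here is a hedged sketch of how these execution settings map onto a batch definition. The service account, staging bucket, and KMS key below are placeholders for resources assumed to already exist in your project.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const batchWithExecutionConfig = new gcp.dataproc.Batch("execution-config-example", {
    location: "us-central1",
    environmentConfig: {
        executionConfig: {
            // Placeholder identities and storage; replace with resources from your project.
            serviceAccount: "dataproc-batch@my-project.iam.gserviceaccount.com",
            stagingBucket: "my-dataproc-staging-bucket", // a bucket name, not a gs:// URI
            kmsKey: "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key",
            subnetworkUri: "default",
            networkTags: ["dataproc-batch"],
            ttl: "7200s", // terminate the workload after two hours
        },
    },
    sparkBatch: {
        mainClass: "org.apache.spark.examples.SparkPi",
        jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
    },
});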
BatchEnvironmentConfigPeripheralsConfig, BatchEnvironmentConfigPeripheralsConfigArgs          
- MetastoreService string
- Resource name of an existing Dataproc Metastore service.
- SparkHistoryServerConfig BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfig
- The Spark History Server configuration for the workload. Structure is documented below.
- MetastoreService string
- Resource name of an existing Dataproc Metastore service.
- SparkHistoryServerConfig BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfig
- The Spark History Server configuration for the workload. Structure is documented below.
- metastoreService String
- Resource name of an existing Dataproc Metastore service.
- sparkHistoryServerConfig BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfig
- The Spark History Server configuration for the workload. Structure is documented below.
- metastoreService string
- Resource name of an existing Dataproc Metastore service.
- sparkHistoryServerConfig BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfig
- The Spark History Server configuration for the workload. Structure is documented below.
- metastore_service str
- Resource name of an existing Dataproc Metastore service.
- spark_history_server_config BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfig
- The Spark History Server configuration for the workload. Structure is documented below.
- metastoreService String
- Resource name of an existing Dataproc Metastore service.
- sparkHistoryServerConfig Property Map
- The Spark History Server configuration for the workload. Structure is documented below.
BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfig, BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigArgs                  
- DataprocCluster string
- Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.
- DataprocCluster string
- Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.
- dataprocCluster String
- Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.
- dataprocCluster string
- Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.
- dataproc_cluster str
- Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.
- dataprocCluster String
- Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.
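To show where these peripherals settings sit relative to the rest of the batch, here is a minimal sketch. The Metastore service and history-server cluster names are placeholders for resources you would have created separately.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const batchWithPeripherals = new gcp.dataproc.Batch("peripherals-example", {
    location: "us-central1",
    environmentConfig: {
        peripheralsConfig: {
            // Both resource names below are assumed to already exist.
            metastoreService: "projects/my-project/locations/us-central1/services/my-metastore",
            sparkHistoryServerConfig: {
                dataprocCluster: "projects/my-project/regions/us-central1/clusters/my-history-server",
            },
        },
    },
    sparkBatch: {
        mainClass: "org.apache.spark.examples.SparkPi",
        jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
    },
});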
BatchPysparkBatch, BatchPysparkBatchArgs      
- ArchiveUris List<string>
- HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- Args List<string>
- The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
- FileUris List<string>
- HCFS URIs of files to be placed in the working directory of each executor.
- JarFileUris List<string>
- HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
- MainPythonFileUri string
- The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.
- PythonFileUris List<string>
- HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
- ArchiveUris []string
- HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- Args []string
- The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
- FileUris []string
- HCFS URIs of files to be placed in the working directory of each executor.
- JarFileUris []string
- HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
- MainPythonFileUri string
- The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.
- PythonFileUris []string
- HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
- archiveUris List<String>
- HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- args List<String>
- The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
- fileUris List<String>
- HCFS URIs of files to be placed in the working directory of each executor.
- jarFileUris List<String>
- HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
- mainPythonFileUri String
- The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.
- pythonFileUris List<String>
- HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
- archiveUris string[]
- HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- args string[]
- The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
- fileUris string[]
- HCFS URIs of files to be placed in the working directory of each executor.
- jarFileUris string[]
- HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
- mainPythonFileUri string
- The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.
- pythonFileUris string[]
- HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
- archive_uris Sequence[str]
- HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- args Sequence[str]
- The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
- file_uris Sequence[str]
- HCFS URIs of files to be placed in the working directory of each executor.
- jar_file_uris Sequence[str]
- HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
- main_python_file_uri str
- The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.
- python_file_uris Sequence[str]
- HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
- archiveUris List<String>
- HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- args List<String>
- The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
- fileUris List<String>
- HCFS URIs of files to be placed in the working directory of each executor.
- jarFileUris List<String>
- HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
- mainPythonFileUri String
- The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.
- pythonFileUris List<String>
- HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
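A sketch of a PySpark batch using the fields above; the gs:// URIs are placeholders for files you would stage in your own bucket.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const pysparkExample = new gcp.dataproc.Batch("pyspark-example", {
    location: "us-central1",
    pysparkBatch: {
        // Placeholder URIs; replace with objects staged in your own bucket.
        mainPythonFileUri: "gs://my-bucket/jobs/main.py",
        pythonFileUris: ["gs://my-bucket/jobs/helpers.zip"],
        args: ["--date", "2024-01-01"],
    },
});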
BatchRuntimeConfig, BatchRuntimeConfigArgs      
- AutotuningConfig BatchRuntimeConfigAutotuningConfig
- Optional. Autotuning configuration of the workload. Structure is documented below.
- Cohort string
- Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs.
- ContainerImage string
- Optional custom container image for the job runtime environment. If not specified, a default container image will be used.
- EffectiveProperties Dictionary<string, string>
- (Output) A mapping of property names to values, which are used to configure workload execution.
- Properties Dictionary<string, string>
- A mapping of property names to values, which are used to configure workload execution.
- Version string
- Version of the batch runtime.
- AutotuningConfig BatchRuntimeConfigAutotuningConfig
- Optional. Autotuning configuration of the workload. Structure is documented below.
- Cohort string
- Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs.
- ContainerImage string
- Optional custom container image for the job runtime environment. If not specified, a default container image will be used.
- EffectiveProperties map[string]string
- (Output) A mapping of property names to values, which are used to configure workload execution.
- Properties map[string]string
- A mapping of property names to values, which are used to configure workload execution.
- Version string
- Version of the batch runtime.
- autotuningConfig BatchRuntimeConfigAutotuningConfig
- Optional. Autotuning configuration of the workload. Structure is documented below.
- cohort String
- Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs.
- containerImage String
- Optional custom container image for the job runtime environment. If not specified, a default container image will be used.
- effectiveProperties Map<String,String>
- (Output) A mapping of property names to values, which are used to configure workload execution.
- properties Map<String,String>
- A mapping of property names to values, which are used to configure workload execution.
- version String
- Version of the batch runtime.
- autotuningConfig BatchRuntimeConfigAutotuningConfig
- Optional. Autotuning configuration of the workload. Structure is documented below.
- cohort string
- Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs.
- containerImage string
- Optional custom container image for the job runtime environment. If not specified, a default container image will be used.
- effectiveProperties {[key: string]: string}
- (Output) A mapping of property names to values, which are used to configure workload execution.
- properties {[key: string]: string}
- A mapping of property names to values, which are used to configure workload execution.
- version string
- Version of the batch runtime.
- autotuning_config BatchRuntimeConfigAutotuningConfig
- Optional. Autotuning configuration of the workload. Structure is documented below.
- cohort str
- Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs.
- container_image str
- Optional custom container image for the job runtime environment. If not specified, a default container image will be used.
- effective_properties Mapping[str, str]
- (Output) A mapping of property names to values, which are used to configure workload execution.
- properties Mapping[str, str]
- A mapping of property names to values, which are used to configure workload execution.
- version str
- Version of the batch runtime.
- autotuningConfig Property Map
- Optional. Autotuning configuration of the workload. Structure is documented below.
- cohort String
- Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs.
- containerImage String
- Optional custom container image for the job runtime environment. If not specified, a default container image will be used.
- effectiveProperties Map<String>
- (Output) A mapping of property names to values, which are used to configure workload execution.
- properties Map<String>
- A mapping of property names to values, which are used to configure workload execution.
- version String
- Version of the batch runtime.
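For orientation, a sketch combining the runtime fields above; the runtime version and container image are placeholders you would check against the Dataproc Serverless runtimes available to your project.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const runtimeConfigExample = new gcp.dataproc.Batch("runtime-config-example", {
    location: "us-central1",
    runtimeConfig: {
        version: "2.2", // assumed runtime version; confirm it is currently supported
        containerImage: "gcr.io/my-project/spark-custom:latest", // placeholder custom image
        properties: {
            "spark.executor.instances": "4",
        },
    },
    sparkBatch: {
        mainClass: "org.apache.spark.examples.SparkPi",
        jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
    },
});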
BatchRuntimeConfigAutotuningConfig, BatchRuntimeConfigAutotuningConfigArgs          
- Scenarios List<string>
- Optional. Scenarios for which tunings are applied.
Each value may be one of: SCALING, BROADCAST_HASH_JOIN, MEMORY.
- Scenarios []string
- Optional. Scenarios for which tunings are applied.
Each value may be one of: SCALING, BROADCAST_HASH_JOIN, MEMORY.
- scenarios List<String>
- Optional. Scenarios for which tunings are applied.
Each value may be one of: SCALING, BROADCAST_HASH_JOIN, MEMORY.
- scenarios string[]
- Optional. Scenarios for which tunings are applied.
Each value may be one of: SCALING, BROADCAST_HASH_JOIN, MEMORY.
- scenarios Sequence[str]
- Optional. Scenarios for which tunings are applied.
Each value may be one of: SCALING, BROADCAST_HASH_JOIN, MEMORY.
- scenarios List<String>
- Optional. Scenarios for which tunings are applied.
Each value may be one of: SCALING, BROADCAST_HASH_JOIN, MEMORY.
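Autotuning is keyed off the cohort, so a sketch pairing the two fields may help; the cohort identifier and the scenario list below are illustrative only.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const autotunedBatch = new gcp.dataproc.Batch("autotuning-example", {
    location: "us-central1",
    runtimeConfig: {
        cohort: "daily-sales-etl", // placeholder cohort identifier shared by recurring runs
        autotuningConfig: {
            scenarios: ["SCALING", "MEMORY"],
        },
    },
    sparkBatch: {
        mainClass: "org.apache.spark.examples.SparkPi",
        jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
    },
});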
BatchRuntimeInfo, BatchRuntimeInfoArgs      
- ApproximateUsages List<BatchRuntimeInfoApproximateUsage>
- (Output) Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing). Structure is documented below.
- CurrentUsages List<BatchRuntimeInfoCurrentUsage>
- (Output) Snapshot of current workload resource usage (see Dataproc Serverless pricing). Structure is documented below.
- DiagnosticOutputUri string
- (Output) A URI pointing to the location of the diagnostics tarball.
- Endpoints Dictionary<string, string>
- (Output) Map of remote access endpoints (such as web interfaces and APIs) to their URIs.
- OutputUri string
- (Output) A URI pointing to the location of the stdout and stderr of the workload.
- ApproximateUsages []BatchRuntimeInfoApproximateUsage
- (Output) Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing). Structure is documented below.
- CurrentUsages []BatchRuntimeInfoCurrentUsage
- (Output) Snapshot of current workload resource usage (see Dataproc Serverless pricing). Structure is documented below.
- DiagnosticOutputUri string
- (Output) A URI pointing to the location of the diagnostics tarball.
- Endpoints map[string]string
- (Output) Map of remote access endpoints (such as web interfaces and APIs) to their URIs.
- OutputUri string
- (Output) A URI pointing to the location of the stdout and stderr of the workload.
- approximateUsages List<BatchRuntimeInfoApproximateUsage>
- (Output) Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing). Structure is documented below.
- currentUsages List<BatchRuntimeInfoCurrentUsage>
- (Output) Snapshot of current workload resource usage (see Dataproc Serverless pricing). Structure is documented below.
- diagnosticOutputUri String
- (Output) A URI pointing to the location of the diagnostics tarball.
- endpoints Map<String,String>
- (Output) Map of remote access endpoints (such as web interfaces and APIs) to their URIs.
- outputUri String
- (Output) A URI pointing to the location of the stdout and stderr of the workload.
- approximateUsages BatchRuntimeInfoApproximateUsage[]
- (Output) Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing). Structure is documented below.
- currentUsages BatchRuntimeInfoCurrentUsage[]
- (Output) Snapshot of current workload resource usage (see Dataproc Serverless pricing). Structure is documented below.
- diagnosticOutputUri string
- (Output) A URI pointing to the location of the diagnostics tarball.
- endpoints {[key: string]: string}
- (Output) Map of remote access endpoints (such as web interfaces and APIs) to their URIs.
- outputUri string
- (Output) A URI pointing to the location of the stdout and stderr of the workload.
- approximate_usages Sequence[BatchRuntimeInfoApproximateUsage]
- (Output) Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing). Structure is documented below.
- current_usages Sequence[BatchRuntimeInfoCurrentUsage]
- (Output) Snapshot of current workload resource usage (see Dataproc Serverless pricing). Structure is documented below.
- diagnostic_output_uri str
- (Output) A URI pointing to the location of the diagnostics tarball.
- endpoints Mapping[str, str]
- (Output) Map of remote access endpoints (such as web interfaces and APIs) to their URIs.
- output_uri str
- (Output) A URI pointing to the location of the stdout and stderr of the workload.
- approximateUsages List<Property Map>
- (Output) Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing). Structure is documented below.
- currentUsages List<Property Map>
- (Output) Snapshot of current workload resource usage (see Dataproc Serverless pricing). Structure is documented below.
- diagnosticOutputUri String
- (Output) A URI pointing to the location of the diagnostics tarball.
- endpoints Map<String>
- (Output) Map of remote access endpoints (such as web interfaces and APIs) to their URIs.
- outputUri String
- (Output) A URI pointing to the location of the stdout and stderr of the workload.
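Because runtimeInfos is output-only, it is typically read from the created resource rather than set. A minimal sketch that surfaces the driver output location, assuming at least one runtime info entry is populated after the batch runs:
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const monitoredBatch = new gcp.dataproc.Batch("runtime-info-example", {
    location: "us-central1",
    sparkBatch: {
        mainClass: "org.apache.spark.examples.SparkPi",
        jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
    },
});

// runtimeInfos is a list; pick the first entry's output URI once it is available.
export const driverOutputUri = monitoredBatch.runtimeInfos.apply(infos => infos?.[0]?.outputUri);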
BatchRuntimeInfoApproximateUsage, BatchRuntimeInfoApproximateUsageArgs          
- AcceleratorType string
- (Output) Accelerator type being used, if any.
- MilliAcceleratorSeconds string
- (Output) Accelerator usage in (milliAccelerator x seconds)
- MilliDcuSeconds string
- (Output) DCU (Dataproc Compute Units) usage in (milliDCU x seconds)
- ShuffleStorageGbSeconds string
- (Output) Shuffle storage usage in (GB x seconds)
- AcceleratorType string
- (Output) Accelerator type being used, if any.
- MilliAcceleratorSeconds string
- (Output) Accelerator usage in (milliAccelerator x seconds)
- MilliDcuSeconds string
- (Output) DCU (Dataproc Compute Units) usage in (milliDCU x seconds)
- ShuffleStorageGbSeconds string
- (Output) Shuffle storage usage in (GB x seconds)
- acceleratorType String
- (Output) Accelerator type being used, if any.
- milliAcceleratorSeconds String
- (Output) Accelerator usage in (milliAccelerator x seconds)
- milliDcuSeconds String
- (Output) DCU (Dataproc Compute Units) usage in (milliDCU x seconds)
- shuffleStorageGbSeconds String
- (Output) Shuffle storage usage in (GB x seconds)
- acceleratorType string
- (Output) Accelerator type being used, if any.
- milliAcceleratorSeconds string
- (Output) Accelerator usage in (milliAccelerator x seconds)
- milliDcuSeconds string
- (Output) DCU (Dataproc Compute Units) usage in (milliDCU x seconds)
- shuffleStorageGbSeconds string
- (Output) Shuffle storage usage in (GB x seconds)
- accelerator_type str
- (Output) Accelerator type being used, if any.
- milli_accelerator_seconds str
- (Output) Accelerator usage in (milliAccelerator x seconds)
- milli_dcu_seconds str
- (Output) DCU (Dataproc Compute Units) usage in (milliDCU x seconds)
- shuffle_storage_gb_seconds str
- (Output) Shuffle storage usage in (GB x seconds)
- acceleratorType String
- (Output) Accelerator type being used, if any.
- milliAcceleratorSeconds String
- (Output) Accelerator usage in (milliAccelerator x seconds)
- milliDcuSeconds String
- (Output) DCU (Dataproc Compute Units) usage in (milliDCU x seconds)
- shuffleStorageGbSeconds String
- (Output) Shuffle storage usage in (GB x seconds)
BatchRuntimeInfoCurrentUsage, BatchRuntimeInfoCurrentUsageArgs          
- AcceleratorType string
- (Output) Accelerator type being used, if any.
- MilliAccelerator string
- (Output) Milli (one-thousandth) accelerator.
- MilliDcu string
- (Output) Milli (one-thousandth) Dataproc Compute Units (DCUs).
- MilliDcuPremium string
- (Output) Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier.
- ShuffleStorageGb string
- (Output) Shuffle Storage in gigabytes (GB).
- ShuffleStorageGbPremium string
- (Output) Shuffle Storage in gigabytes (GB) charged at premium tier.
- SnapshotTime string
- (Output) The timestamp of the usage snapshot.
- AcceleratorType string
- (Output) Accelerator type being used, if any.
- MilliAccelerator string
- (Output) Milli (one-thousandth) accelerator.
- MilliDcu string
- (Output) Milli (one-thousandth) Dataproc Compute Units (DCUs).
- MilliDcuPremium string
- (Output) Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier.
- ShuffleStorageGb string
- (Output) Shuffle Storage in gigabytes (GB).
- ShuffleStorageGbPremium string
- (Output) Shuffle Storage in gigabytes (GB) charged at premium tier.
- SnapshotTime string
- (Output) The timestamp of the usage snapshot.
- acceleratorType String
- (Output) Accelerator type being used, if any.
- milliAccelerator String
- (Output) Milli (one-thousandth) accelerator.
- milliDcu String
- (Output) Milli (one-thousandth) Dataproc Compute Units (DCUs).
- milliDcuPremium String
- (Output) Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier.
- shuffleStorageGb String
- (Output) Shuffle Storage in gigabytes (GB).
- shuffleStorageGbPremium String
- (Output) Shuffle Storage in gigabytes (GB) charged at premium tier.
- snapshotTime String
- (Output) The timestamp of the usage snapshot.
- acceleratorType string
- (Output) Accelerator type being used, if any.
- milliAccelerator string
- (Output) Milli (one-thousandth) accelerator.
- milliDcu string
- (Output) Milli (one-thousandth) Dataproc Compute Units (DCUs).
- milliDcuPremium string
- (Output) Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier.
- shuffleStorageGb string
- (Output) Shuffle Storage in gigabytes (GB).
- shuffleStorageGbPremium string
- (Output) Shuffle Storage in gigabytes (GB) charged at premium tier.
- snapshotTime string
- (Output) The timestamp of the usage snapshot.
- accelerator_type str
- (Output) Accelerator type being used, if any.
- milli_accelerator str
- (Output) Milli (one-thousandth) accelerator.
- milli_dcu str
- (Output) Milli (one-thousandth) Dataproc Compute Units (DCUs).
- milli_dcu_premium str
- (Output) Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier.
- shuffle_storage_gb str
- (Output) Shuffle Storage in gigabytes (GB).
- shuffle_storage_gb_premium str
- (Output) Shuffle Storage in gigabytes (GB) charged at premium tier.
- snapshot_time str
- (Output) The timestamp of the usage snapshot.
- acceleratorType String
- (Output) Accelerator type being used, if any.
- milliAccelerator String
- (Output) Milli (one-thousandth) accelerator.
- milliDcu String
- (Output) Milli (one-thousandth) Dataproc Compute Units (DCUs).
- milliDcuPremium String
- (Output) Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier.
- shuffleStorageGb String
- (Output) Shuffle Storage in gigabytes (GB).
- shuffleStorageGbPremium String
- (Output) Shuffle Storage in gigabytes (GB) charged at premium tier.
- snapshotTime String
- (Output) The timestamp of the usage snapshot.
BatchSparkBatch, BatchSparkBatchArgs      
- ArchiveUris List<string>
- HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- Args List<string>
- The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
- FileUris List<string>
- HCFS URIs of files to be placed in the working directory of each executor.
- JarFileUris List<string>
- HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
- MainClass string
- The name of the driver main class. The jar file that contains the class must be in the classpath or specified in jarFileUris.
- MainJarFileUri string
- The HCFS URI of the jar file that contains the main class.
- ArchiveUris []string
- HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- Args []string
- The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
- FileUris []string
- HCFS URIs of files to be placed in the working directory of each executor.
- JarFileUris []string
- HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
- MainClass string
- The name of the driver main class. The jar file that contains the class must be in the classpath or specified in jarFileUris.
- MainJarFileUri string
- The HCFS URI of the jar file that contains the main class.
- archiveUris List<String>
- HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- args List<String>
- The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
- fileUris List<String>
- HCFS URIs of files to be placed in the working directory of each executor.
- jarFileUris List<String>
- HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
- mainClass String
- The name of the driver main class. The jar file that contains the class must be in the classpath or specified in jarFileUris.
- mainJarFileUri String
- The HCFS URI of the jar file that contains the main class.
- archiveUris string[]
- HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- args string[]
- The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
- fileUris string[]
- HCFS URIs of files to be placed in the working directory of each executor.
- jarFileUris string[]
- HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
- mainClass string
- The name of the driver main class. The jar file that contains the class must be in the classpath or specified in jarFileUris.
- mainJarFileUri string
- The HCFS URI of the jar file that contains the main class.
- archive_uris Sequence[str]
- HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- args Sequence[str]
- The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
- file_uris Sequence[str]
- HCFS URIs of files to be placed in the working directory of each executor.
- jar_file_uris Sequence[str]
- HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
- main_class str
- The name of the driver main class. The jar file that contains the class must be in the classpath or specified in jarFileUris.
- main_jar_file_uri str
- The HCFS URI of the jar file that contains the main class.
- archiveUris List<String>
- HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- args List<String>
- The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
- fileUris List<String>
- HCFS URIs of files to be placed in the working directory of each executor.
- jarFileUris List<String>
- HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
- mainClass String
- The name of the driver main class. The jar file that contains the class must be in the classpath or specified in jarFileUris.
- mainJarFileUri String
- The HCFS URI of the jar file that contains the main class.
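mainClass and mainJarFileUri are alternative entry points. The sketch below uses mainJarFileUri with a placeholder jar staged in Cloud Storage; the assumption here is that the jar's manifest identifies the main class.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const sparkJarBatch = new gcp.dataproc.Batch("spark-main-jar-example", {
    location: "us-central1",
    sparkBatch: {
        // Placeholder URI; the jar is expected to declare its own main class.
        mainJarFileUri: "gs://my-bucket/jars/my-spark-job.jar",
        args: ["--mode", "batch"],
    },
});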
BatchSparkRBatch, BatchSparkRBatchArgs      
- ArchiveUris List<string>
- HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- Args List<string>
- The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
- FileUris List<string>
- HCFS URIs of files to be placed in the working directory of each executor.
- MainRFileUri string
- The HCFS URI of the main R file to use as the driver. Must be a .R or .r file.
- ArchiveUris []string
- HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- Args []string
- The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
- FileUris []string
- HCFS URIs of files to be placed in the working directory of each executor.
- MainRFileUri string
- The HCFS URI of the main R file to use as the driver. Must be a .R or .r file.
- archiveUris List<String>
- HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- args List<String>
- The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
- fileUris List<String>
- HCFS URIs of files to be placed in the working directory of each executor.
- mainRFileUri String
- The HCFS URI of the main R file to use as the driver. Must be a .R or .r file.
- archiveUris string[]
- HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- args string[]
- The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
- fileUris string[]
- HCFS URIs of files to be placed in the working directory of each executor.
- mainRFileUri string
- The HCFS URI of the main R file to use as the driver. Must be a .R or .r file.
- archive_uris Sequence[str]
- HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- args Sequence[str]
- The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
- file_uris Sequence[str]
- HCFS URIs of files to be placed in the working directory of each executor.
- main_r_file_uri str
- The HCFS URI of the main R file to use as the driver. Must be a .R or .r file.
- archiveUris List<String>
- HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- args List<String>
- The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
- fileUris List<String>
- HCFS URIs of files to be placed in the working directory of each executor.
- mainRFileUri String
- The HCFS URI of the main R file to use as the driver. Must be a .R or .r file.
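A sketch of a SparkR batch using the fields above; the R script and data URIs are placeholders.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const sparkRExample = new gcp.dataproc.Batch("sparkr-example", {
    location: "us-central1",
    sparkRBatch: {
        mainRFileUri: "gs://my-bucket/jobs/analysis.R", // placeholder .R script
        args: ["--input", "gs://my-bucket/data/input.csv"],
    },
});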
BatchSparkSqlBatch, BatchSparkSqlBatchArgs        
- JarFileUris List<string>
- HCFS URIs of jar files to be added to the Spark CLASSPATH.
- QueryFileUri string
- The HCFS URI of the script that contains Spark SQL queries to execute.
- QueryVariables Dictionary<string, string>
- Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
- JarFileUris []string
- HCFS URIs of jar files to be added to the Spark CLASSPATH.
- QueryFileUri string
- The HCFS URI of the script that contains Spark SQL queries to execute.
- QueryVariables map[string]string
- Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
- jarFileUris List<String>
- HCFS URIs of jar files to be added to the Spark CLASSPATH.
- queryFileUri String
- The HCFS URI of the script that contains Spark SQL queries to execute.
- queryVariables Map<String,String>
- Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
- jarFileUris string[]
- HCFS URIs of jar files to be added to the Spark CLASSPATH.
- queryFileUri string
- The HCFS URI of the script that contains Spark SQL queries to execute.
- queryVariables {[key: string]: string}
- Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
- jar_file_uris Sequence[str]
- HCFS URIs of jar files to be added to the Spark CLASSPATH.
- query_file_uri str
- The HCFS URI of the script that contains Spark SQL queries to execute.
- query_variables Mapping[str, str]
- Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
- jarFileUris List<String>
- HCFS URIs of jar files to be added to the Spark CLASSPATH.
- queryFileUri String
- The HCFS URI of the script that contains Spark SQL queries to execute.
- queryVariables Map<String>
- Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
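A sketch of a Spark SQL batch wiring queryFileUri to queryVariables; the script URI and variable values are placeholders.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const sparkSqlExample = new gcp.dataproc.Batch("spark-sql-example", {
    location: "us-central1",
    sparkSqlBatch: {
        queryFileUri: "gs://my-bucket/sql/daily_report.sql", // placeholder script
        queryVariables: {
            // Equivalent to SET run_date="2024-01-01"; before the script runs.
            run_date: "2024-01-01",
        },
    },
});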
BatchStateHistory, BatchStateHistoryArgs      
- State string
- (Output) The state of the batch at this point in history. For possible values, see the API documentation.
- StateMessage string
- (Output) Details about the state at this point in history.
- StateStartTime string
- (Output) The time when the batch entered the historical state.
- State string
- (Output) The state of the batch at this point in history. For possible values, see the API documentation.
- StateMessage string
- (Output) Details about the state at this point in history.
- StateStartTime string
- (Output) The time when the batch entered the historical state.
- state String
- (Output) The state of the batch at this point in history. For possible values, see the API documentation.
- stateMessage String
- (Output) Details about the state at this point in history.
- stateStartTime String
- (Output) The time when the batch entered the historical state.
- state string
- (Output) The state of the batch at this point in history. For possible values, see the API documentation.
- stateMessage string
- (Output) Details about the state at this point in history.
- stateStartTime string
- (Output) The time when the batch entered the historical state.
- state str
- (Output) The state of the batch at this point in history. For possible values, see the API documentation.
- state_message str
- (Output) Details about the state at this point in history.
- state_start_time str
- (Output) The time when the batch entered the historical state.
- state String
- (Output) The state of the batch at this point in history. For possible values, see the API documentation.
- stateMessage String
- (Output) Details about the state at this point in history.
- stateStartTime String
- (Output) The time when the batch entered the historical state.
Import
Batch can be imported using any of these accepted formats:
- projects/{{project}}/locations/{{location}}/batches/{{batch_id}}
- {{project}}/{{location}}/{{batch_id}}
- {{location}}/{{batch_id}}
When using the pulumi import command, Batch can be imported using one of the formats above. For example:
$ pulumi import gcp:dataproc/batch:Batch default projects/{{project}}/locations/{{location}}/batches/{{batch_id}}
$ pulumi import gcp:dataproc/batch:Batch default {{project}}/{{location}}/{{batch_id}}
$ pulumi import gcp:dataproc/batch:Batch default {{location}}/{{batch_id}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the google-beta Terraform Provider.