gcp.dataproc.Cluster
Explore with Pulumi AI
Manages a Cloud Dataproc cluster resource within GCP.
- API documentation
- How-to Guides
!> Warning: Due to limitations of the API, all arguments except
labels, cluster_config.worker_config.num_instances, and cluster_config.preemptible_worker_config.num_instances are non-updatable. Changing cluster_config.worker_config.min_num_instances will be ignored. Changing any other argument will force recreation of the
whole cluster!
Example Usage
Basic
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const simplecluster = new gcp.dataproc.Cluster("simplecluster", {
    name: "simplecluster",
    region: "us-central1",
});
import pulumi
import pulumi_gcp as gcp
simplecluster = gcp.dataproc.Cluster("simplecluster",
    name="simplecluster",
    region="us-central1")
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewCluster(ctx, "simplecluster", &dataproc.ClusterArgs{
			Name:   pulumi.String("simplecluster"),
			Region: pulumi.String("us-central1"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var simplecluster = new Gcp.Dataproc.Cluster("simplecluster", new()
    {
        Name = "simplecluster",
        Region = "us-central1",
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataproc.Cluster;
import com.pulumi.gcp.dataproc.ClusterArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var simplecluster = new Cluster("simplecluster", ClusterArgs.builder()
            .name("simplecluster")
            .region("us-central1")
            .build());
    }
}
resources:
  simplecluster:
    type: gcp:dataproc:Cluster
    properties:
      name: simplecluster
      region: us-central1
Advanced
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const _default = new gcp.serviceaccount.Account("default", {
    accountId: "service-account-id",
    displayName: "Service Account",
});
const mycluster = new gcp.dataproc.Cluster("mycluster", {
    name: "mycluster",
    region: "us-central1",
    gracefulDecommissionTimeout: "120s",
    labels: {
        foo: "bar",
    },
    clusterConfig: {
        stagingBucket: "dataproc-staging-bucket",
        masterConfig: {
            numInstances: 1,
            machineType: "e2-medium",
            diskConfig: {
                bootDiskType: "pd-ssd",
                bootDiskSizeGb: 30,
            },
        },
        workerConfig: {
            numInstances: 2,
            machineType: "e2-medium",
            minCpuPlatform: "Intel Skylake",
            diskConfig: {
                bootDiskSizeGb: 30,
                numLocalSsds: 1,
            },
        },
        preemptibleWorkerConfig: {
            numInstances: 0,
        },
        softwareConfig: {
            imageVersion: "2.0.35-debian10",
            overrideProperties: {
                "dataproc:dataproc.allow.zero.workers": "true",
            },
        },
        gceClusterConfig: {
            tags: [
                "foo",
                "bar",
            ],
            serviceAccount: _default.email,
            serviceAccountScopes: ["cloud-platform"],
        },
        initializationActions: [{
            script: "gs://dataproc-initialization-actions/stackdriver/stackdriver.sh",
            timeoutSec: 500,
        }],
    },
});
import pulumi
import pulumi_gcp as gcp
default = gcp.serviceaccount.Account("default",
    account_id="service-account-id",
    display_name="Service Account")
mycluster = gcp.dataproc.Cluster("mycluster",
    name="mycluster",
    region="us-central1",
    graceful_decommission_timeout="120s",
    labels={
        "foo": "bar",
    },
    cluster_config={
        "staging_bucket": "dataproc-staging-bucket",
        "master_config": {
            "num_instances": 1,
            "machine_type": "e2-medium",
            "disk_config": {
                "boot_disk_type": "pd-ssd",
                "boot_disk_size_gb": 30,
            },
        },
        "worker_config": {
            "num_instances": 2,
            "machine_type": "e2-medium",
            "min_cpu_platform": "Intel Skylake",
            "disk_config": {
                "boot_disk_size_gb": 30,
                "num_local_ssds": 1,
            },
        },
        "preemptible_worker_config": {
            "num_instances": 0,
        },
        "software_config": {
            "image_version": "2.0.35-debian10",
            "override_properties": {
                "dataproc:dataproc.allow.zero.workers": "true",
            },
        },
        "gce_cluster_config": {
            "tags": [
                "foo",
                "bar",
            ],
            "service_account": default.email,
            "service_account_scopes": ["cloud-platform"],
        },
        "initialization_actions": [{
            "script": "gs://dataproc-initialization-actions/stackdriver/stackdriver.sh",
            "timeout_sec": 500,
        }],
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/serviceaccount"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_default, err := serviceaccount.NewAccount(ctx, "default", &serviceaccount.AccountArgs{
			AccountId:   pulumi.String("service-account-id"),
			DisplayName: pulumi.String("Service Account"),
		})
		if err != nil {
			return err
		}
		_, err = dataproc.NewCluster(ctx, "mycluster", &dataproc.ClusterArgs{
			Name:                        pulumi.String("mycluster"),
			Region:                      pulumi.String("us-central1"),
			GracefulDecommissionTimeout: pulumi.String("120s"),
			Labels: pulumi.StringMap{
				"foo": pulumi.String("bar"),
			},
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				StagingBucket: pulumi.String("dataproc-staging-bucket"),
				MasterConfig: &dataproc.ClusterClusterConfigMasterConfigArgs{
					NumInstances: pulumi.Int(1),
					MachineType:  pulumi.String("e2-medium"),
					DiskConfig: &dataproc.ClusterClusterConfigMasterConfigDiskConfigArgs{
						BootDiskType:   pulumi.String("pd-ssd"),
						BootDiskSizeGb: pulumi.Int(30),
					},
				},
				WorkerConfig: &dataproc.ClusterClusterConfigWorkerConfigArgs{
					NumInstances:   pulumi.Int(2),
					MachineType:    pulumi.String("e2-medium"),
					MinCpuPlatform: pulumi.String("Intel Skylake"),
					DiskConfig: &dataproc.ClusterClusterConfigWorkerConfigDiskConfigArgs{
						BootDiskSizeGb: pulumi.Int(30),
						NumLocalSsds:   pulumi.Int(1),
					},
				},
				PreemptibleWorkerConfig: &dataproc.ClusterClusterConfigPreemptibleWorkerConfigArgs{
					NumInstances: pulumi.Int(0),
				},
				SoftwareConfig: &dataproc.ClusterClusterConfigSoftwareConfigArgs{
					ImageVersion: pulumi.String("2.0.35-debian10"),
					OverrideProperties: pulumi.StringMap{
						"dataproc:dataproc.allow.zero.workers": pulumi.String("true"),
					},
				},
				GceClusterConfig: &dataproc.ClusterClusterConfigGceClusterConfigArgs{
					Tags: pulumi.StringArray{
						pulumi.String("foo"),
						pulumi.String("bar"),
					},
					ServiceAccount: _default.Email,
					ServiceAccountScopes: pulumi.StringArray{
						pulumi.String("cloud-platform"),
					},
				},
				InitializationActions: dataproc.ClusterClusterConfigInitializationActionArray{
					&dataproc.ClusterClusterConfigInitializationActionArgs{
						Script:     pulumi.String("gs://dataproc-initialization-actions/stackdriver/stackdriver.sh"),
						TimeoutSec: pulumi.Int(500),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var @default = new Gcp.ServiceAccount.Account("default", new()
    {
        AccountId = "service-account-id",
        DisplayName = "Service Account",
    });
    var mycluster = new Gcp.Dataproc.Cluster("mycluster", new()
    {
        Name = "mycluster",
        Region = "us-central1",
        GracefulDecommissionTimeout = "120s",
        Labels = 
        {
            { "foo", "bar" },
        },
        ClusterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigArgs
        {
            StagingBucket = "dataproc-staging-bucket",
            MasterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigArgs
            {
                NumInstances = 1,
                MachineType = "e2-medium",
                DiskConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigDiskConfigArgs
                {
                    BootDiskType = "pd-ssd",
                    BootDiskSizeGb = 30,
                },
            },
            WorkerConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigWorkerConfigArgs
            {
                NumInstances = 2,
                MachineType = "e2-medium",
                MinCpuPlatform = "Intel Skylake",
                DiskConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigWorkerConfigDiskConfigArgs
                {
                    BootDiskSizeGb = 30,
                    NumLocalSsds = 1,
                },
            },
            PreemptibleWorkerConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigPreemptibleWorkerConfigArgs
            {
                NumInstances = 0,
            },
            SoftwareConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigSoftwareConfigArgs
            {
                ImageVersion = "2.0.35-debian10",
                OverrideProperties = 
                {
                    { "dataproc:dataproc.allow.zero.workers", "true" },
                },
            },
            GceClusterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigGceClusterConfigArgs
            {
                Tags = new[]
                {
                    "foo",
                    "bar",
                },
                ServiceAccount = @default.Email,
                ServiceAccountScopes = new[]
                {
                    "cloud-platform",
                },
            },
            InitializationActions = new[]
            {
                new Gcp.Dataproc.Inputs.ClusterClusterConfigInitializationActionArgs
                {
                    Script = "gs://dataproc-initialization-actions/stackdriver/stackdriver.sh",
                    TimeoutSec = 500,
                },
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.serviceaccount.Account;
import com.pulumi.gcp.serviceaccount.AccountArgs;
import com.pulumi.gcp.dataproc.Cluster;
import com.pulumi.gcp.dataproc.ClusterArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigMasterConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigMasterConfigDiskConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigWorkerConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigWorkerConfigDiskConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigPreemptibleWorkerConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigSoftwareConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigGceClusterConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigInitializationActionArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        // Service account the cluster's VMs run as; referenced below via its email output.
        var default_ = new Account("default", AccountArgs.builder()
            .accountId("service-account-id")
            .displayName("Service Account")
            .build());
        var mycluster = new Cluster("mycluster", ClusterArgs.builder()
            .name("mycluster")
            .region("us-central1")
            .gracefulDecommissionTimeout("120s")
            .labels(Map.of("foo", "bar"))
            .clusterConfig(ClusterClusterConfigArgs.builder()
                .stagingBucket("dataproc-staging-bucket")
                .masterConfig(ClusterClusterConfigMasterConfigArgs.builder()
                    .numInstances(1)
                    .machineType("e2-medium")
                    .diskConfig(ClusterClusterConfigMasterConfigDiskConfigArgs.builder()
                        .bootDiskType("pd-ssd")
                        .bootDiskSizeGb(30)
                        .build())
                    .build())
                .workerConfig(ClusterClusterConfigWorkerConfigArgs.builder()
                    .numInstances(2)
                    .machineType("e2-medium")
                    .minCpuPlatform("Intel Skylake")
                    .diskConfig(ClusterClusterConfigWorkerConfigDiskConfigArgs.builder()
                        .bootDiskSizeGb(30)
                        .numLocalSsds(1)
                        .build())
                    .build())
                .preemptibleWorkerConfig(ClusterClusterConfigPreemptibleWorkerConfigArgs.builder()
                    .numInstances(0)
                    .build())
                .softwareConfig(ClusterClusterConfigSoftwareConfigArgs.builder()
                    .imageVersion("2.0.35-debian10")
                    .overrideProperties(Map.of("dataproc:dataproc.allow.zero.workers", "true"))
                    .build())
                .gceClusterConfig(ClusterClusterConfigGceClusterConfigArgs.builder()
                    .tags(                    
                        "foo",
                        "bar")
                    .serviceAccount(default_.email())
                    .serviceAccountScopes("cloud-platform")
                    .build())
                .initializationActions(ClusterClusterConfigInitializationActionArgs.builder()
                    .script("gs://dataproc-initialization-actions/stackdriver/stackdriver.sh")
                    .timeoutSec(500)
                    .build())
                .build())
            .build());
    }
}
resources:
  default:
    type: gcp:serviceaccount:Account
    properties:
      accountId: service-account-id
      displayName: Service Account
  mycluster:
    type: gcp:dataproc:Cluster
    properties:
      name: mycluster
      region: us-central1
      gracefulDecommissionTimeout: 120s
      labels:
        foo: bar
      clusterConfig:
        stagingBucket: dataproc-staging-bucket
        masterConfig:
          numInstances: 1
          machineType: e2-medium
          diskConfig:
            bootDiskType: pd-ssd
            bootDiskSizeGb: 30
        workerConfig:
          numInstances: 2
          machineType: e2-medium
          minCpuPlatform: Intel Skylake
          diskConfig:
            bootDiskSizeGb: 30
            numLocalSsds: 1
        preemptibleWorkerConfig:
          numInstances: 0
        softwareConfig:
          imageVersion: 2.0.35-debian10
          overrideProperties:
            dataproc:dataproc.allow.zero.workers: 'true'
        gceClusterConfig:
          tags:
            - foo
            - bar
          serviceAccount: ${default.email}
          serviceAccountScopes:
            - cloud-platform
        initializationActions:
          - script: gs://dataproc-initialization-actions/stackdriver/stackdriver.sh
            timeoutSec: 500
Using A GPU Accelerator
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const acceleratedCluster = new gcp.dataproc.Cluster("accelerated_cluster", {
    name: "my-cluster-with-gpu",
    region: "us-central1",
    clusterConfig: {
        gceClusterConfig: {
            zone: "us-central1-a",
        },
        masterConfig: {
            accelerators: [{
                acceleratorType: "nvidia-tesla-k80",
                acceleratorCount: 1,
            }],
        },
    },
});
import pulumi
import pulumi_gcp as gcp
accelerated_cluster = gcp.dataproc.Cluster("accelerated_cluster",
    name="my-cluster-with-gpu",
    region="us-central1",
    cluster_config={
        "gce_cluster_config": {
            "zone": "us-central1-a",
        },
        "master_config": {
            "accelerators": [{
                "accelerator_type": "nvidia-tesla-k80",
                "accelerator_count": 1,
            }],
        },
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewCluster(ctx, "accelerated_cluster", &dataproc.ClusterArgs{
			Name:   pulumi.String("my-cluster-with-gpu"),
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				GceClusterConfig: &dataproc.ClusterClusterConfigGceClusterConfigArgs{
					Zone: pulumi.String("us-central1-a"),
				},
				MasterConfig: &dataproc.ClusterClusterConfigMasterConfigArgs{
					Accelerators: dataproc.ClusterClusterConfigMasterConfigAcceleratorArray{
						&dataproc.ClusterClusterConfigMasterConfigAcceleratorArgs{
							AcceleratorType:  pulumi.String("nvidia-tesla-k80"),
							AcceleratorCount: pulumi.Int(1),
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var acceleratedCluster = new Gcp.Dataproc.Cluster("accelerated_cluster", new()
    {
        Name = "my-cluster-with-gpu",
        Region = "us-central1",
        ClusterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigArgs
        {
            GceClusterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigGceClusterConfigArgs
            {
                Zone = "us-central1-a",
            },
            MasterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigArgs
            {
                Accelerators = new[]
                {
                    new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigAcceleratorArgs
                    {
                        AcceleratorType = "nvidia-tesla-k80",
                        AcceleratorCount = 1,
                    },
                },
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataproc.Cluster;
import com.pulumi.gcp.dataproc.ClusterArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigGceClusterConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigMasterConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigMasterConfigAcceleratorArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        // Accelerators require an explicit zone: GPU availability varies per zone.
        var acceleratedCluster = new Cluster("acceleratedCluster", ClusterArgs.builder()
            .name("my-cluster-with-gpu")
            .region("us-central1")
            .clusterConfig(ClusterClusterConfigArgs.builder()
                .gceClusterConfig(ClusterClusterConfigGceClusterConfigArgs.builder()
                    .zone("us-central1-a")
                    .build())
                .masterConfig(ClusterClusterConfigMasterConfigArgs.builder()
                    .accelerators(ClusterClusterConfigMasterConfigAcceleratorArgs.builder()
                        .acceleratorType("nvidia-tesla-k80")
                        .acceleratorCount(1)
                        .build())
                    .build())
                .build())
            .build());
    }
}
resources:
  acceleratedCluster:
    type: gcp:dataproc:Cluster
    name: accelerated_cluster
    properties:
      name: my-cluster-with-gpu
      region: us-central1
      clusterConfig:
        gceClusterConfig:
          # Accelerators require an explicit zone: GPU availability varies per zone.
          zone: us-central1-a
        masterConfig:
          accelerators:
            - acceleratorType: nvidia-tesla-k80
              # acceleratorCount is an integer field; do not quote the value.
              acceleratorCount: 1
Create Cluster Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Cluster(name: string, args?: ClusterArgs, opts?: CustomResourceOptions);@overload
def Cluster(resource_name: str,
            args: Optional[ClusterArgs] = None,
            opts: Optional[ResourceOptions] = None)
@overload
def Cluster(resource_name: str,
            opts: Optional[ResourceOptions] = None,
            cluster_config: Optional[ClusterClusterConfigArgs] = None,
            graceful_decommission_timeout: Optional[str] = None,
            labels: Optional[Mapping[str, str]] = None,
            name: Optional[str] = None,
            project: Optional[str] = None,
            region: Optional[str] = None,
            virtual_cluster_config: Optional[ClusterVirtualClusterConfigArgs] = None)func NewCluster(ctx *Context, name string, args *ClusterArgs, opts ...ResourceOption) (*Cluster, error)public Cluster(string name, ClusterArgs? args = null, CustomResourceOptions? opts = null)
public Cluster(String name, ClusterArgs args)
public Cluster(String name, ClusterArgs args, CustomResourceOptions options)
type: gcp:dataproc:Cluster
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args ClusterArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args ClusterArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args ClusterArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args ClusterArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args ClusterArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var exampleclusterResourceResourceFromDataproccluster = new Gcp.Dataproc.Cluster("exampleclusterResourceResourceFromDataproccluster", new()
{
    ClusterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigArgs
    {
        AutoscalingConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigAutoscalingConfigArgs
        {
            PolicyUri = "string",
        },
        AuxiliaryNodeGroups = new[]
        {
            new Gcp.Dataproc.Inputs.ClusterClusterConfigAuxiliaryNodeGroupArgs
            {
                NodeGroups = new[]
                {
                    new Gcp.Dataproc.Inputs.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupArgs
                    {
                        Roles = new[]
                        {
                            "string",
                        },
                        Name = "string",
                        NodeGroupConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigArgs
                        {
                            Accelerators = new[]
                            {
                                new Gcp.Dataproc.Inputs.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigAcceleratorArgs
                                {
                                    AcceleratorCount = 0,
                                    AcceleratorType = "string",
                                },
                            },
                            DiskConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigDiskConfigArgs
                            {
                                BootDiskSizeGb = 0,
                                BootDiskType = "string",
                                LocalSsdInterface = "string",
                                NumLocalSsds = 0,
                            },
                            InstanceNames = new[]
                            {
                                "string",
                            },
                            MachineType = "string",
                            MinCpuPlatform = "string",
                            NumInstances = 0,
                        },
                    },
                },
                NodeGroupId = "string",
            },
        },
        Bucket = "string",
        DataprocMetricConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigDataprocMetricConfigArgs
        {
            Metrics = new[]
            {
                new Gcp.Dataproc.Inputs.ClusterClusterConfigDataprocMetricConfigMetricArgs
                {
                    MetricSource = "string",
                    MetricOverrides = new[]
                    {
                        "string",
                    },
                },
            },
        },
        EncryptionConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigEncryptionConfigArgs
        {
            KmsKeyName = "string",
        },
        EndpointConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigEndpointConfigArgs
        {
            EnableHttpPortAccess = false,
            HttpPorts = 
            {
                { "string", "string" },
            },
        },
        GceClusterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigGceClusterConfigArgs
        {
            ConfidentialInstanceConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigGceClusterConfigConfidentialInstanceConfigArgs
            {
                EnableConfidentialCompute = false,
            },
            InternalIpOnly = false,
            Metadata = 
            {
                { "string", "string" },
            },
            Network = "string",
            NodeGroupAffinity = new Gcp.Dataproc.Inputs.ClusterClusterConfigGceClusterConfigNodeGroupAffinityArgs
            {
                NodeGroupUri = "string",
            },
            ReservationAffinity = new Gcp.Dataproc.Inputs.ClusterClusterConfigGceClusterConfigReservationAffinityArgs
            {
                ConsumeReservationType = "string",
                Key = "string",
                Values = new[]
                {
                    "string",
                },
            },
            ServiceAccount = "string",
            ServiceAccountScopes = new[]
            {
                "string",
            },
            ShieldedInstanceConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigGceClusterConfigShieldedInstanceConfigArgs
            {
                EnableIntegrityMonitoring = false,
                EnableSecureBoot = false,
                EnableVtpm = false,
            },
            Subnetwork = "string",
            Tags = new[]
            {
                "string",
            },
            Zone = "string",
        },
        InitializationActions = new[]
        {
            new Gcp.Dataproc.Inputs.ClusterClusterConfigInitializationActionArgs
            {
                Script = "string",
                TimeoutSec = 0,
            },
        },
        LifecycleConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigLifecycleConfigArgs
        {
            AutoDeleteTime = "string",
            IdleDeleteTtl = "string",
            IdleStartTime = "string",
        },
        MasterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigArgs
        {
            Accelerators = new[]
            {
                new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigAcceleratorArgs
                {
                    AcceleratorCount = 0,
                    AcceleratorType = "string",
                },
            },
            DiskConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigDiskConfigArgs
            {
                BootDiskSizeGb = 0,
                BootDiskType = "string",
                LocalSsdInterface = "string",
                NumLocalSsds = 0,
            },
            ImageUri = "string",
            InstanceNames = new[]
            {
                "string",
            },
            MachineType = "string",
            MinCpuPlatform = "string",
            NumInstances = 0,
        },
        MetastoreConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMetastoreConfigArgs
        {
            DataprocMetastoreService = "string",
        },
        PreemptibleWorkerConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigPreemptibleWorkerConfigArgs
        {
            DiskConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs
            {
                BootDiskSizeGb = 0,
                BootDiskType = "string",
                LocalSsdInterface = "string",
                NumLocalSsds = 0,
            },
            InstanceFlexibilityPolicy = new Gcp.Dataproc.Inputs.ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyArgs
            {
                InstanceSelectionLists = new[]
                {
                    new Gcp.Dataproc.Inputs.ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionListArgs
                    {
                        MachineTypes = new[]
                        {
                            "string",
                        },
                        Rank = 0,
                    },
                },
                InstanceSelectionResults = new[]
                {
                    new Gcp.Dataproc.Inputs.ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionResultArgs
                    {
                        MachineType = "string",
                        VmCount = 0,
                    },
                },
                ProvisioningModelMix = new Gcp.Dataproc.Inputs.ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyProvisioningModelMixArgs
                {
                    StandardCapacityBase = 0,
                    StandardCapacityPercentAboveBase = 0,
                },
            },
            InstanceNames = new[]
            {
                "string",
            },
            NumInstances = 0,
            Preemptibility = "string",
        },
        SecurityConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigSecurityConfigArgs
        {
            KerberosConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigSecurityConfigKerberosConfigArgs
            {
                KmsKeyUri = "string",
                RootPrincipalPasswordUri = "string",
                CrossRealmTrustSharedPasswordUri = "string",
                CrossRealmTrustAdminServer = "string",
                EnableKerberos = false,
                KdcDbKeyUri = "string",
                KeyPasswordUri = "string",
                KeystorePasswordUri = "string",
                KeystoreUri = "string",
                CrossRealmTrustRealm = "string",
                Realm = "string",
                CrossRealmTrustKdc = "string",
                TgtLifetimeHours = 0,
                TruststorePasswordUri = "string",
                TruststoreUri = "string",
            },
        },
        SoftwareConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigSoftwareConfigArgs
        {
            ImageVersion = "string",
            OptionalComponents = new[]
            {
                "string",
            },
            OverrideProperties = 
            {
                { "string", "string" },
            },
            Properties = 
            {
                { "string", "string" },
            },
        },
        StagingBucket = "string",
        TempBucket = "string",
        WorkerConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigWorkerConfigArgs
        {
            Accelerators = new[]
            {
                new Gcp.Dataproc.Inputs.ClusterClusterConfigWorkerConfigAcceleratorArgs
                {
                    AcceleratorCount = 0,
                    AcceleratorType = "string",
                },
            },
            DiskConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigWorkerConfigDiskConfigArgs
            {
                BootDiskSizeGb = 0,
                BootDiskType = "string",
                LocalSsdInterface = "string",
                NumLocalSsds = 0,
            },
            ImageUri = "string",
            InstanceNames = new[]
            {
                "string",
            },
            MachineType = "string",
            MinCpuPlatform = "string",
            MinNumInstances = 0,
            NumInstances = 0,
        },
    },
    GracefulDecommissionTimeout = "string",
    Labels = 
    {
        { "string", "string" },
    },
    Name = "string",
    Project = "string",
    Region = "string",
    VirtualClusterConfig = new Gcp.Dataproc.Inputs.ClusterVirtualClusterConfigArgs
    {
        AuxiliaryServicesConfig = new Gcp.Dataproc.Inputs.ClusterVirtualClusterConfigAuxiliaryServicesConfigArgs
        {
            MetastoreConfig = new Gcp.Dataproc.Inputs.ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigArgs
            {
                DataprocMetastoreService = "string",
            },
            SparkHistoryServerConfig = new Gcp.Dataproc.Inputs.ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigArgs
            {
                DataprocCluster = "string",
            },
        },
        KubernetesClusterConfig = new Gcp.Dataproc.Inputs.ClusterVirtualClusterConfigKubernetesClusterConfigArgs
        {
            GkeClusterConfig = new Gcp.Dataproc.Inputs.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigArgs
            {
                GkeClusterTarget = "string",
                NodePoolTargets = new[]
                {
                    new Gcp.Dataproc.Inputs.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetArgs
                    {
                        NodePool = "string",
                        Roles = new[]
                        {
                            "string",
                        },
                        NodePoolConfig = new Gcp.Dataproc.Inputs.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigArgs
                        {
                            Locations = new[]
                            {
                                "string",
                            },
                            Autoscaling = new Gcp.Dataproc.Inputs.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingArgs
                            {
                                MaxNodeCount = 0,
                                MinNodeCount = 0,
                            },
                            Config = new Gcp.Dataproc.Inputs.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigArgs
                            {
                                LocalSsdCount = 0,
                                MachineType = "string",
                                MinCpuPlatform = "string",
                                Preemptible = false,
                                Spot = false,
                            },
                        },
                    },
                },
            },
            KubernetesSoftwareConfig = new Gcp.Dataproc.Inputs.ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigArgs
            {
                ComponentVersion = 
                {
                    { "string", "string" },
                },
                Properties = 
                {
                    { "string", "string" },
                },
            },
            KubernetesNamespace = "string",
        },
        StagingBucket = "string",
    },
});
// Exhaustive Go example: constructs a Dataproc cluster with every ClusterArgs
// field shown, using placeholder values ("string", 0, false). This is a schema
// reference, not a runnable configuration — substitute real values before use.
// NOTE(review): per the warning above, most cluster_config fields are
// non-updatable; changing them forces recreation of the whole cluster.
example, err := dataproc.NewCluster(ctx, "exampleclusterResourceResourceFromDataproccluster", &dataproc.ClusterArgs{
	// Per-cluster configuration; mirrors the clusterConfig schema field-for-field.
	ClusterConfig: &dataproc.ClusterClusterConfigArgs{
		AutoscalingConfig: &dataproc.ClusterClusterConfigAutoscalingConfigArgs{
			PolicyUri: pulumi.String("string"),
		},
		AuxiliaryNodeGroups: dataproc.ClusterClusterConfigAuxiliaryNodeGroupArray{
			&dataproc.ClusterClusterConfigAuxiliaryNodeGroupArgs{
				NodeGroups: dataproc.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupArray{
					&dataproc.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupArgs{
						Roles: pulumi.StringArray{
							pulumi.String("string"),
						},
						Name: pulumi.String("string"),
						NodeGroupConfig: &dataproc.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigArgs{
							Accelerators: dataproc.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigAcceleratorArray{
								&dataproc.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigAcceleratorArgs{
									AcceleratorCount: pulumi.Int(0),
									AcceleratorType:  pulumi.String("string"),
								},
							},
							DiskConfig: &dataproc.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigDiskConfigArgs{
								BootDiskSizeGb:    pulumi.Int(0),
								BootDiskType:      pulumi.String("string"),
								LocalSsdInterface: pulumi.String("string"),
								NumLocalSsds:      pulumi.Int(0),
							},
							InstanceNames: pulumi.StringArray{
								pulumi.String("string"),
							},
							MachineType:    pulumi.String("string"),
							MinCpuPlatform: pulumi.String("string"),
							NumInstances:   pulumi.Int(0),
						},
					},
				},
				NodeGroupId: pulumi.String("string"),
			},
		},
		Bucket: pulumi.String("string"),
		DataprocMetricConfig: &dataproc.ClusterClusterConfigDataprocMetricConfigArgs{
			Metrics: dataproc.ClusterClusterConfigDataprocMetricConfigMetricArray{
				&dataproc.ClusterClusterConfigDataprocMetricConfigMetricArgs{
					MetricSource: pulumi.String("string"),
					MetricOverrides: pulumi.StringArray{
						pulumi.String("string"),
					},
				},
			},
		},
		EncryptionConfig: &dataproc.ClusterClusterConfigEncryptionConfigArgs{
			KmsKeyName: pulumi.String("string"),
		},
		EndpointConfig: &dataproc.ClusterClusterConfigEndpointConfigArgs{
			EnableHttpPortAccess: pulumi.Bool(false),
			HttpPorts: pulumi.StringMap{
				"string": pulumi.String("string"),
			},
		},
		// Compute Engine settings shared by all cluster instances.
		GceClusterConfig: &dataproc.ClusterClusterConfigGceClusterConfigArgs{
			ConfidentialInstanceConfig: &dataproc.ClusterClusterConfigGceClusterConfigConfidentialInstanceConfigArgs{
				EnableConfidentialCompute: pulumi.Bool(false),
			},
			InternalIpOnly: pulumi.Bool(false),
			Metadata: pulumi.StringMap{
				"string": pulumi.String("string"),
			},
			Network: pulumi.String("string"),
			NodeGroupAffinity: &dataproc.ClusterClusterConfigGceClusterConfigNodeGroupAffinityArgs{
				NodeGroupUri: pulumi.String("string"),
			},
			ReservationAffinity: &dataproc.ClusterClusterConfigGceClusterConfigReservationAffinityArgs{
				ConsumeReservationType: pulumi.String("string"),
				Key:                    pulumi.String("string"),
				Values: pulumi.StringArray{
					pulumi.String("string"),
				},
			},
			ServiceAccount: pulumi.String("string"),
			ServiceAccountScopes: pulumi.StringArray{
				pulumi.String("string"),
			},
			ShieldedInstanceConfig: &dataproc.ClusterClusterConfigGceClusterConfigShieldedInstanceConfigArgs{
				EnableIntegrityMonitoring: pulumi.Bool(false),
				EnableSecureBoot:          pulumi.Bool(false),
				EnableVtpm:                pulumi.Bool(false),
			},
			Subnetwork: pulumi.String("string"),
			Tags: pulumi.StringArray{
				pulumi.String("string"),
			},
			Zone: pulumi.String("string"),
		},
		InitializationActions: dataproc.ClusterClusterConfigInitializationActionArray{
			&dataproc.ClusterClusterConfigInitializationActionArgs{
				Script:     pulumi.String("string"),
				TimeoutSec: pulumi.Int(0),
			},
		},
		LifecycleConfig: &dataproc.ClusterClusterConfigLifecycleConfigArgs{
			AutoDeleteTime: pulumi.String("string"),
			IdleDeleteTtl:  pulumi.String("string"),
			IdleStartTime:  pulumi.String("string"),
		},
		MasterConfig: &dataproc.ClusterClusterConfigMasterConfigArgs{
			Accelerators: dataproc.ClusterClusterConfigMasterConfigAcceleratorArray{
				&dataproc.ClusterClusterConfigMasterConfigAcceleratorArgs{
					AcceleratorCount: pulumi.Int(0),
					AcceleratorType:  pulumi.String("string"),
				},
			},
			DiskConfig: &dataproc.ClusterClusterConfigMasterConfigDiskConfigArgs{
				BootDiskSizeGb:    pulumi.Int(0),
				BootDiskType:      pulumi.String("string"),
				LocalSsdInterface: pulumi.String("string"),
				NumLocalSsds:      pulumi.Int(0),
			},
			ImageUri: pulumi.String("string"),
			InstanceNames: pulumi.StringArray{
				pulumi.String("string"),
			},
			MachineType:    pulumi.String("string"),
			MinCpuPlatform: pulumi.String("string"),
			NumInstances:   pulumi.Int(0),
		},
		MetastoreConfig: &dataproc.ClusterClusterConfigMetastoreConfigArgs{
			DataprocMetastoreService: pulumi.String("string"),
		},
		// NumInstances here is one of the few updatable fields (see warning above).
		PreemptibleWorkerConfig: &dataproc.ClusterClusterConfigPreemptibleWorkerConfigArgs{
			DiskConfig: &dataproc.ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs{
				BootDiskSizeGb:    pulumi.Int(0),
				BootDiskType:      pulumi.String("string"),
				LocalSsdInterface: pulumi.String("string"),
				NumLocalSsds:      pulumi.Int(0),
			},
			InstanceFlexibilityPolicy: &dataproc.ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyArgs{
				InstanceSelectionLists: dataproc.ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionListArray{
					&dataproc.ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionListArgs{
						MachineTypes: pulumi.StringArray{
							pulumi.String("string"),
						},
						Rank: pulumi.Int(0),
					},
				},
				InstanceSelectionResults: dataproc.ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionResultArray{
					&dataproc.ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionResultArgs{
						MachineType: pulumi.String("string"),
						VmCount:     pulumi.Int(0),
					},
				},
				ProvisioningModelMix: &dataproc.ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyProvisioningModelMixArgs{
					StandardCapacityBase:             pulumi.Int(0),
					StandardCapacityPercentAboveBase: pulumi.Int(0),
				},
			},
			InstanceNames: pulumi.StringArray{
				pulumi.String("string"),
			},
			NumInstances:   pulumi.Int(0),
			Preemptibility: pulumi.String("string"),
		},
		SecurityConfig: &dataproc.ClusterClusterConfigSecurityConfigArgs{
			KerberosConfig: &dataproc.ClusterClusterConfigSecurityConfigKerberosConfigArgs{
				KmsKeyUri:                        pulumi.String("string"),
				RootPrincipalPasswordUri:         pulumi.String("string"),
				CrossRealmTrustSharedPasswordUri: pulumi.String("string"),
				CrossRealmTrustAdminServer:       pulumi.String("string"),
				EnableKerberos:                   pulumi.Bool(false),
				KdcDbKeyUri:                      pulumi.String("string"),
				KeyPasswordUri:                   pulumi.String("string"),
				KeystorePasswordUri:              pulumi.String("string"),
				KeystoreUri:                      pulumi.String("string"),
				CrossRealmTrustRealm:             pulumi.String("string"),
				Realm:                            pulumi.String("string"),
				CrossRealmTrustKdc:               pulumi.String("string"),
				TgtLifetimeHours:                 pulumi.Int(0),
				TruststorePasswordUri:            pulumi.String("string"),
				TruststoreUri:                    pulumi.String("string"),
			},
		},
		SoftwareConfig: &dataproc.ClusterClusterConfigSoftwareConfigArgs{
			ImageVersion: pulumi.String("string"),
			OptionalComponents: pulumi.StringArray{
				pulumi.String("string"),
			},
			OverrideProperties: pulumi.StringMap{
				"string": pulumi.String("string"),
			},
			Properties: pulumi.StringMap{
				"string": pulumi.String("string"),
			},
		},
		StagingBucket: pulumi.String("string"),
		TempBucket:    pulumi.String("string"),
		WorkerConfig: &dataproc.ClusterClusterConfigWorkerConfigArgs{
			Accelerators: dataproc.ClusterClusterConfigWorkerConfigAcceleratorArray{
				&dataproc.ClusterClusterConfigWorkerConfigAcceleratorArgs{
					AcceleratorCount: pulumi.Int(0),
					AcceleratorType:  pulumi.String("string"),
				},
			},
			DiskConfig: &dataproc.ClusterClusterConfigWorkerConfigDiskConfigArgs{
				BootDiskSizeGb:    pulumi.Int(0),
				BootDiskType:      pulumi.String("string"),
				LocalSsdInterface: pulumi.String("string"),
				NumLocalSsds:      pulumi.Int(0),
			},
			ImageUri: pulumi.String("string"),
			InstanceNames: pulumi.StringArray{
				pulumi.String("string"),
			},
			MachineType:     pulumi.String("string"),
			MinCpuPlatform:  pulumi.String("string"),
			MinNumInstances: pulumi.Int(0),
			NumInstances:    pulumi.Int(0),
		},
	},
	GracefulDecommissionTimeout: pulumi.String("string"),
	// Labels are updatable in place (see warning above).
	Labels: pulumi.StringMap{
		"string": pulumi.String("string"),
	},
	Name:    pulumi.String("string"),
	Project: pulumi.String("string"),
	Region:  pulumi.String("string"),
	// Alternative to ClusterConfig: run the cluster on GKE (Dataproc on Kubernetes).
	VirtualClusterConfig: &dataproc.ClusterVirtualClusterConfigArgs{
		AuxiliaryServicesConfig: &dataproc.ClusterVirtualClusterConfigAuxiliaryServicesConfigArgs{
			MetastoreConfig: &dataproc.ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigArgs{
				DataprocMetastoreService: pulumi.String("string"),
			},
			SparkHistoryServerConfig: &dataproc.ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigArgs{
				DataprocCluster: pulumi.String("string"),
			},
		},
		KubernetesClusterConfig: &dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigArgs{
			GkeClusterConfig: &dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigArgs{
				GkeClusterTarget: pulumi.String("string"),
				NodePoolTargets: dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetArray{
					&dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetArgs{
						NodePool: pulumi.String("string"),
						Roles: pulumi.StringArray{
							pulumi.String("string"),
						},
						NodePoolConfig: &dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigArgs{
							Locations: pulumi.StringArray{
								pulumi.String("string"),
							},
							Autoscaling: &dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingArgs{
								MaxNodeCount: pulumi.Int(0),
								MinNodeCount: pulumi.Int(0),
							},
							Config: &dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigArgs{
								LocalSsdCount:  pulumi.Int(0),
								MachineType:    pulumi.String("string"),
								MinCpuPlatform: pulumi.String("string"),
								Preemptible:    pulumi.Bool(false),
								Spot:           pulumi.Bool(false),
							},
						},
					},
				},
			},
			KubernetesSoftwareConfig: &dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigArgs{
				ComponentVersion: pulumi.StringMap{
					"string": pulumi.String("string"),
				},
				Properties: pulumi.StringMap{
					"string": pulumi.String("string"),
				},
			},
			KubernetesNamespace: pulumi.String("string"),
		},
		StagingBucket: pulumi.String("string"),
	},
})
// Exhaustive Java example: constructs a Dataproc cluster with every ClusterArgs
// field shown, using placeholder values ("string", 0, false). This is a schema
// reference, not a runnable configuration — substitute real values before use.
// NOTE(review): per the warning above, most clusterConfig fields are
// non-updatable; changing them forces recreation of the whole cluster.
var exampleclusterResourceResourceFromDataproccluster = new Cluster("exampleclusterResourceResourceFromDataproccluster", ClusterArgs.builder()
    // Per-cluster configuration; mirrors the clusterConfig schema field-for-field.
    .clusterConfig(ClusterClusterConfigArgs.builder()
        .autoscalingConfig(ClusterClusterConfigAutoscalingConfigArgs.builder()
            .policyUri("string")
            .build())
        .auxiliaryNodeGroups(ClusterClusterConfigAuxiliaryNodeGroupArgs.builder()
            .nodeGroups(ClusterClusterConfigAuxiliaryNodeGroupNodeGroupArgs.builder()
                .roles("string")
                .name("string")
                .nodeGroupConfig(ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigArgs.builder()
                    .accelerators(ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigAcceleratorArgs.builder()
                        .acceleratorCount(0)
                        .acceleratorType("string")
                        .build())
                    .diskConfig(ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigDiskConfigArgs.builder()
                        .bootDiskSizeGb(0)
                        .bootDiskType("string")
                        .localSsdInterface("string")
                        .numLocalSsds(0)
                        .build())
                    .instanceNames("string")
                    .machineType("string")
                    .minCpuPlatform("string")
                    .numInstances(0)
                    .build())
                .build())
            .nodeGroupId("string")
            .build())
        .bucket("string")
        .dataprocMetricConfig(ClusterClusterConfigDataprocMetricConfigArgs.builder()
            .metrics(ClusterClusterConfigDataprocMetricConfigMetricArgs.builder()
                .metricSource("string")
                .metricOverrides("string")
                .build())
            .build())
        .encryptionConfig(ClusterClusterConfigEncryptionConfigArgs.builder()
            .kmsKeyName("string")
            .build())
        .endpointConfig(ClusterClusterConfigEndpointConfigArgs.builder()
            .enableHttpPortAccess(false)
            .httpPorts(Map.of("string", "string"))
            .build())
        // Compute Engine settings shared by all cluster instances.
        .gceClusterConfig(ClusterClusterConfigGceClusterConfigArgs.builder()
            .confidentialInstanceConfig(ClusterClusterConfigGceClusterConfigConfidentialInstanceConfigArgs.builder()
                .enableConfidentialCompute(false)
                .build())
            .internalIpOnly(false)
            .metadata(Map.of("string", "string"))
            .network("string")
            .nodeGroupAffinity(ClusterClusterConfigGceClusterConfigNodeGroupAffinityArgs.builder()
                .nodeGroupUri("string")
                .build())
            .reservationAffinity(ClusterClusterConfigGceClusterConfigReservationAffinityArgs.builder()
                .consumeReservationType("string")
                .key("string")
                .values("string")
                .build())
            .serviceAccount("string")
            .serviceAccountScopes("string")
            .shieldedInstanceConfig(ClusterClusterConfigGceClusterConfigShieldedInstanceConfigArgs.builder()
                .enableIntegrityMonitoring(false)
                .enableSecureBoot(false)
                .enableVtpm(false)
                .build())
            .subnetwork("string")
            .tags("string")
            .zone("string")
            .build())
        .initializationActions(ClusterClusterConfigInitializationActionArgs.builder()
            .script("string")
            .timeoutSec(0)
            .build())
        .lifecycleConfig(ClusterClusterConfigLifecycleConfigArgs.builder()
            .autoDeleteTime("string")
            .idleDeleteTtl("string")
            .idleStartTime("string")
            .build())
        .masterConfig(ClusterClusterConfigMasterConfigArgs.builder()
            .accelerators(ClusterClusterConfigMasterConfigAcceleratorArgs.builder()
                .acceleratorCount(0)
                .acceleratorType("string")
                .build())
            .diskConfig(ClusterClusterConfigMasterConfigDiskConfigArgs.builder()
                .bootDiskSizeGb(0)
                .bootDiskType("string")
                .localSsdInterface("string")
                .numLocalSsds(0)
                .build())
            .imageUri("string")
            .instanceNames("string")
            .machineType("string")
            .minCpuPlatform("string")
            .numInstances(0)
            .build())
        .metastoreConfig(ClusterClusterConfigMetastoreConfigArgs.builder()
            .dataprocMetastoreService("string")
            .build())
        // numInstances here is one of the few updatable fields (see warning above).
        .preemptibleWorkerConfig(ClusterClusterConfigPreemptibleWorkerConfigArgs.builder()
            .diskConfig(ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs.builder()
                .bootDiskSizeGb(0)
                .bootDiskType("string")
                .localSsdInterface("string")
                .numLocalSsds(0)
                .build())
            .instanceFlexibilityPolicy(ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyArgs.builder()
                .instanceSelectionLists(ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionListArgs.builder()
                    .machineTypes("string")
                    .rank(0)
                    .build())
                .instanceSelectionResults(ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionResultArgs.builder()
                    .machineType("string")
                    .vmCount(0)
                    .build())
                .provisioningModelMix(ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyProvisioningModelMixArgs.builder()
                    .standardCapacityBase(0)
                    .standardCapacityPercentAboveBase(0)
                    .build())
                .build())
            .instanceNames("string")
            .numInstances(0)
            .preemptibility("string")
            .build())
        .securityConfig(ClusterClusterConfigSecurityConfigArgs.builder()
            .kerberosConfig(ClusterClusterConfigSecurityConfigKerberosConfigArgs.builder()
                .kmsKeyUri("string")
                .rootPrincipalPasswordUri("string")
                .crossRealmTrustSharedPasswordUri("string")
                .crossRealmTrustAdminServer("string")
                .enableKerberos(false)
                .kdcDbKeyUri("string")
                .keyPasswordUri("string")
                .keystorePasswordUri("string")
                .keystoreUri("string")
                .crossRealmTrustRealm("string")
                .realm("string")
                .crossRealmTrustKdc("string")
                .tgtLifetimeHours(0)
                .truststorePasswordUri("string")
                .truststoreUri("string")
                .build())
            .build())
        .softwareConfig(ClusterClusterConfigSoftwareConfigArgs.builder()
            .imageVersion("string")
            .optionalComponents("string")
            .overrideProperties(Map.of("string", "string"))
            .properties(Map.of("string", "string"))
            .build())
        .stagingBucket("string")
        .tempBucket("string")
        .workerConfig(ClusterClusterConfigWorkerConfigArgs.builder()
            .accelerators(ClusterClusterConfigWorkerConfigAcceleratorArgs.builder()
                .acceleratorCount(0)
                .acceleratorType("string")
                .build())
            .diskConfig(ClusterClusterConfigWorkerConfigDiskConfigArgs.builder()
                .bootDiskSizeGb(0)
                .bootDiskType("string")
                .localSsdInterface("string")
                .numLocalSsds(0)
                .build())
            .imageUri("string")
            .instanceNames("string")
            .machineType("string")
            .minCpuPlatform("string")
            .minNumInstances(0)
            .numInstances(0)
            .build())
        .build())
    .gracefulDecommissionTimeout("string")
    // Labels are updatable in place (see warning above).
    .labels(Map.of("string", "string"))
    .name("string")
    .project("string")
    .region("string")
    // Alternative to clusterConfig: run the cluster on GKE (Dataproc on Kubernetes).
    .virtualClusterConfig(ClusterVirtualClusterConfigArgs.builder()
        .auxiliaryServicesConfig(ClusterVirtualClusterConfigAuxiliaryServicesConfigArgs.builder()
            .metastoreConfig(ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigArgs.builder()
                .dataprocMetastoreService("string")
                .build())
            .sparkHistoryServerConfig(ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigArgs.builder()
                .dataprocCluster("string")
                .build())
            .build())
        .kubernetesClusterConfig(ClusterVirtualClusterConfigKubernetesClusterConfigArgs.builder()
            .gkeClusterConfig(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigArgs.builder()
                .gkeClusterTarget("string")
                .nodePoolTargets(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetArgs.builder()
                    .nodePool("string")
                    .roles("string")
                    .nodePoolConfig(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigArgs.builder()
                        .locations("string")
                        .autoscaling(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingArgs.builder()
                            .maxNodeCount(0)
                            .minNodeCount(0)
                            .build())
                        .config(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigArgs.builder()
                            .localSsdCount(0)
                            .machineType("string")
                            .minCpuPlatform("string")
                            .preemptible(false)
                            .spot(false)
                            .build())
                        .build())
                    .build())
                .build())
            .kubernetesSoftwareConfig(ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigArgs.builder()
                .componentVersion(Map.of("string", "string"))
                .properties(Map.of("string", "string"))
                .build())
            .kubernetesNamespace("string")
            .build())
        .stagingBucket("string")
        .build())
    .build());
# Exhaustive Python example: constructs a Dataproc cluster with every ClusterArgs
# field shown, using placeholder values ("string", 0, False). This is a schema
# reference, not a runnable configuration — substitute real values before use.
# NOTE(review): per the warning above, most cluster_config fields are
# non-updatable; changing them forces recreation of the whole cluster.
examplecluster_resource_resource_from_dataproccluster = gcp.dataproc.Cluster("exampleclusterResourceResourceFromDataproccluster",
    # Per-cluster configuration; mirrors the cluster_config schema field-for-field.
    cluster_config={
        "autoscaling_config": {
            "policy_uri": "string",
        },
        "auxiliary_node_groups": [{
            "node_groups": [{
                "roles": ["string"],
                "name": "string",
                "node_group_config": {
                    "accelerators": [{
                        "accelerator_count": 0,
                        "accelerator_type": "string",
                    }],
                    "disk_config": {
                        "boot_disk_size_gb": 0,
                        "boot_disk_type": "string",
                        "local_ssd_interface": "string",
                        "num_local_ssds": 0,
                    },
                    "instance_names": ["string"],
                    "machine_type": "string",
                    "min_cpu_platform": "string",
                    "num_instances": 0,
                },
            }],
            "node_group_id": "string",
        }],
        "bucket": "string",
        "dataproc_metric_config": {
            "metrics": [{
                "metric_source": "string",
                "metric_overrides": ["string"],
            }],
        },
        "encryption_config": {
            "kms_key_name": "string",
        },
        "endpoint_config": {
            "enable_http_port_access": False,
            "http_ports": {
                "string": "string",
            },
        },
        # Compute Engine settings shared by all cluster instances.
        "gce_cluster_config": {
            "confidential_instance_config": {
                "enable_confidential_compute": False,
            },
            "internal_ip_only": False,
            "metadata": {
                "string": "string",
            },
            "network": "string",
            "node_group_affinity": {
                "node_group_uri": "string",
            },
            "reservation_affinity": {
                "consume_reservation_type": "string",
                "key": "string",
                "values": ["string"],
            },
            "service_account": "string",
            "service_account_scopes": ["string"],
            "shielded_instance_config": {
                "enable_integrity_monitoring": False,
                "enable_secure_boot": False,
                "enable_vtpm": False,
            },
            "subnetwork": "string",
            "tags": ["string"],
            "zone": "string",
        },
        "initialization_actions": [{
            "script": "string",
            "timeout_sec": 0,
        }],
        "lifecycle_config": {
            "auto_delete_time": "string",
            "idle_delete_ttl": "string",
            "idle_start_time": "string",
        },
        "master_config": {
            "accelerators": [{
                "accelerator_count": 0,
                "accelerator_type": "string",
            }],
            "disk_config": {
                "boot_disk_size_gb": 0,
                "boot_disk_type": "string",
                "local_ssd_interface": "string",
                "num_local_ssds": 0,
            },
            "image_uri": "string",
            "instance_names": ["string"],
            "machine_type": "string",
            "min_cpu_platform": "string",
            "num_instances": 0,
        },
        "metastore_config": {
            "dataproc_metastore_service": "string",
        },
        # num_instances here is one of the few updatable fields (see warning above).
        "preemptible_worker_config": {
            "disk_config": {
                "boot_disk_size_gb": 0,
                "boot_disk_type": "string",
                "local_ssd_interface": "string",
                "num_local_ssds": 0,
            },
            "instance_flexibility_policy": {
                "instance_selection_lists": [{
                    "machine_types": ["string"],
                    "rank": 0,
                }],
                "instance_selection_results": [{
                    "machine_type": "string",
                    "vm_count": 0,
                }],
                "provisioning_model_mix": {
                    "standard_capacity_base": 0,
                    "standard_capacity_percent_above_base": 0,
                },
            },
            "instance_names": ["string"],
            "num_instances": 0,
            "preemptibility": "string",
        },
        "security_config": {
            "kerberos_config": {
                "kms_key_uri": "string",
                "root_principal_password_uri": "string",
                "cross_realm_trust_shared_password_uri": "string",
                "cross_realm_trust_admin_server": "string",
                "enable_kerberos": False,
                "kdc_db_key_uri": "string",
                "key_password_uri": "string",
                "keystore_password_uri": "string",
                "keystore_uri": "string",
                "cross_realm_trust_realm": "string",
                "realm": "string",
                "cross_realm_trust_kdc": "string",
                "tgt_lifetime_hours": 0,
                "truststore_password_uri": "string",
                "truststore_uri": "string",
            },
        },
        "software_config": {
            "image_version": "string",
            "optional_components": ["string"],
            "override_properties": {
                "string": "string",
            },
            "properties": {
                "string": "string",
            },
        },
        "staging_bucket": "string",
        "temp_bucket": "string",
        "worker_config": {
            "accelerators": [{
                "accelerator_count": 0,
                "accelerator_type": "string",
            }],
            "disk_config": {
                "boot_disk_size_gb": 0,
                "boot_disk_type": "string",
                "local_ssd_interface": "string",
                "num_local_ssds": 0,
            },
            "image_uri": "string",
            "instance_names": ["string"],
            "machine_type": "string",
            "min_cpu_platform": "string",
            "min_num_instances": 0,
            "num_instances": 0,
        },
    },
    graceful_decommission_timeout="string",
    # Labels are updatable in place (see warning above).
    labels={
        "string": "string",
    },
    name="string",
    project="string",
    region="string",
    # Alternative to cluster_config: run the cluster on GKE (Dataproc on Kubernetes).
    virtual_cluster_config={
        "auxiliary_services_config": {
            "metastore_config": {
                "dataproc_metastore_service": "string",
            },
            "spark_history_server_config": {
                "dataproc_cluster": "string",
            },
        },
        "kubernetes_cluster_config": {
            "gke_cluster_config": {
                "gke_cluster_target": "string",
                "node_pool_targets": [{
                    "node_pool": "string",
                    "roles": ["string"],
                    "node_pool_config": {
                        "locations": ["string"],
                        "autoscaling": {
                            "max_node_count": 0,
                            "min_node_count": 0,
                        },
                        "config": {
                            "local_ssd_count": 0,
                            "machine_type": "string",
                            "min_cpu_platform": "string",
                            "preemptible": False,
                            "spot": False,
                        },
                    },
                }],
            },
            "kubernetes_software_config": {
                "component_version": {
                    "string": "string",
                },
                "properties": {
                    "string": "string",
                },
            },
            "kubernetes_namespace": "string",
        },
        "staging_bucket": "string",
    })
// Reference listing: instantiates gcp.dataproc.Cluster with a type placeholder
// for every configurable input ("string", 0, false). These values document the
// shape of each field, not working settings.
const exampleclusterResourceResourceFromDataproccluster = new gcp.dataproc.Cluster("exampleclusterResourceResourceFromDataproccluster", {
    // clusterConfig: settings for a Compute Engine-backed cluster; see
    // "Cluster Resource Properties" below for per-field documentation.
    clusterConfig: {
        autoscalingConfig: {
            policyUri: "string",
        },
        auxiliaryNodeGroups: [{
            nodeGroups: [{
                roles: ["string"],
                name: "string",
                nodeGroupConfig: {
                    accelerators: [{
                        acceleratorCount: 0,
                        acceleratorType: "string",
                    }],
                    diskConfig: {
                        bootDiskSizeGb: 0,
                        bootDiskType: "string",
                        localSsdInterface: "string",
                        numLocalSsds: 0,
                    },
                    instanceNames: ["string"],
                    machineType: "string",
                    minCpuPlatform: "string",
                    numInstances: 0,
                },
            }],
            nodeGroupId: "string",
        }],
        bucket: "string",
        dataprocMetricConfig: {
            metrics: [{
                metricSource: "string",
                metricOverrides: ["string"],
            }],
        },
        encryptionConfig: {
            kmsKeyName: "string",
        },
        endpointConfig: {
            enableHttpPortAccess: false,
            httpPorts: {
                string: "string",
            },
        },
        gceClusterConfig: {
            confidentialInstanceConfig: {
                enableConfidentialCompute: false,
            },
            internalIpOnly: false,
            metadata: {
                string: "string",
            },
            network: "string",
            nodeGroupAffinity: {
                nodeGroupUri: "string",
            },
            reservationAffinity: {
                consumeReservationType: "string",
                key: "string",
                values: ["string"],
            },
            serviceAccount: "string",
            serviceAccountScopes: ["string"],
            shieldedInstanceConfig: {
                enableIntegrityMonitoring: false,
                enableSecureBoot: false,
                enableVtpm: false,
            },
            subnetwork: "string",
            tags: ["string"],
            zone: "string",
        },
        initializationActions: [{
            script: "string",
            timeoutSec: 0,
        }],
        lifecycleConfig: {
            autoDeleteTime: "string",
            idleDeleteTtl: "string",
            idleStartTime: "string",
        },
        masterConfig: {
            accelerators: [{
                acceleratorCount: 0,
                acceleratorType: "string",
            }],
            diskConfig: {
                bootDiskSizeGb: 0,
                bootDiskType: "string",
                localSsdInterface: "string",
                numLocalSsds: 0,
            },
            imageUri: "string",
            instanceNames: ["string"],
            machineType: "string",
            minCpuPlatform: "string",
            numInstances: 0,
        },
        metastoreConfig: {
            dataprocMetastoreService: "string",
        },
        preemptibleWorkerConfig: {
            diskConfig: {
                bootDiskSizeGb: 0,
                bootDiskType: "string",
                localSsdInterface: "string",
                numLocalSsds: 0,
            },
            instanceFlexibilityPolicy: {
                instanceSelectionLists: [{
                    machineTypes: ["string"],
                    rank: 0,
                }],
                instanceSelectionResults: [{
                    machineType: "string",
                    vmCount: 0,
                }],
                provisioningModelMix: {
                    standardCapacityBase: 0,
                    standardCapacityPercentAboveBase: 0,
                },
            },
            instanceNames: ["string"],
            numInstances: 0,
            preemptibility: "string",
        },
        securityConfig: {
            kerberosConfig: {
                kmsKeyUri: "string",
                rootPrincipalPasswordUri: "string",
                crossRealmTrustSharedPasswordUri: "string",
                crossRealmTrustAdminServer: "string",
                enableKerberos: false,
                kdcDbKeyUri: "string",
                keyPasswordUri: "string",
                keystorePasswordUri: "string",
                keystoreUri: "string",
                crossRealmTrustRealm: "string",
                realm: "string",
                crossRealmTrustKdc: "string",
                tgtLifetimeHours: 0,
                truststorePasswordUri: "string",
                truststoreUri: "string",
            },
        },
        softwareConfig: {
            imageVersion: "string",
            optionalComponents: ["string"],
            overrideProperties: {
                string: "string",
            },
            properties: {
                string: "string",
            },
        },
        stagingBucket: "string",
        tempBucket: "string",
        workerConfig: {
            accelerators: [{
                acceleratorCount: 0,
                acceleratorType: "string",
            }],
            diskConfig: {
                bootDiskSizeGb: 0,
                bootDiskType: "string",
                localSsdInterface: "string",
                numLocalSsds: 0,
            },
            imageUri: "string",
            instanceNames: ["string"],
            machineType: "string",
            minCpuPlatform: "string",
            minNumInstances: 0,
            numInstances: 0,
        },
    },
    gracefulDecommissionTimeout: "string",
    labels: {
        string: "string",
    },
    name: "string",
    project: "string",
    region: "string",
    // virtualClusterConfig: settings for a virtual Dataproc cluster running on
    // GKE (mutually exclusive in practice with the GCE-style clusterConfig
    // above — TODO confirm against provider docs).
    virtualClusterConfig: {
        auxiliaryServicesConfig: {
            metastoreConfig: {
                dataprocMetastoreService: "string",
            },
            sparkHistoryServerConfig: {
                dataprocCluster: "string",
            },
        },
        kubernetesClusterConfig: {
            gkeClusterConfig: {
                gkeClusterTarget: "string",
                nodePoolTargets: [{
                    nodePool: "string",
                    roles: ["string"],
                    nodePoolConfig: {
                        locations: ["string"],
                        autoscaling: {
                            maxNodeCount: 0,
                            minNodeCount: 0,
                        },
                        config: {
                            localSsdCount: 0,
                            machineType: "string",
                            minCpuPlatform: "string",
                            preemptible: false,
                            spot: false,
                        },
                    },
                }],
            },
            kubernetesSoftwareConfig: {
                componentVersion: {
                    string: "string",
                },
                properties: {
                    string: "string",
                },
            },
            kubernetesNamespace: "string",
        },
        stagingBucket: "string",
    },
});
# Pulumi YAML form of the same reference listing: every configurable input of
# gcp:dataproc:Cluster shown with a type placeholder (string / 0 / false),
# documenting field shape rather than usable values.
type: gcp:dataproc:Cluster
properties:
    clusterConfig:
        autoscalingConfig:
            policyUri: string
        auxiliaryNodeGroups:
            - nodeGroupId: string
              nodeGroups:
                - name: string
                  nodeGroupConfig:
                    accelerators:
                        - acceleratorCount: 0
                          acceleratorType: string
                    diskConfig:
                        bootDiskSizeGb: 0
                        bootDiskType: string
                        localSsdInterface: string
                        numLocalSsds: 0
                    instanceNames:
                        - string
                    machineType: string
                    minCpuPlatform: string
                    numInstances: 0
                  roles:
                    - string
        bucket: string
        dataprocMetricConfig:
            metrics:
                - metricOverrides:
                    - string
                  metricSource: string
        encryptionConfig:
            kmsKeyName: string
        endpointConfig:
            enableHttpPortAccess: false
            httpPorts:
                string: string
        gceClusterConfig:
            confidentialInstanceConfig:
                enableConfidentialCompute: false
            internalIpOnly: false
            metadata:
                string: string
            network: string
            nodeGroupAffinity:
                nodeGroupUri: string
            reservationAffinity:
                consumeReservationType: string
                key: string
                values:
                    - string
            serviceAccount: string
            serviceAccountScopes:
                - string
            shieldedInstanceConfig:
                enableIntegrityMonitoring: false
                enableSecureBoot: false
                enableVtpm: false
            subnetwork: string
            tags:
                - string
            zone: string
        initializationActions:
            - script: string
              timeoutSec: 0
        lifecycleConfig:
            autoDeleteTime: string
            idleDeleteTtl: string
            idleStartTime: string
        masterConfig:
            accelerators:
                - acceleratorCount: 0
                  acceleratorType: string
            diskConfig:
                bootDiskSizeGb: 0
                bootDiskType: string
                localSsdInterface: string
                numLocalSsds: 0
            imageUri: string
            instanceNames:
                - string
            machineType: string
            minCpuPlatform: string
            numInstances: 0
        metastoreConfig:
            dataprocMetastoreService: string
        preemptibleWorkerConfig:
            diskConfig:
                bootDiskSizeGb: 0
                bootDiskType: string
                localSsdInterface: string
                numLocalSsds: 0
            instanceFlexibilityPolicy:
                instanceSelectionLists:
                    - machineTypes:
                        - string
                      rank: 0
                instanceSelectionResults:
                    - machineType: string
                      vmCount: 0
                provisioningModelMix:
                    standardCapacityBase: 0
                    standardCapacityPercentAboveBase: 0
            instanceNames:
                - string
            numInstances: 0
            preemptibility: string
        securityConfig:
            kerberosConfig:
                crossRealmTrustAdminServer: string
                crossRealmTrustKdc: string
                crossRealmTrustRealm: string
                crossRealmTrustSharedPasswordUri: string
                enableKerberos: false
                kdcDbKeyUri: string
                keyPasswordUri: string
                keystorePasswordUri: string
                keystoreUri: string
                kmsKeyUri: string
                realm: string
                rootPrincipalPasswordUri: string
                tgtLifetimeHours: 0
                truststorePasswordUri: string
                truststoreUri: string
        softwareConfig:
            imageVersion: string
            optionalComponents:
                - string
            overrideProperties:
                string: string
            properties:
                string: string
        stagingBucket: string
        tempBucket: string
        workerConfig:
            accelerators:
                - acceleratorCount: 0
                  acceleratorType: string
            diskConfig:
                bootDiskSizeGb: 0
                bootDiskType: string
                localSsdInterface: string
                numLocalSsds: 0
            imageUri: string
            instanceNames:
                - string
            machineType: string
            minCpuPlatform: string
            minNumInstances: 0
            numInstances: 0
    gracefulDecommissionTimeout: string
    labels:
        string: string
    name: string
    project: string
    region: string
    virtualClusterConfig:
        auxiliaryServicesConfig:
            metastoreConfig:
                dataprocMetastoreService: string
            sparkHistoryServerConfig:
                dataprocCluster: string
        kubernetesClusterConfig:
            gkeClusterConfig:
                gkeClusterTarget: string
                nodePoolTargets:
                    - nodePool: string
                      nodePoolConfig:
                        autoscaling:
                            maxNodeCount: 0
                            minNodeCount: 0
                        config:
                            localSsdCount: 0
                            machineType: string
                            minCpuPlatform: string
                            preemptible: false
                            spot: false
                        locations:
                            - string
                      roles:
                        - string
            kubernetesNamespace: string
            kubernetesSoftwareConfig:
                componentVersion:
                    string: string
                properties:
                    string: string
        stagingBucket: string
Cluster Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The Cluster resource accepts the following input properties:
- ClusterConfig ClusterCluster Config 
- Allows you to configure various aspects of the cluster. Structure defined below.
- GracefulDecommissionTimeout string
- Labels Dictionary<string, string>
- The list of the labels (key/value pairs) configured on the resource and to be applied to instances in the cluster. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- Name string
- The name of the cluster, unique within the project and
zone.
- Project string
- The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
- Region string
- The region in which the cluster and associated nodes will be created in.
Defaults to global.
- VirtualClusterConfig ClusterVirtualClusterConfig
- Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below.
- ClusterConfig ClusterCluster Config Args 
- Allows you to configure various aspects of the cluster. Structure defined below.
- GracefulDecommission stringTimeout 
- Labels map[string]string
- The list of the labels (key/value pairs) configured on the resource and to be applied to instances in the cluster. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- Name string
- The name of the cluster, unique within the project and
zone.
- Project string
- The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
- Region string
- The region in which the cluster and associated nodes will be created in.
Defaults to global.
- VirtualCluster ClusterConfig Virtual Cluster Config Args 
- Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below.
- clusterConfig ClusterCluster Config 
- Allows you to configure various aspects of the cluster. Structure defined below.
- gracefulDecommission StringTimeout 
- labels Map<String,String>
- The list of the labels (key/value pairs) configured on the resource and to be applied to instances in the cluster. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- name String
- The name of the cluster, unique within the project and
zone.
- project String
- The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
- region String
- The region in which the cluster and associated nodes will be created in.
Defaults to global.
- virtualCluster ClusterConfig Virtual Cluster Config 
- Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below.
- clusterConfig ClusterCluster Config 
- Allows you to configure various aspects of the cluster. Structure defined below.
- gracefulDecommission stringTimeout 
- labels {[key: string]: string}
- The list of the labels (key/value pairs) configured on the resource and to be applied to instances in the cluster. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- name string
- The name of the cluster, unique within the project and
zone.
- project string
- The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
- region string
- The region in which the cluster and associated nodes will be created in.
Defaults to global.
- virtualCluster ClusterConfig Virtual Cluster Config 
- Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below.
- cluster_config ClusterCluster Config Args 
- Allows you to configure various aspects of the cluster. Structure defined below.
- graceful_decommission_timeout str
- labels Mapping[str, str]
- The list of the labels (key/value pairs) configured on the resource and to be applied to instances in the cluster. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- name str
- The name of the cluster, unique within the project and
zone.
- project str
- The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
- region str
- The region in which the cluster and associated nodes will be created in.
Defaults to global.
- virtual_cluster_config ClusterVirtualClusterConfigArgs
- Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below.
- clusterConfig Property Map
- Allows you to configure various aspects of the cluster. Structure defined below.
- gracefulDecommission StringTimeout 
- labels Map<String>
- The list of the labels (key/value pairs) configured on the resource and to be applied to instances in the cluster. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- name String
- The name of the cluster, unique within the project and
zone.
- project String
- The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
- region String
- The region in which the cluster and associated nodes will be created in.
Defaults to global.
- virtualClusterConfig Property Map
- Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below.
Outputs
All input properties are implicitly available as output properties. Additionally, the Cluster resource produces the following output properties:
- EffectiveLabels Dictionary<string, string>
- The list of labels (key/value pairs) to be applied to
instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster.
- Id string
- The provider-assigned unique ID for this managed resource.
- PulumiLabels Dictionary<string, string>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- EffectiveLabels map[string]string
- The list of labels (key/value pairs) to be applied to
instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster.
- Id string
- The provider-assigned unique ID for this managed resource.
- PulumiLabels map[string]string
- The combination of labels configured directly on the resource and default labels configured on the provider.
- effectiveLabels Map<String,String>
- The list of labels (key/value pairs) to be applied to
instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster.
- id String
- The provider-assigned unique ID for this managed resource.
- pulumiLabels Map<String,String>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- effectiveLabels {[key: string]: string}
- The list of labels (key/value pairs) to be applied to
instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster.
- id string
- The provider-assigned unique ID for this managed resource.
- pulumiLabels {[key: string]: string}
- The combination of labels configured directly on the resource and default labels configured on the provider.
- effective_labels Mapping[str, str]
- The list of labels (key/value pairs) to be applied to
instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster.
- id str
- The provider-assigned unique ID for this managed resource.
- pulumi_labels Mapping[str, str]
- The combination of labels configured directly on the resource and default labels configured on the provider.
- effectiveLabels Map<String>
- The list of labels (key/value pairs) to be applied to
instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster.
- id String
- The provider-assigned unique ID for this managed resource.
- pulumiLabels Map<String>
- The combination of labels configured directly on the resource and default labels configured on the provider.
Look up Existing Cluster Resource
Get an existing Cluster resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: ClusterState, opts?: CustomResourceOptions): Cluster@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        cluster_config: Optional[ClusterClusterConfigArgs] = None,
        effective_labels: Optional[Mapping[str, str]] = None,
        graceful_decommission_timeout: Optional[str] = None,
        labels: Optional[Mapping[str, str]] = None,
        name: Optional[str] = None,
        project: Optional[str] = None,
        pulumi_labels: Optional[Mapping[str, str]] = None,
        region: Optional[str] = None,
        virtual_cluster_config: Optional[ClusterVirtualClusterConfigArgs] = None) -> Clusterfunc GetCluster(ctx *Context, name string, id IDInput, state *ClusterState, opts ...ResourceOption) (*Cluster, error)public static Cluster Get(string name, Input<string> id, ClusterState? state, CustomResourceOptions? opts = null)public static Cluster get(String name, Output<String> id, ClusterState state, CustomResourceOptions options)resources:  _:    type: gcp:dataproc:Cluster    get:      id: ${id}- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- ClusterConfig ClusterCluster Config 
- Allows you to configure various aspects of the cluster. Structure defined below.
- EffectiveLabels Dictionary<string, string>
- The list of labels (key/value pairs) to be applied to
instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster.
- GracefulDecommission stringTimeout 
- Labels Dictionary<string, string>
- The list of the labels (key/value pairs) configured on the resource and to be applied to instances in the cluster. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- Name string
- The name of the cluster, unique within the project and
zone.
- Project string
- The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
- PulumiLabels Dictionary<string, string>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- Region string
- The region in which the cluster and associated nodes will be created in.
Defaults to global.
- VirtualCluster ClusterConfig Virtual Cluster Config 
- Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below.
- ClusterConfig ClusterCluster Config Args 
- Allows you to configure various aspects of the cluster. Structure defined below.
- EffectiveLabels map[string]string
- The list of labels (key/value pairs) to be applied to
instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster.
- GracefulDecommission stringTimeout 
- Labels map[string]string
- The list of the labels (key/value pairs) configured on the resource and to be applied to instances in the cluster. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- Name string
- The name of the cluster, unique within the project and
zone.
- Project string
- The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
- PulumiLabels map[string]string
- The combination of labels configured directly on the resource and default labels configured on the provider.
- Region string
- The region in which the cluster and associated nodes will be created in.
Defaults to global.
- VirtualCluster ClusterConfig Virtual Cluster Config Args 
- Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below.
- clusterConfig ClusterCluster Config 
- Allows you to configure various aspects of the cluster. Structure defined below.
- effectiveLabels Map<String,String>
- The list of labels (key/value pairs) to be applied to
instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster.
- gracefulDecommission StringTimeout 
- labels Map<String,String>
- The list of the labels (key/value pairs) configured on the resource and to be applied to instances in the cluster. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- name String
- The name of the cluster, unique within the project and
zone.
- project String
- The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
- pulumiLabels Map<String,String>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- region String
- The region in which the cluster and associated nodes will be created in.
Defaults to global.
- virtualCluster ClusterConfig Virtual Cluster Config 
- Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below.
- clusterConfig ClusterCluster Config 
- Allows you to configure various aspects of the cluster. Structure defined below.
- effectiveLabels {[key: string]: string}
- The list of labels (key/value pairs) to be applied to
instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster.
- gracefulDecommission stringTimeout 
- labels {[key: string]: string}
- The list of the labels (key/value pairs) configured on the resource and to be applied to instances in the cluster. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- name string
- The name of the cluster, unique within the project and
zone.
- project string
- The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
- pulumiLabels {[key: string]: string}
- The combination of labels configured directly on the resource and default labels configured on the provider.
- region string
- The region in which the cluster and associated nodes will be created in.
Defaults to global.
- virtualCluster ClusterConfig Virtual Cluster Config 
- Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below.
- cluster_config ClusterCluster Config Args 
- Allows you to configure various aspects of the cluster. Structure defined below.
- effective_labels Mapping[str, str]
- The list of labels (key/value pairs) to be applied to
instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster.
- graceful_decommission_ strtimeout 
- labels Mapping[str, str]
- The list of the labels (key/value pairs) configured on the resource and to be applied to instances in the cluster. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- name str
- The name of the cluster, unique within the project and
zone.
- project str
- The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
- pulumi_labels Mapping[str, str]
- The combination of labels configured directly on the resource and default labels configured on the provider.
- region str
- The region in which the cluster and associated nodes will be created in.
Defaults to global.
- virtual_cluster_ Clusterconfig Virtual Cluster Config Args 
- Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below.
- clusterConfig Property Map
- Allows you to configure various aspects of the cluster. Structure defined below.
- effectiveLabels Map<String>
- The list of labels (key/value pairs) to be applied to
instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster.
- gracefulDecommission StringTimeout 
- labels Map<String>
- The list of the labels (key/value pairs) configured on the resource and to be applied to instances in the cluster. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- name String
- The name of the cluster, unique within the project and
zone.
- project String
- The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
- pulumiLabels Map<String>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- region String
- The region in which the cluster and associated nodes will be created in.
Defaults to global.
- virtualClusterConfig Property Map
- Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below.
Supporting Types
ClusterClusterConfig, ClusterClusterConfigArgs      
- AutoscalingConfig ClusterClusterConfigAutoscalingConfig
- The autoscaling policy config associated with the cluster.
Note that once set, if autoscaling_config is the only field set in cluster_config, it can only be removed by setting policy_uri = "", rather than removing the whole block. Structure defined below.
- AuxiliaryNodeGroups List<ClusterClusterConfigAuxiliaryNodeGroup>
- A Dataproc NodeGroup resource is a group of Dataproc cluster nodes that execute an assigned role. Structure defined below.
- Bucket string
- The name of the cloud storage bucket ultimately used to house the staging data
for the cluster. If staging_bucket is specified, it will contain this value, otherwise it will be the auto generated name.
- DataprocMetricConfig ClusterClusterConfigDataprocMetricConfig
- The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times. Structure defined below.
- EncryptionConfig ClusterCluster Config Encryption Config 
- The Customer managed encryption keys settings for the cluster. Structure defined below.
- EndpointConfig ClusterCluster Config Endpoint Config 
- The config settings for port access on the cluster. Structure defined below.
- GceClusterConfig ClusterClusterConfigGceClusterConfig
- Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below.
- InitializationActions List<ClusterCluster Config Initialization Action> 
- Commands to execute on each node after config is completed. You can specify multiple versions of these. Structure defined below.
- LifecycleConfig ClusterCluster Config Lifecycle Config 
- The settings for auto deletion cluster schedule. Structure defined below.
- MasterConfig ClusterCluster Config Master Config 
- The Google Compute Engine config settings for the master instances in a cluster. Structure defined below.
- MetastoreConfig ClusterCluster Config Metastore Config 
- The config setting for metastore service with the cluster.
Structure defined below.
- PreemptibleWorkerConfig ClusterClusterConfigPreemptibleWorkerConfig
- The Google Compute Engine config settings for the additional
instances in a cluster. Structure defined below. - NOTE: preemptible_worker_config is an alias for the api's secondaryWorkerConfig. The name doesn't necessarily mean it is preemptible and is named as such for legacy/compatibility reasons.
 
- NOTE : 
- SecurityConfig ClusterCluster Config Security Config 
- Security related configuration. Structure defined below.
- SoftwareConfig ClusterCluster Config Software Config 
- The config settings for software inside the cluster. Structure defined below.
- StagingBucket string
- The Cloud Storage staging bucket used to stage files,
such as Hadoop jars, between client machines and the cluster.
Note: If you don't explicitly specify a staging_bucket then GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.
- TempBucket string
- The Cloud Storage temp bucket used to store ephemeral cluster
and jobs data, such as Spark and MapReduce history files.
Note: If you don't explicitly specify a temp_bucket then GCP will auto create / assign one for you.
- WorkerConfig ClusterCluster Config Worker Config 
- The Google Compute Engine config settings for the worker instances in a cluster. Structure defined below.
- AutoscalingConfig ClusterCluster Config Autoscaling Config 
- The autoscaling policy config associated with the cluster.
Note that once set, if autoscaling_configis the only field set incluster_config, it can only be removed by settingpolicy_uri = "", rather than removing the whole block. Structure defined below.
- AuxiliaryNodeGroups []ClusterClusterConfigAuxiliaryNodeGroup
- A Dataproc NodeGroup resource is a group of Dataproc cluster nodes that execute an assigned role. Structure defined below.
- Bucket string
- The name of the cloud storage bucket ultimately used to house the staging data
for the cluster. If staging_bucketis specified, it will contain this value, otherwise it will be the auto generated name.
- DataprocMetric ClusterConfig Cluster Config Dataproc Metric Config 
- The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times. Structure defined below.
- EncryptionConfig ClusterCluster Config Encryption Config 
- The Customer managed encryption keys settings for the cluster. Structure defined below.
- EndpointConfig ClusterCluster Config Endpoint Config 
- The config settings for port access on the cluster. Structure defined below.
- GceCluster ClusterConfig Cluster Config Gce Cluster Config 
- Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below.
- InitializationActions []ClusterCluster Config Initialization Action 
- Commands to execute on each node after config is completed. You can specify multiple versions of these. Structure defined below.
- LifecycleConfig ClusterCluster Config Lifecycle Config 
- The settings for auto deletion cluster schedule. Structure defined below.
- MasterConfig ClusterCluster Config Master Config 
- The Google Compute Engine config settings for the master instances in a cluster. Structure defined below.
- MetastoreConfig ClusterCluster Config Metastore Config 
- The config setting for metastore service with the cluster.
Structure defined below.
- PreemptibleWorkerConfig ClusterClusterConfigPreemptibleWorkerConfig
- The Google Compute Engine config settings for the additional
instances in a cluster. Structure defined below.- NOTE : preemptible_worker_configis an alias for the api's secondaryWorkerConfig. The name doesn't necessarily mean it is preemptible and is named as such for legacy/compatibility reasons.
 
- NOTE : 
- SecurityConfig ClusterCluster Config Security Config 
- Security related configuration. Structure defined below.
- SoftwareConfig ClusterCluster Config Software Config 
- The config settings for software inside the cluster. Structure defined below.
- StagingBucket string
- The Cloud Storage staging bucket used to stage files,
such as Hadoop jars, between client machines and the cluster.
Note: If you don't explicitly specify a staging_bucketthen GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.
- TempBucket string
- The Cloud Storage temp bucket used to store ephemeral cluster
and jobs data, such as Spark and MapReduce history files.
Note: If you don't explicitly specify a temp_bucketthen GCP will auto create / assign one for you.
- WorkerConfig ClusterCluster Config Worker Config 
- The Google Compute Engine config settings for the worker instances in a cluster. Structure defined below.
- autoscalingConfig ClusterCluster Config Autoscaling Config 
- The autoscaling policy config associated with the cluster.
Note that once set, if autoscaling_configis the only field set incluster_config, it can only be removed by settingpolicy_uri = "", rather than removing the whole block. Structure defined below.
- auxiliaryNodeGroups List<ClusterClusterConfigAuxiliaryNodeGroup>
- A Dataproc NodeGroup resource is a group of Dataproc cluster nodes that execute an assigned role. Structure defined below.
- bucket String
- The name of the cloud storage bucket ultimately used to house the staging data
for the cluster. If staging_bucketis specified, it will contain this value, otherwise it will be the auto generated name.
- dataprocMetric ClusterConfig Cluster Config Dataproc Metric Config 
- The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times. Structure defined below.
- encryptionConfig ClusterCluster Config Encryption Config 
- The Customer managed encryption keys settings for the cluster. Structure defined below.
- endpointConfig ClusterCluster Config Endpoint Config 
- The config settings for port access on the cluster. Structure defined below.
- gceCluster ClusterConfig Cluster Config Gce Cluster Config 
- Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below.
- initializationActions List<ClusterCluster Config Initialization Action> 
- Commands to execute on each node after config is completed. You can specify multiple versions of these. Structure defined below.
- lifecycleConfig ClusterCluster Config Lifecycle Config 
- The settings for auto deletion cluster schedule. Structure defined below.
- masterConfig ClusterCluster Config Master Config 
- The Google Compute Engine config settings for the master instances in a cluster. Structure defined below.
- metastoreConfig ClusterCluster Config Metastore Config 
- The config setting for metastore service with the cluster.
Structure defined below.
- preemptibleWorkerConfig ClusterClusterConfigPreemptibleWorkerConfig
- The Google Compute Engine config settings for the additional
instances in a cluster. Structure defined below.- NOTE : preemptible_worker_configis an alias for the api's secondaryWorkerConfig. The name doesn't necessarily mean it is preemptible and is named as such for legacy/compatibility reasons.
 
- NOTE : 
- securityConfig ClusterCluster Config Security Config 
- Security related configuration. Structure defined below.
- softwareConfig ClusterCluster Config Software Config 
- The config settings for software inside the cluster. Structure defined below.
- stagingBucket String
- The Cloud Storage staging bucket used to stage files,
such as Hadoop jars, between client machines and the cluster.
Note: If you don't explicitly specify a staging_bucketthen GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.
- tempBucket String
- The Cloud Storage temp bucket used to store ephemeral cluster
and jobs data, such as Spark and MapReduce history files.
Note: If you don't explicitly specify a temp_bucketthen GCP will auto create / assign one for you.
- workerConfig ClusterCluster Config Worker Config 
- The Google Compute Engine config settings for the worker instances in a cluster. Structure defined below.
- autoscalingConfig ClusterCluster Config Autoscaling Config 
- The autoscaling policy config associated with the cluster.
Note that once set, if autoscaling_configis the only field set incluster_config, it can only be removed by settingpolicy_uri = "", rather than removing the whole block. Structure defined below.
- auxiliaryNodeGroups ClusterClusterConfigAuxiliaryNodeGroup[]
- A Dataproc NodeGroup resource is a group of Dataproc cluster nodes that execute an assigned role. Structure defined below.
- bucket string
- The name of the cloud storage bucket ultimately used to house the staging data
for the cluster. If staging_bucketis specified, it will contain this value, otherwise it will be the auto generated name.
- dataprocMetric ClusterConfig Cluster Config Dataproc Metric Config 
- The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times. Structure defined below.
- encryptionConfig ClusterCluster Config Encryption Config 
- The Customer managed encryption keys settings for the cluster. Structure defined below.
- endpointConfig ClusterCluster Config Endpoint Config 
- The config settings for port access on the cluster. Structure defined below.
- gceCluster ClusterConfig Cluster Config Gce Cluster Config 
- Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below.
- initializationActions ClusterCluster Config Initialization Action[] 
- Commands to execute on each node after config is completed. You can specify multiple versions of these. Structure defined below.
- lifecycleConfig ClusterCluster Config Lifecycle Config 
- The settings for auto deletion cluster schedule. Structure defined below.
- masterConfig ClusterCluster Config Master Config 
- The Google Compute Engine config settings for the master instances in a cluster. Structure defined below.
- metastoreConfig ClusterCluster Config Metastore Config 
- The config setting for metastore service with the cluster.
Structure defined below.
- preemptibleWorkerConfig ClusterClusterConfigPreemptibleWorkerConfig
- The Google Compute Engine config settings for the additional
instances in a cluster. Structure defined below.- NOTE : preemptible_worker_configis an alias for the api's secondaryWorkerConfig. The name doesn't necessarily mean it is preemptible and is named as such for legacy/compatibility reasons.
 
- NOTE : 
- securityConfig ClusterCluster Config Security Config 
- Security related configuration. Structure defined below.
- softwareConfig ClusterCluster Config Software Config 
- The config settings for software inside the cluster. Structure defined below.
- stagingBucket string
- The Cloud Storage staging bucket used to stage files,
such as Hadoop jars, between client machines and the cluster.
Note: If you don't explicitly specify a staging_bucketthen GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.
- tempBucket string
- The Cloud Storage temp bucket used to store ephemeral cluster
and jobs data, such as Spark and MapReduce history files.
Note: If you don't explicitly specify a temp_bucketthen GCP will auto create / assign one for you.
- workerConfig ClusterCluster Config Worker Config 
- The Google Compute Engine config settings for the worker instances in a cluster. Structure defined below.
- autoscaling_config ClusterCluster Config Autoscaling Config 
- The autoscaling policy config associated with the cluster.
Note that once set, if autoscaling_configis the only field set incluster_config, it can only be removed by settingpolicy_uri = "", rather than removing the whole block. Structure defined below.
- auxiliary_node_groups Sequence[ClusterClusterConfigAuxiliaryNodeGroup]
- A Dataproc NodeGroup resource is a group of Dataproc cluster nodes that execute an assigned role. Structure defined below.
- bucket str
- The name of the cloud storage bucket ultimately used to house the staging data
for the cluster. If staging_bucketis specified, it will contain this value, otherwise it will be the auto generated name.
- dataproc_metric_config ClusterClusterConfigDataprocMetricConfig
- The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times. Structure defined below.
- encryption_config ClusterCluster Config Encryption Config 
- The Customer managed encryption keys settings for the cluster. Structure defined below.
- endpoint_config ClusterCluster Config Endpoint Config 
- The config settings for port access on the cluster. Structure defined below.
- gce_cluster_config ClusterClusterConfigGceClusterConfig
- Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below.
- initialization_actions Sequence[ClusterCluster Config Initialization Action] 
- Commands to execute on each node after config is completed. You can specify multiple versions of these. Structure defined below.
- lifecycle_config ClusterCluster Config Lifecycle Config 
- The settings for auto deletion cluster schedule. Structure defined below.
- master_config ClusterCluster Config Master Config 
- The Google Compute Engine config settings for the master instances in a cluster. Structure defined below.
- metastore_config ClusterCluster Config Metastore Config 
- The config setting for metastore service with the cluster.
Structure defined below.
- preemptible_worker_config ClusterClusterConfigPreemptibleWorkerConfig
- The Google Compute Engine config settings for the additional
instances in a cluster. Structure defined below.- NOTE : preemptible_worker_configis an alias for the api's secondaryWorkerConfig. The name doesn't necessarily mean it is preemptible and is named as such for legacy/compatibility reasons.
 
- NOTE : 
- security_config ClusterCluster Config Security Config 
- Security related configuration. Structure defined below.
- software_config ClusterCluster Config Software Config 
- The config settings for software inside the cluster. Structure defined below.
- staging_bucket str
- The Cloud Storage staging bucket used to stage files,
such as Hadoop jars, between client machines and the cluster.
Note: If you don't explicitly specify a staging_bucketthen GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.
- temp_bucket str
- The Cloud Storage temp bucket used to store ephemeral cluster
and jobs data, such as Spark and MapReduce history files.
Note: If you don't explicitly specify a temp_bucketthen GCP will auto create / assign one for you.
- worker_config ClusterCluster Config Worker Config 
- The Google Compute Engine config settings for the worker instances in a cluster. Structure defined below.
- autoscalingConfig Property Map
- The autoscaling policy config associated with the cluster.
Note that once set, if autoscaling_configis the only field set incluster_config, it can only be removed by settingpolicy_uri = "", rather than removing the whole block. Structure defined below.
- auxiliaryNodeGroups List<Property Map>
- A Dataproc NodeGroup resource is a group of Dataproc cluster nodes that execute an assigned role. Structure defined below.
- bucket String
- The name of the cloud storage bucket ultimately used to house the staging data
for the cluster. If staging_bucketis specified, it will contain this value, otherwise it will be the auto generated name.
- dataprocMetricConfig Property Map
- The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times. Structure defined below.
- encryptionConfig Property Map
- The Customer managed encryption keys settings for the cluster. Structure defined below.
- endpointConfig Property Map
- The config settings for port access on the cluster. Structure defined below.
- gceClusterConfig Property Map
- Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below.
- initializationActions List<Property Map>
- Commands to execute on each node after config is completed. You can specify multiple versions of these. Structure defined below.
- lifecycleConfig Property Map
- The settings for auto deletion cluster schedule. Structure defined below.
- masterConfig Property Map
- The Google Compute Engine config settings for the master instances in a cluster. Structure defined below.
- metastoreConfig Property Map
- The config setting for metastore service with the cluster.
Structure defined below.
- preemptibleWorkerConfig Property Map
- The Google Compute Engine config settings for the additional
instances in a cluster. Structure defined below.- NOTE : preemptible_worker_configis an alias for the api's secondaryWorkerConfig. The name doesn't necessarily mean it is preemptible and is named as such for legacy/compatibility reasons.
 
- NOTE : 
- securityConfig Property Map
- Security related configuration. Structure defined below.
- softwareConfig Property Map
- The config settings for software inside the cluster. Structure defined below.
- stagingBucket String
- The Cloud Storage staging bucket used to stage files,
such as Hadoop jars, between client machines and the cluster.
Note: If you don't explicitly specify a staging_bucketthen GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.
- tempBucket String
- The Cloud Storage temp bucket used to store ephemeral cluster
and jobs data, such as Spark and MapReduce history files.
Note: If you don't explicitly specify a temp_bucketthen GCP will auto create / assign one for you.
- workerConfig Property Map
- The Google Compute Engine config settings for the worker instances in a cluster. Structure defined below.
ClusterClusterConfigAutoscalingConfig, ClusterClusterConfigAutoscalingConfigArgs          
- PolicyUri string
- The autoscaling policy used by the cluster. Only resource names including project id and location (region) are valid. Examples: - https://www.googleapis.com/compute/v1/projects/[projectId]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] - projects/[projectId]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] Note that the policy must be in the same project and Cloud Dataproc region.
- PolicyUri string
- The autoscaling policy used by the cluster. - Only resource names including projectid and location (region) are valid. Examples: - https://www.googleapis.com/compute/v1/projects/[projectId]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]- projects/[projectId]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]Note that the policy must be in the same project and Cloud Dataproc region.
- policyUri String
- The autoscaling policy used by the cluster. - Only resource names including projectid and location (region) are valid. Examples: - https://www.googleapis.com/compute/v1/projects/[projectId]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]- projects/[projectId]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]Note that the policy must be in the same project and Cloud Dataproc region.
- policyUri string
- The autoscaling policy used by the cluster. - Only resource names including projectid and location (region) are valid. Examples: - https://www.googleapis.com/compute/v1/projects/[projectId]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]- projects/[projectId]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]Note that the policy must be in the same project and Cloud Dataproc region.
- policy_uri str
- The autoscaling policy used by the cluster. - Only resource names including projectid and location (region) are valid. Examples: - https://www.googleapis.com/compute/v1/projects/[projectId]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]- projects/[projectId]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]Note that the policy must be in the same project and Cloud Dataproc region.
- policyUri String
- The autoscaling policy used by the cluster. - Only resource names including projectid and location (region) are valid. Examples: - https://www.googleapis.com/compute/v1/projects/[projectId]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]- projects/[projectId]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]Note that the policy must be in the same project and Cloud Dataproc region.
ClusterClusterConfigAuxiliaryNodeGroup, ClusterClusterConfigAuxiliaryNodeGroupArgs            
- NodeGroups List<ClusterClusterConfigAuxiliaryNodeGroupNodeGroup>
- Node group configuration.
- NodeGroupId string
- A node group ID. Generated if not specified. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters.
- NodeGroups []ClusterCluster Config Auxiliary Node Group Node Group 
- Node group configuration.
- NodeGroupId string
- A node group ID. Generated if not specified. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters.
- nodeGroups List<ClusterCluster Config Auxiliary Node Group Node Group> 
- Node group configuration.
- nodeGroupId String
- A node group ID. Generated if not specified. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters.
- nodeGroups ClusterCluster Config Auxiliary Node Group Node Group[] 
- Node group configuration.
- nodeGroupId string
- A node group ID. Generated if not specified. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters.
- node_groups Sequence[ClusterCluster Config Auxiliary Node Group Node Group] 
- Node group configuration.
- node_group_id str
- A node group ID. Generated if not specified. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters.
- nodeGroups List<Property Map>
- Node group configuration.
- nodeGroupId String
- A node group ID. Generated if not specified. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters.
ClusterClusterConfigAuxiliaryNodeGroupNodeGroup, ClusterClusterConfigAuxiliaryNodeGroupNodeGroupArgs                
- Roles List<string>
- Node group roles.
One of "DRIVER".
- Name string
- The Node group resource name.
- NodeGroupConfig ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfig
- The node group instance group configuration.
- Roles []string
- Node group roles.
One of "DRIVER".
- Name string
- The Node group resource name.
- NodeGroup ClusterConfig Cluster Config Auxiliary Node Group Node Group Node Group Config 
- The node group instance group configuration.
- roles List<String>
- Node group roles.
One of "DRIVER".
- name String
- The Node group resource name.
- nodeGroup ClusterConfig Cluster Config Auxiliary Node Group Node Group Node Group Config 
- The node group instance group configuration.
- roles string[]
- Node group roles.
One of "DRIVER".
- name string
- The Node group resource name.
- nodeGroup ClusterConfig Cluster Config Auxiliary Node Group Node Group Node Group Config 
- The node group instance group configuration.
- roles Sequence[str]
- Node group roles.
One of "DRIVER".
- name str
- The Node group resource name.
- node_group_config ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfig
- The node group instance group configuration.
- roles List<String>
- Node group roles.
One of "DRIVER".
- name String
- The Node group resource name.
- nodeGroupConfig Property Map
- The node group instance group configuration.
ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfig, ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigArgs                      
- Accelerators
List<ClusterCluster Config Auxiliary Node Group Node Group Node Group Config Accelerator> 
- The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times.
- DiskConfig ClusterCluster Config Auxiliary Node Group Node Group Node Group Config Disk Config 
- Disk Config
- InstanceNames List<string>
- List of auxiliary node group instance names which have been assigned to the cluster.
- MachineType string
- The name of a Google Compute Engine machine type
to create for the node group. If not specified, GCP will default to a predetermined
computed value (currently n1-standard-4).
- MinCpuPlatform string
- The name of a minimum generation of CPU family for the node group. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
- NumInstances int
- Specifies the number of master nodes to create. Please set a number greater than 0. Node Group must have at least 1 instance.
- Accelerators
[]ClusterCluster Config Auxiliary Node Group Node Group Node Group Config Accelerator 
- The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times.
- DiskConfig ClusterCluster Config Auxiliary Node Group Node Group Node Group Config Disk Config 
- Disk Config
- InstanceNames []string
- List of auxiliary node group instance names which have been assigned to the cluster.
- MachineType string
- The name of a Google Compute Engine machine type
to create for the node group. If not specified, GCP will default to a predetermined
computed value (currently n1-standard-4).
- MinCpuPlatform string
- The name of a minimum generation of CPU family for the node group. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
- NumInstances int
- Specifies the number of master nodes to create. Please set a number greater than 0. Node Group must have at least 1 instance.
- accelerators
List<ClusterCluster Config Auxiliary Node Group Node Group Node Group Config Accelerator> 
- The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times.
- diskConfig ClusterCluster Config Auxiliary Node Group Node Group Node Group Config Disk Config 
- Disk Config
- instanceNames List<String>
- List of auxiliary node group instance names which have been assigned to the cluster.
- machineType String
- The name of a Google Compute Engine machine type
to create for the node group. If not specified, GCP will default to a predetermined
computed value (currently n1-standard-4).
- minCpuPlatform String
- The name of a minimum generation of CPU family for the node group. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
- numInstances Integer
- Specifies the number of master nodes to create. Please set a number greater than 0. Node Group must have at least 1 instance.
- accelerators
ClusterCluster Config Auxiliary Node Group Node Group Node Group Config Accelerator[] 
- The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times.
- diskConfig ClusterCluster Config Auxiliary Node Group Node Group Node Group Config Disk Config 
- Disk Config
- instanceNames string[]
- List of auxiliary node group instance names which have been assigned to the cluster.
- machineType string
- The name of a Google Compute Engine machine type
to create for the node group. If not specified, GCP will default to a predetermined
computed value (currently n1-standard-4).
- minCpuPlatform string
- The name of a minimum generation of CPU family for the node group. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
- numInstances number
- Specifies the number of master nodes to create. Please set a number greater than 0. Node Group must have at least 1 instance.
- accelerators
Sequence[ClusterCluster Config Auxiliary Node Group Node Group Node Group Config Accelerator] 
- The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times.
- disk_config ClusterCluster Config Auxiliary Node Group Node Group Node Group Config Disk Config 
- Disk Config
- instance_names Sequence[str]
- List of auxiliary node group instance names which have been assigned to the cluster.
- machine_type str
- The name of a Google Compute Engine machine type
to create for the node group. If not specified, GCP will default to a predetermined
computed value (currently n1-standard-4).
- min_cpu_platform str
- The name of a minimum generation of CPU family for the node group. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
- num_instances int
- Specifies the number of master nodes to create. Please set a number greater than 0. Node Group must have at least 1 instance.
- accelerators List<Property Map>
- The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times.
- diskConfig Property Map
- Disk Config
- instanceNames List<String>
- List of auxiliary node group instance names which have been assigned to the cluster.
- machineType String
- The name of a Google Compute Engine machine type
to create for the node group. If not specified, GCP will default to a predetermined
computed value (currently n1-standard-4).
- minCpuPlatform String
- The name of a minimum generation of CPU family for the node group. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
- numInstances Number
- Specifies the number of master nodes to create. Please set a number greater than 0. Node Group must have at least 1 instance.
ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigAccelerator, ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigAcceleratorArgs                        
- AcceleratorCount int
- The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.
- AcceleratorType string
- The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
- AcceleratorCount int
- The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.
- AcceleratorType string
- The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
- acceleratorCount Integer
- The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.
- acceleratorType String
- The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
- acceleratorCount number
- The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.
- acceleratorType string
- The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
- accelerator_count int
- The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.
- accelerator_type str
- The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
- acceleratorCount Number
- The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.
- acceleratorType String
- The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigDiskConfig, ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigDiskConfigArgs                          
- BootDisk intSize Gb 
- Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- BootDisk stringType 
- The disk type of the primary disk attached to each node.
One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
- LocalSsd stringInterface 
- Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express).
- NumLocal intSsds 
- The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0.
- BootDisk intSize Gb 
- Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- BootDisk stringType 
- The disk type of the primary disk attached to each node.
One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
- LocalSsd stringInterface 
- Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express).
- NumLocal intSsds 
- The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0.
- bootDisk IntegerSize Gb 
- Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- bootDisk StringType 
- The disk type of the primary disk attached to each node.
One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
- localSsd StringInterface 
- Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express).
- numLocal IntegerSsds 
- The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0.
- bootDisk numberSize Gb 
- Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- bootDisk stringType 
- The disk type of the primary disk attached to each node.
One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
- localSsd stringInterface 
- Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express).
- numLocal numberSsds 
- The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0.
- boot_disk_ intsize_ gb 
- Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- boot_disk_ strtype 
- The disk type of the primary disk attached to each node.
One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
- local_ssd_ strinterface 
- Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express).
- num_local_ intssds 
- The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0.
- bootDisk NumberSize Gb 
- Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- bootDisk StringType 
- The disk type of the primary disk attached to each node.
One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
- localSsd StringInterface 
- Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express).
- numLocal NumberSsds 
- The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0.
ClusterClusterConfigDataprocMetricConfig, ClusterClusterConfigDataprocMetricConfigArgs            
- Metrics
List<ClusterCluster Config Dataproc Metric Config Metric> 
- Metrics sources to enable.
- Metrics
[]ClusterCluster Config Dataproc Metric Config Metric 
- Metrics sources to enable.
- metrics
List<ClusterCluster Config Dataproc Metric Config Metric> 
- Metrics sources to enable.
- metrics
ClusterCluster Config Dataproc Metric Config Metric[] 
- Metrics sources to enable.
- metrics
Sequence[ClusterCluster Config Dataproc Metric Config Metric] 
- Metrics sources to enable.
- metrics List<Property Map>
- Metrics sources to enable.
ClusterClusterConfigDataprocMetricConfigMetric, ClusterClusterConfigDataprocMetricConfigMetricArgs              
- MetricSource string
- A source for the collection of Dataproc OSS metrics (see available OSS metrics).
- MetricOverrides List<string>
- One or more [available OSS metrics] (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) to collect for the metric source.
- MetricSource string
- A source for the collection of Dataproc OSS metrics (see available OSS metrics).
- MetricOverrides []string
- One or more [available OSS metrics] (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) to collect for the metric source.
- metricSource String
- A source for the collection of Dataproc OSS metrics (see available OSS metrics).
- metricOverrides List<String>
- One or more [available OSS metrics] (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) to collect for the metric source.
- metricSource string
- A source for the collection of Dataproc OSS metrics (see available OSS metrics).
- metricOverrides string[]
- One or more [available OSS metrics] (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) to collect for the metric source.
- metric_source str
- A source for the collection of Dataproc OSS metrics (see available OSS metrics).
- metric_overrides Sequence[str]
- One or more [available OSS metrics] (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) to collect for the metric source.
- metricSource String
- A source for the collection of Dataproc OSS metrics (see available OSS metrics).
- metricOverrides List<String>
- One or more [available OSS metrics] (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) to collect for the metric source.
ClusterClusterConfigEncryptionConfig, ClusterClusterConfigEncryptionConfigArgs          
- KmsKey stringName 
- The Cloud KMS key name to use for PD disk encryption for
all instances in the cluster.
- KmsKey stringName 
- The Cloud KMS key name to use for PD disk encryption for
all instances in the cluster.
- kmsKey StringName 
- The Cloud KMS key name to use for PD disk encryption for
all instances in the cluster.
- kmsKey stringName 
- The Cloud KMS key name to use for PD disk encryption for
all instances in the cluster.
- kms_key_ strname 
- The Cloud KMS key name to use for PD disk encryption for
all instances in the cluster.
- kmsKey StringName 
- The Cloud KMS key name to use for PD disk encryption for
all instances in the cluster.
ClusterClusterConfigEndpointConfig, ClusterClusterConfigEndpointConfigArgs          
- EnableHttp boolPort Access 
- The flag to enable http access to specific ports on the cluster from external sources (aka Component Gateway). Defaults to false.
- HttpPorts Dictionary<string, string>
- The map of port descriptions to URLs. Will only be populated if
enable_http_port_access is true.
- EnableHttp boolPort Access 
- The flag to enable http access to specific ports on the cluster from external sources (aka Component Gateway). Defaults to false.
- HttpPorts map[string]string
- The map of port descriptions to URLs. Will only be populated if
enable_http_port_access is true.
- enableHttp BooleanPort Access 
- The flag to enable http access to specific ports on the cluster from external sources (aka Component Gateway). Defaults to false.
- httpPorts Map<String,String>
- The map of port descriptions to URLs. Will only be populated if
enable_http_port_access is true.
- enableHttp booleanPort Access 
- The flag to enable http access to specific ports on the cluster from external sources (aka Component Gateway). Defaults to false.
- httpPorts {[key: string]: string}
- The map of port descriptions to URLs. Will only be populated if
enable_http_port_access is true.
- enable_http_ boolport_ access 
- The flag to enable http access to specific ports on the cluster from external sources (aka Component Gateway). Defaults to false.
- http_ports Mapping[str, str]
- The map of port descriptions to URLs. Will only be populated if
enable_http_port_access is true.
- enableHttp BooleanPort Access 
- The flag to enable http access to specific ports on the cluster from external sources (aka Component Gateway). Defaults to false.
- httpPorts Map<String>
- The map of port descriptions to URLs. Will only be populated if
enable_http_port_access is true.
ClusterClusterConfigGceClusterConfig, ClusterClusterConfigGceClusterConfigArgs            
- ConfidentialInstance ClusterConfig Cluster Config Gce Cluster Config Confidential Instance Config 
- Confidential Instance Config for clusters using Confidential VMs
- InternalIp boolOnly 
- By default, clusters are not restricted to internal IP addresses,
and will have ephemeral external IP addresses assigned to each instance. If set to true, all
instances in the cluster will only have internal IP addresses. Note: Private Google Access
(also known as privateIpGoogleAccess) must be enabled on the subnetwork that the cluster will be launched in.
- Metadata Dictionary<string, string>
- A map of the Compute Engine metadata entries to add to all instances (see Project and instance metadata).
- Network string
- The name or self_link of the Google Compute Engine
network the cluster will be part of. Conflicts with subnetwork. If neither is specified, this defaults to the "default" network.
- NodeGroup ClusterAffinity Cluster Config Gce Cluster Config Node Group Affinity 
- Node Group Affinity for sole-tenant clusters.
- ReservationAffinity ClusterCluster Config Gce Cluster Config Reservation Affinity 
- Reservation Affinity for consuming zonal reservation.
- ServiceAccount string
- The service account to be used by the Node VMs. If not specified, the "default" service account is used.
- ServiceAccount List<string>Scopes 
- The set of Google API scopes
to be made available on all of the node VMs under the service_account specified. Both OAuth2 URLs and gcloud short names are supported. To allow full access to all Cloud APIs, use the cloud-platform scope. See a complete list of scopes here.
- ShieldedInstance ClusterConfig Cluster Config Gce Cluster Config Shielded Instance Config 
- Shielded Instance Config for clusters using Compute Engine Shielded VMs.
- Subnetwork string
- The name or self_link of the Google Compute Engine
subnetwork the cluster will be part of. Conflicts with network.
- List<string>
- The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls.
- Zone string
- The GCP zone where your data is stored and used (i.e. where
the master and the worker nodes will be created in). If region is set to 'global' (default) then zone is mandatory, otherwise GCP is able to make use of Auto Zone Placement to determine this automatically for you. Note: This setting additionally determines and restricts which computing resources are available for use with other configs such as cluster_config.master_config.machine_type and cluster_config.worker_config.machine_type.
- ConfidentialInstance ClusterConfig Cluster Config Gce Cluster Config Confidential Instance Config 
- Confidential Instance Config for clusters using Confidential VMs
- InternalIp boolOnly 
- By default, clusters are not restricted to internal IP addresses,
and will have ephemeral external IP addresses assigned to each instance. If set to true, all
instances in the cluster will only have internal IP addresses. Note: Private Google Access
(also known as privateIpGoogleAccess) must be enabled on the subnetwork that the cluster will be launched in.
- Metadata map[string]string
- A map of the Compute Engine metadata entries to add to all instances (see Project and instance metadata).
- Network string
- The name or self_link of the Google Compute Engine
network the cluster will be part of. Conflicts with subnetwork. If neither is specified, this defaults to the "default" network.
- NodeGroup ClusterAffinity Cluster Config Gce Cluster Config Node Group Affinity 
- Node Group Affinity for sole-tenant clusters.
- ReservationAffinity ClusterCluster Config Gce Cluster Config Reservation Affinity 
- Reservation Affinity for consuming zonal reservation.
- ServiceAccount string
- The service account to be used by the Node VMs. If not specified, the "default" service account is used.
- ServiceAccount []stringScopes 
- The set of Google API scopes
to be made available on all of the node VMs under the service_account specified. Both OAuth2 URLs and gcloud short names are supported. To allow full access to all Cloud APIs, use the cloud-platform scope. See a complete list of scopes here.
- ShieldedInstance ClusterConfig Cluster Config Gce Cluster Config Shielded Instance Config 
- Shielded Instance Config for clusters using Compute Engine Shielded VMs.
- Subnetwork string
- The name or self_link of the Google Compute Engine
subnetwork the cluster will be part of. Conflicts with network.
- []string
- The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls.
- Zone string
- The GCP zone where your data is stored and used (i.e. where
the master and the worker nodes will be created in). If region is set to 'global' (default) then zone is mandatory, otherwise GCP is able to make use of Auto Zone Placement to determine this automatically for you. Note: This setting additionally determines and restricts which computing resources are available for use with other configs such as cluster_config.master_config.machine_type and cluster_config.worker_config.machine_type.
- confidentialInstance ClusterConfig Cluster Config Gce Cluster Config Confidential Instance Config 
- Confidential Instance Config for clusters using Confidential VMs
- internalIp BooleanOnly 
- By default, clusters are not restricted to internal IP addresses,
and will have ephemeral external IP addresses assigned to each instance. If set to true, all
instances in the cluster will only have internal IP addresses. Note: Private Google Access
(also known as privateIpGoogleAccess) must be enabled on the subnetwork that the cluster will be launched in.
- metadata Map<String,String>
- A map of the Compute Engine metadata entries to add to all instances (see Project and instance metadata).
- network String
- The name or self_link of the Google Compute Engine
network the cluster will be part of. Conflicts with subnetwork. If neither is specified, this defaults to the "default" network.
- nodeGroup ClusterAffinity Cluster Config Gce Cluster Config Node Group Affinity 
- Node Group Affinity for sole-tenant clusters.
- reservationAffinity ClusterCluster Config Gce Cluster Config Reservation Affinity 
- Reservation Affinity for consuming zonal reservation.
- serviceAccount String
- The service account to be used by the Node VMs. If not specified, the "default" service account is used.
- serviceAccount List<String>Scopes 
- The set of Google API scopes
to be made available on all of the node VMs under the service_account specified. Both OAuth2 URLs and gcloud short names are supported. To allow full access to all Cloud APIs, use the cloud-platform scope. See a complete list of scopes here.
- shieldedInstance ClusterConfig Cluster Config Gce Cluster Config Shielded Instance Config 
- Shielded Instance Config for clusters using Compute Engine Shielded VMs.
- subnetwork String
- The name or self_link of the Google Compute Engine
subnetwork the cluster will be part of. Conflicts with network.
- List<String>
- The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls.
- zone String
- The GCP zone where your data is stored and used (i.e. where
the master and the worker nodes will be created in). If region is set to 'global' (default) then zone is mandatory, otherwise GCP is able to make use of Auto Zone Placement to determine this automatically for you. Note: This setting additionally determines and restricts which computing resources are available for use with other configs such as cluster_config.master_config.machine_type and cluster_config.worker_config.machine_type.
- confidentialInstance ClusterConfig Cluster Config Gce Cluster Config Confidential Instance Config 
- Confidential Instance Config for clusters using Confidential VMs
- internalIp booleanOnly 
- By default, clusters are not restricted to internal IP addresses,
and will have ephemeral external IP addresses assigned to each instance. If set to true, all
instances in the cluster will only have internal IP addresses. Note: Private Google Access
(also known as privateIpGoogleAccess) must be enabled on the subnetwork that the cluster will be launched in.
- metadata {[key: string]: string}
- A map of the Compute Engine metadata entries to add to all instances (see Project and instance metadata).
- network string
- The name or self_link of the Google Compute Engine
network the cluster will be part of. Conflicts with subnetwork. If neither is specified, this defaults to the "default" network.
- nodeGroup ClusterAffinity Cluster Config Gce Cluster Config Node Group Affinity 
- Node Group Affinity for sole-tenant clusters.
- reservationAffinity ClusterCluster Config Gce Cluster Config Reservation Affinity 
- Reservation Affinity for consuming zonal reservation.
- serviceAccount string
- The service account to be used by the Node VMs. If not specified, the "default" service account is used.
- serviceAccount string[]Scopes 
- The set of Google API scopes
to be made available on all of the node VMs under the service_account specified. Both OAuth2 URLs and gcloud short names are supported. To allow full access to all Cloud APIs, use the cloud-platform scope. See a complete list of scopes here.
- shieldedInstance ClusterConfig Cluster Config Gce Cluster Config Shielded Instance Config 
- Shielded Instance Config for clusters using Compute Engine Shielded VMs.
- subnetwork string
- The name or self_link of the Google Compute Engine
subnetwork the cluster will be part of. Conflicts with network.
- string[]
- The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls.
- zone string
- The GCP zone where your data is stored and used (i.e. where
the master and the worker nodes will be created in). If region is set to 'global' (default) then zone is mandatory, otherwise GCP is able to make use of Auto Zone Placement to determine this automatically for you. Note: This setting additionally determines and restricts which computing resources are available for use with other configs such as cluster_config.master_config.machine_type and cluster_config.worker_config.machine_type.
- confidential_instance_ Clusterconfig Cluster Config Gce Cluster Config Confidential Instance Config 
- Confidential Instance Config for clusters using Confidential VMs
- internal_ip_ boolonly 
- By default, clusters are not restricted to internal IP addresses,
and will have ephemeral external IP addresses assigned to each instance. If set to true, all
instances in the cluster will only have internal IP addresses. Note: Private Google Access
(also known as privateIpGoogleAccess) must be enabled on the subnetwork that the cluster will be launched in.
- metadata Mapping[str, str]
- A map of the Compute Engine metadata entries to add to all instances (see Project and instance metadata).
- network str
- The name or self_link of the Google Compute Engine
network the cluster will be part of. Conflicts with subnetwork. If neither is specified, this defaults to the "default" network.
- node_group_ Clusteraffinity Cluster Config Gce Cluster Config Node Group Affinity 
- Node Group Affinity for sole-tenant clusters.
- reservation_affinity ClusterCluster Config Gce Cluster Config Reservation Affinity 
- Reservation Affinity for consuming zonal reservation.
- service_account str
- The service account to be used by the Node VMs. If not specified, the "default" service account is used.
- service_account_ Sequence[str]scopes 
- The set of Google API scopes
to be made available on all of the node VMs under the service_account specified. Both OAuth2 URLs and gcloud short names are supported. To allow full access to all Cloud APIs, use the cloud-platform scope. See a complete list of scopes here.
- shielded_instance_ Clusterconfig Cluster Config Gce Cluster Config Shielded Instance Config 
- Shielded Instance Config for clusters using Compute Engine Shielded VMs.
- subnetwork str
- The name or self_link of the Google Compute Engine
subnetwork the cluster will be part of. Conflicts with network.
- Sequence[str]
- The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls.
- zone str
- The GCP zone where your data is stored and used (i.e. where
the master and the worker nodes will be created in). If region is set to 'global' (default) then zone is mandatory, otherwise GCP is able to make use of Auto Zone Placement to determine this automatically for you. Note: This setting additionally determines and restricts which computing resources are available for use with other configs such as cluster_config.master_config.machine_type and cluster_config.worker_config.machine_type.
- confidentialInstance Property MapConfig 
- Confidential Instance Config for clusters using Confidential VMs
- internalIp BooleanOnly 
- By default, clusters are not restricted to internal IP addresses,
and will have ephemeral external IP addresses assigned to each instance. If set to true, all
instances in the cluster will only have internal IP addresses. Note: Private Google Access
(also known as privateIpGoogleAccess) must be enabled on the subnetwork that the cluster will be launched in.
- metadata Map<String>
- A map of the Compute Engine metadata entries to add to all instances (see Project and instance metadata).
- network String
- The name or self_link of the Google Compute Engine
network the cluster will be part of. Conflicts with subnetwork. If neither is specified, this defaults to the "default" network.
- nodeGroup Property MapAffinity 
- Node Group Affinity for sole-tenant clusters.
- reservationAffinity Property Map
- Reservation Affinity for consuming zonal reservation.
- serviceAccount String
- The service account to be used by the Node VMs. If not specified, the "default" service account is used.
- serviceAccount List<String>Scopes 
- The set of Google API scopes
to be made available on all of the node VMs under the service_account specified. Both OAuth2 URLs and gcloud short names are supported. To allow full access to all Cloud APIs, use the cloud-platform scope. See a complete list of scopes here.
- shieldedInstance Property MapConfig 
- Shielded Instance Config for clusters using Compute Engine Shielded VMs.
- subnetwork String
- The name or self_link of the Google Compute Engine
subnetwork the cluster will be part of. Conflicts with network.
- List<String>
- The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls.
- zone String
- The GCP zone where your data is stored and used (i.e. where
the master and the worker nodes will be created in). If region is set to 'global' (default) then zone is mandatory, otherwise GCP is able to make use of Auto Zone Placement to determine this automatically for you. Note: This setting additionally determines and restricts which computing resources are available for use with other configs such as cluster_config.master_config.machine_type and cluster_config.worker_config.machine_type.
ClusterClusterConfigGceClusterConfigConfidentialInstanceConfig, ClusterClusterConfigGceClusterConfigConfidentialInstanceConfigArgs                  
- EnableConfidential boolCompute 
- Defines whether the instance should have confidential compute enabled.
- EnableConfidential boolCompute 
- Defines whether the instance should have confidential compute enabled.
- enableConfidential BooleanCompute 
- Defines whether the instance should have confidential compute enabled.
- enableConfidential booleanCompute 
- Defines whether the instance should have confidential compute enabled.
- enable_confidential_ boolcompute 
- Defines whether the instance should have confidential compute enabled.
- enableConfidential BooleanCompute 
- Defines whether the instance should have confidential compute enabled.
ClusterClusterConfigGceClusterConfigNodeGroupAffinity, ClusterClusterConfigGceClusterConfigNodeGroupAffinityArgs                  
- NodeGroup stringUri 
- The URI of a sole-tenant node group resource that the cluster will be created on.
- NodeGroup stringUri 
- The URI of a sole-tenant node group resource that the cluster will be created on.
- nodeGroup StringUri 
- The URI of a sole-tenant node group resource that the cluster will be created on.
- nodeGroup stringUri 
- The URI of a sole-tenant node group resource that the cluster will be created on.
- node_group_ struri 
- The URI of a sole-tenant node group resource that the cluster will be created on.
- nodeGroup StringUri 
- The URI of a sole-tenant node group resource that the cluster will be created on.
ClusterClusterConfigGceClusterConfigReservationAffinity, ClusterClusterConfigGceClusterConfigReservationAffinityArgs                
- ConsumeReservation stringType 
- Corresponds to the type of reservation consumption.
- Key string
- Corresponds to the label key of reservation resource.
- Values List<string>
- Corresponds to the label values of reservation resource.
- ConsumeReservation stringType 
- Corresponds to the type of reservation consumption.
- Key string
- Corresponds to the label key of reservation resource.
- Values []string
- Corresponds to the label values of reservation resource.
- consumeReservation StringType 
- Corresponds to the type of reservation consumption.
- key String
- Corresponds to the label key of reservation resource.
- values List<String>
- Corresponds to the label values of reservation resource.
- consumeReservationType string
- Corresponds to the type of reservation consumption.
- key string
- Corresponds to the label key of reservation resource.
- values string[]
- Corresponds to the label values of reservation resource.
- consume_reservation_type str
- Corresponds to the type of reservation consumption.
- key str
- Corresponds to the label key of reservation resource.
- values Sequence[str]
- Corresponds to the label values of reservation resource.
- consumeReservationType String
- Corresponds to the type of reservation consumption.
- key String
- Corresponds to the label key of reservation resource.
- values List<String>
- Corresponds to the label values of reservation resource.
ClusterClusterConfigGceClusterConfigShieldedInstanceConfig, ClusterClusterConfigGceClusterConfigShieldedInstanceConfigArgs                  
- EnableIntegrityMonitoring bool
- Defines whether instances have integrity monitoring enabled.
- EnableSecureBoot bool
- Defines whether instances have Secure Boot enabled.
- EnableVtpm bool
- Defines whether instances have the vTPM enabled.
- EnableIntegrityMonitoring bool
- Defines whether instances have integrity monitoring enabled.
- EnableSecureBoot bool
- Defines whether instances have Secure Boot enabled.
- EnableVtpm bool
- Defines whether instances have the vTPM enabled.
- enableIntegrityMonitoring Boolean
- Defines whether instances have integrity monitoring enabled.
- enableSecureBoot Boolean
- Defines whether instances have Secure Boot enabled.
- enableVtpm Boolean
- Defines whether instances have the vTPM enabled.
- enableIntegrityMonitoring boolean
- Defines whether instances have integrity monitoring enabled.
- enableSecureBoot boolean
- Defines whether instances have Secure Boot enabled.
- enableVtpm boolean
- Defines whether instances have the vTPM enabled.
- enable_integrity_monitoring bool
- Defines whether instances have integrity monitoring enabled.
- enable_secure_boot bool
- Defines whether instances have Secure Boot enabled.
- enable_vtpm bool
- Defines whether instances have the vTPM enabled.
- enableIntegrityMonitoring Boolean
- Defines whether instances have integrity monitoring enabled.
- enableSecureBoot Boolean
- Defines whether instances have Secure Boot enabled.
- enableVtpm Boolean
- Defines whether instances have the vTPM enabled.
ClusterClusterConfigInitializationAction, ClusterClusterConfigInitializationActionArgs          
- Script string
- The script to be executed during initialization of the cluster. The script must be a GCS file with a gs:// prefix.
- TimeoutSec int
- The maximum duration (in seconds) which script is allowed to take to execute its action. GCP will default to a predetermined computed value if not set (currently 300).
- Script string
- The script to be executed during initialization of the cluster. The script must be a GCS file with a gs:// prefix.
- TimeoutSec int
- The maximum duration (in seconds) which script is allowed to take to execute its action. GCP will default to a predetermined computed value if not set (currently 300).
- script String
- The script to be executed during initialization of the cluster. The script must be a GCS file with a gs:// prefix.
- timeoutSec Integer
- The maximum duration (in seconds) which script is allowed to take to execute its action. GCP will default to a predetermined computed value if not set (currently 300).
- script string
- The script to be executed during initialization of the cluster. The script must be a GCS file with a gs:// prefix.
- timeoutSec number
- The maximum duration (in seconds) which script is allowed to take to execute its action. GCP will default to a predetermined computed value if not set (currently 300).
- script str
- The script to be executed during initialization of the cluster. The script must be a GCS file with a gs:// prefix.
- timeout_sec int
- The maximum duration (in seconds) which script is allowed to take to execute its action. GCP will default to a predetermined computed value if not set (currently 300).
- script String
- The script to be executed during initialization of the cluster. The script must be a GCS file with a gs:// prefix.
- timeoutSec Number
- The maximum duration (in seconds) which script is allowed to take to execute its action. GCP will default to a predetermined computed value if not set (currently 300).
ClusterClusterConfigLifecycleConfig, ClusterClusterConfigLifecycleConfigArgs          
- AutoDeleteTime string
- The time when cluster will be auto-deleted.
A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds.
Example: "2014-10-02T15:01:23.045123456Z".
- IdleDeleteTtl string
- The duration to keep the cluster alive while idling (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d].
- IdleStartTime string
- Time when the cluster became idle (most recent job finished) and became eligible for deletion due to idleness.
- AutoDeleteTime string
- The time when cluster will be auto-deleted.
A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds.
Example: "2014-10-02T15:01:23.045123456Z".
- IdleDeleteTtl string
- The duration to keep the cluster alive while idling (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d].
- IdleStartTime string
- Time when the cluster became idle (most recent job finished) and became eligible for deletion due to idleness.
- autoDeleteTime String
- The time when cluster will be auto-deleted.
A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds.
Example: "2014-10-02T15:01:23.045123456Z".
- idleDeleteTtl String
- The duration to keep the cluster alive while idling (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d].
- idleStartTime String
- Time when the cluster became idle (most recent job finished) and became eligible for deletion due to idleness.
- autoDeleteTime string
- The time when cluster will be auto-deleted.
A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds.
Example: "2014-10-02T15:01:23.045123456Z".
- idleDeleteTtl string
- The duration to keep the cluster alive while idling (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d].
- idleStartTime string
- Time when the cluster became idle (most recent job finished) and became eligible for deletion due to idleness.
- auto_delete_time str
- The time when cluster will be auto-deleted.
A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds.
Example: "2014-10-02T15:01:23.045123456Z".
- idle_delete_ttl str
- The duration to keep the cluster alive while idling (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d].
- idle_start_time str
- Time when the cluster became idle (most recent job finished) and became eligible for deletion due to idleness.
- autoDeleteTime String
- The time when cluster will be auto-deleted.
A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds.
Example: "2014-10-02T15:01:23.045123456Z".
- idleDeleteTtl String
- The duration to keep the cluster alive while idling (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d].
- idleStartTime String
- Time when the cluster became idle (most recent job finished) and became eligible for deletion due to idleness.
ClusterClusterConfigMasterConfig, ClusterClusterConfigMasterConfigArgs          
- Accelerators
List<ClusterClusterConfigMasterConfigAccelerator>
- The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times.
- DiskConfig ClusterClusterConfigMasterConfigDiskConfig
- Disk Config
- ImageUri string
- The URI for the image to use for this worker. See the guide for more information.
- InstanceNames List<string>
- List of master instance names which have been assigned to the cluster.
- MachineType string
- The name of a Google Compute Engine machine type
to create for the master. If not specified, GCP will default to a predetermined
computed value (currently n1-standard-4).
- MinCpuPlatform string
- The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
- NumInstances int
- Specifies the number of master nodes to create. If not specified, GCP will default to a predetermined computed value (currently 1).
- Accelerators
[]ClusterClusterConfigMasterConfigAccelerator
- The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times.
- DiskConfig ClusterClusterConfigMasterConfigDiskConfig
- Disk Config
- ImageUri string
- The URI for the image to use for this worker. See the guide for more information.
- InstanceNames []string
- List of master instance names which have been assigned to the cluster.
- MachineType string
- The name of a Google Compute Engine machine type
to create for the master. If not specified, GCP will default to a predetermined
computed value (currently n1-standard-4).
- MinCpuPlatform string
- The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
- NumInstances int
- Specifies the number of master nodes to create. If not specified, GCP will default to a predetermined computed value (currently 1).
- accelerators
List<ClusterClusterConfigMasterConfigAccelerator>
- The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times.
- diskConfig ClusterClusterConfigMasterConfigDiskConfig
- Disk Config
- imageUri String
- The URI for the image to use for this worker. See the guide for more information.
- instanceNames List<String>
- List of master instance names which have been assigned to the cluster.
- machineType String
- The name of a Google Compute Engine machine type
to create for the master. If not specified, GCP will default to a predetermined
computed value (currently n1-standard-4).
- minCpuPlatform String
- The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
- numInstances Integer
- Specifies the number of master nodes to create. If not specified, GCP will default to a predetermined computed value (currently 1).
- accelerators
ClusterClusterConfigMasterConfigAccelerator[]
- The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times.
- diskConfig ClusterClusterConfigMasterConfigDiskConfig
- Disk Config
- imageUri string
- The URI for the image to use for this worker. See the guide for more information.
- instanceNames string[]
- List of master instance names which have been assigned to the cluster.
- machineType string
- The name of a Google Compute Engine machine type
to create for the master. If not specified, GCP will default to a predetermined
computed value (currently n1-standard-4).
- minCpuPlatform string
- The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
- numInstances number
- Specifies the number of master nodes to create. If not specified, GCP will default to a predetermined computed value (currently 1).
- accelerators
Sequence[ClusterClusterConfigMasterConfigAccelerator]
- The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times.
- disk_config ClusterClusterConfigMasterConfigDiskConfig
- Disk Config
- image_uri str
- The URI for the image to use for this worker. See the guide for more information.
- instance_names Sequence[str]
- List of master instance names which have been assigned to the cluster.
- machine_type str
- The name of a Google Compute Engine machine type
to create for the master. If not specified, GCP will default to a predetermined
computed value (currently n1-standard-4).
- min_cpu_platform str
- The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
- num_instances int
- Specifies the number of master nodes to create. If not specified, GCP will default to a predetermined computed value (currently 1).
- accelerators List<Property Map>
- The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times.
- diskConfig Property Map
- Disk Config
- imageUri String
- The URI for the image to use for this worker. See the guide for more information.
- instanceNames List<String>
- List of master instance names which have been assigned to the cluster.
- machineType String
- The name of a Google Compute Engine machine type
to create for the master. If not specified, GCP will default to a predetermined
computed value (currently n1-standard-4).
- minCpuPlatform String
- The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
- numInstances Number
- Specifies the number of master nodes to create. If not specified, GCP will default to a predetermined computed value (currently 1).
ClusterClusterConfigMasterConfigAccelerator, ClusterClusterConfigMasterConfigAcceleratorArgs            
- AcceleratorCount int
- The number of the accelerator cards of this type exposed to this instance. Often restricted to one of - 1,- 2,- 4, or- 8.- The Cloud Dataproc API can return unintuitive error messages when using accelerators; even when you have defined an accelerator, Auto Zone Placement does not exclusively select zones that have that accelerator available. If you get a 400 error that the accelerator can't be found, this is a likely cause. Make sure you check accelerator availability by zone if you are trying to use accelerators in a given zone. 
- AcceleratorType string
- The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
- AcceleratorCount int
- The number of the accelerator cards of this type exposed to this instance. Often restricted to one of - 1,- 2,- 4, or- 8.- The Cloud Dataproc API can return unintuitive error messages when using accelerators; even when you have defined an accelerator, Auto Zone Placement does not exclusively select zones that have that accelerator available. If you get a 400 error that the accelerator can't be found, this is a likely cause. Make sure you check accelerator availability by zone if you are trying to use accelerators in a given zone. 
- AcceleratorType string
- The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
- acceleratorCount Integer
- The number of the accelerator cards of this type exposed to this instance. Often restricted to one of - 1,- 2,- 4, or- 8.- The Cloud Dataproc API can return unintuitive error messages when using accelerators; even when you have defined an accelerator, Auto Zone Placement does not exclusively select zones that have that accelerator available. If you get a 400 error that the accelerator can't be found, this is a likely cause. Make sure you check accelerator availability by zone if you are trying to use accelerators in a given zone. 
- acceleratorType String
- The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
- acceleratorCount number
- The number of the accelerator cards of this type exposed to this instance. Often restricted to one of - 1,- 2,- 4, or- 8.- The Cloud Dataproc API can return unintuitive error messages when using accelerators; even when you have defined an accelerator, Auto Zone Placement does not exclusively select zones that have that accelerator available. If you get a 400 error that the accelerator can't be found, this is a likely cause. Make sure you check accelerator availability by zone if you are trying to use accelerators in a given zone. 
- acceleratorType string
- The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
- accelerator_count int
- The number of the accelerator cards of this type exposed to this instance. Often restricted to one of - 1,- 2,- 4, or- 8.- The Cloud Dataproc API can return unintuitive error messages when using accelerators; even when you have defined an accelerator, Auto Zone Placement does not exclusively select zones that have that accelerator available. If you get a 400 error that the accelerator can't be found, this is a likely cause. Make sure you check accelerator availability by zone if you are trying to use accelerators in a given zone. 
- accelerator_type str
- The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
- acceleratorCount Number
- The number of the accelerator cards of this type exposed to this instance. Often restricted to one of - 1,- 2,- 4, or- 8.- The Cloud Dataproc API can return unintuitive error messages when using accelerators; even when you have defined an accelerator, Auto Zone Placement does not exclusively select zones that have that accelerator available. If you get a 400 error that the accelerator can't be found, this is a likely cause. Make sure you check accelerator availability by zone if you are trying to use accelerators in a given zone. 
- acceleratorType String
- The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
ClusterClusterConfigMasterConfigDiskConfig, ClusterClusterConfigMasterConfigDiskConfigArgs              
- BootDiskSizeGb int
- Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- BootDiskType string
- The disk type of the primary disk attached to each node.
One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
- LocalSsdInterface string
- Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance.
- NumLocalSsds int
- The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0.
- BootDiskSizeGb int
- Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- BootDiskType string
- The disk type of the primary disk attached to each node.
One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
- LocalSsdInterface string
- Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance.
- NumLocalSsds int
- The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0.
- bootDiskSizeGb Integer
- Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- bootDiskType String
- The disk type of the primary disk attached to each node.
One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
- localSsdInterface String
- Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance.
- numLocalSsds Integer
- The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0.
- bootDiskSizeGb number
- Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- bootDiskType string
- The disk type of the primary disk attached to each node.
One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
- localSsdInterface string
- Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance.
- numLocalSsds number
- The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0.
- boot_disk_size_gb int
- Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- boot_disk_type str
- The disk type of the primary disk attached to each node.
One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
- local_ssd_interface str
- Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance.
- num_local_ssds int
- The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0.
- bootDiskSizeGb Number
- Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- bootDiskType String
- The disk type of the primary disk attached to each node.
One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
- localSsdInterface String
- Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance.
- numLocalSsds Number
- The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0.
ClusterClusterConfigMetastoreConfig, ClusterClusterConfigMetastoreConfigArgs          
- DataprocMetastoreService string
- Resource name of an existing Dataproc Metastore service. - Only resource names including projectid and location (region) are valid. Examples: - projects/[projectId]/locations/[dataproc_region]/services/[service-name]
- DataprocMetastoreService string
- Resource name of an existing Dataproc Metastore service. - Only resource names including projectid and location (region) are valid. Examples: - projects/[projectId]/locations/[dataproc_region]/services/[service-name]
- dataprocMetastoreService String
- Resource name of an existing Dataproc Metastore service. - Only resource names including projectid and location (region) are valid. Examples: - projects/[projectId]/locations/[dataproc_region]/services/[service-name]
- dataprocMetastoreService string
- Resource name of an existing Dataproc Metastore service. - Only resource names including projectid and location (region) are valid. Examples: - projects/[projectId]/locations/[dataproc_region]/services/[service-name]
- dataproc_metastore_service str
- Resource name of an existing Dataproc Metastore service. - Only resource names including projectid and location (region) are valid. Examples: - projects/[projectId]/locations/[dataproc_region]/services/[service-name]
- dataprocMetastoreService String
- Resource name of an existing Dataproc Metastore service. - Only resource names including projectid and location (region) are valid. Examples: - projects/[projectId]/locations/[dataproc_region]/services/[service-name]
ClusterClusterConfigPreemptibleWorkerConfig, ClusterClusterConfigPreemptibleWorkerConfigArgs            
- DiskConfig ClusterClusterConfigPreemptibleWorkerConfigDiskConfig
- Disk Config
- InstanceFlexibilityPolicy ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicy
- Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.
- InstanceNames List<string>
- List of preemptible instance names which have been assigned to the cluster.
- NumInstances int
- Specifies the number of preemptible nodes to create. Defaults to 0.
- Preemptibility string
- Specifies the preemptibility of the secondary workers. The default value is PREEMPTIBLE. Accepted values are: - PREEMPTIBILITY_UNSPECIFIED
- NON_PREEMPTIBLE
- PREEMPTIBLE
 
- DiskConfig ClusterClusterConfigPreemptibleWorkerConfigDiskConfig
- Disk Config
- InstanceFlexibilityPolicy ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicy
- Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.
- InstanceNames []string
- List of preemptible instance names which have been assigned to the cluster.
- NumInstances int
- Specifies the number of preemptible nodes to create. Defaults to 0.
- Preemptibility string
- Specifies the preemptibility of the secondary workers. The default value is PREEMPTIBLE. Accepted values are: - PREEMPTIBILITY_UNSPECIFIED
- NON_PREEMPTIBLE
- PREEMPTIBLE
 
- diskConfig ClusterClusterConfigPreemptibleWorkerConfigDiskConfig
- Disk Config
- instanceFlexibilityPolicy ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicy
- Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.
- instanceNames List<String>
- List of preemptible instance names which have been assigned to the cluster.
- numInstances Integer
- Specifies the number of preemptible nodes to create. Defaults to 0.
- preemptibility String
- Specifies the preemptibility of the secondary workers. The default value is PREEMPTIBLE. Accepted values are: - PREEMPTIBILITY_UNSPECIFIED
- NON_PREEMPTIBLE
- PREEMPTIBLE
 
- diskConfig ClusterClusterConfigPreemptibleWorkerConfigDiskConfig
- Disk Config
- instanceFlexibilityPolicy ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicy
- Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.
- instanceNames string[]
- List of preemptible instance names which have been assigned to the cluster.
- numInstances number
- Specifies the number of preemptible nodes to create. Defaults to 0.
- preemptibility string
- Specifies the preemptibility of the secondary workers. The default value is PREEMPTIBLE. Accepted values are: - PREEMPTIBILITY_UNSPECIFIED
- NON_PREEMPTIBLE
- PREEMPTIBLE
 
- disk_config ClusterClusterConfigPreemptibleWorkerConfigDiskConfig
- Disk Config
- instance_flexibility_policy ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicy
- Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.
- instance_names Sequence[str]
- List of preemptible instance names which have been assigned to the cluster.
- num_instances int
- Specifies the number of preemptible nodes to create. Defaults to 0.
- preemptibility str
- Specifies the preemptibility of the secondary workers. The default value is PREEMPTIBLE. Accepted values are: - PREEMPTIBILITY_UNSPECIFIED
- NON_PREEMPTIBLE
- PREEMPTIBLE
 
- diskConfig Property Map
- Disk Config
- instanceFlexibilityPolicy Property Map
- Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.
- instanceNames List<String>
- List of preemptible instance names which have been assigned to the cluster.
- numInstances Number
- Specifies the number of preemptible nodes to create. Defaults to 0.
- preemptibility String
- Specifies the preemptibility of the secondary workers. The default value is PREEMPTIBLE. Accepted values are: - PREEMPTIBILITY_UNSPECIFIED
- NON_PREEMPTIBLE
- PREEMPTIBLE
 
ClusterClusterConfigPreemptibleWorkerConfigDiskConfig, ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs                
- BootDisk intSize Gb 
- Size of the primary disk attached to each preemptible worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- BootDisk stringType 
- The disk type of the primary disk attached to each preemptible worker node.
One of "pd-ssd"or"pd-standard". Defaults to"pd-standard".
- LocalSsd stringInterface 
- Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express).
- NumLocal intSsds 
- The amount of local SSD disks that will be attached to each preemptible worker node. Defaults to 0.
- BootDisk intSize Gb 
- Size of the primary disk attached to each preemptible worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- BootDisk stringType 
- The disk type of the primary disk attached to each preemptible worker node.
One of "pd-ssd"or"pd-standard". Defaults to"pd-standard".
- LocalSsd stringInterface 
- Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express).
- NumLocal intSsds 
- The amount of local SSD disks that will be attached to each preemptible worker node. Defaults to 0.
- bootDisk IntegerSize Gb 
- Size of the primary disk attached to each preemptible worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- bootDisk StringType 
- The disk type of the primary disk attached to each preemptible worker node.
One of "pd-ssd"or"pd-standard". Defaults to"pd-standard".
- localSsd StringInterface 
- Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express).
- numLocal IntegerSsds 
- The amount of local SSD disks that will be attached to each preemptible worker node. Defaults to 0.
- bootDiskSizeGb number
- Size of the primary disk attached to each preemptible worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- bootDiskType string
- The disk type of the primary disk attached to each preemptible worker node.
One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
- localSsdInterface string
- Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express).
- numLocalSsds number
- The amount of local SSD disks that will be attached to each preemptible worker node. Defaults to 0.
- boot_disk_size_gb int
- Size of the primary disk attached to each preemptible worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- boot_disk_type str
- The disk type of the primary disk attached to each preemptible worker node.
One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
- local_ssd_interface str
- Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express).
- num_local_ssds int
- The amount of local SSD disks that will be attached to each preemptible worker node. Defaults to 0.
- bootDiskSizeGb Number
- Size of the primary disk attached to each preemptible worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- bootDiskType String
- The disk type of the primary disk attached to each preemptible worker node.
One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
- localSsdInterface String
- Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express).
- numLocalSsds Number
- The amount of local SSD disks that will be attached to each preemptible worker node. Defaults to 0.
ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicy, ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyArgs                  
- InstanceSelectionLists List<ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionList>
- List of instance selection options that the group will use when creating new VMs.
- InstanceSelectionResults List<ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionResult>
- A list of instance selection results in the group.
- ProvisioningModelMix ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyProvisioningModelMix
- Defines how the Group selects the provisioning model to ensure required reliability.
- InstanceSelectionLists []ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionList
- List of instance selection options that the group will use when creating new VMs.
- InstanceSelectionResults []ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionResult
- A list of instance selection results in the group.
- ProvisioningModelMix ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyProvisioningModelMix
- Defines how the Group selects the provisioning model to ensure required reliability.
- instanceSelectionLists List<ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionList>
- List of instance selection options that the group will use when creating new VMs.
- instanceSelectionResults List<ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionResult>
- A list of instance selection results in the group.
- provisioningModelMix ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyProvisioningModelMix
- Defines how the Group selects the provisioning model to ensure required reliability.
- instanceSelectionLists ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionList[]
- List of instance selection options that the group will use when creating new VMs.
- instanceSelectionResults ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionResult[]
- A list of instance selection results in the group.
- provisioningModelMix ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyProvisioningModelMix
- Defines how the Group selects the provisioning model to ensure required reliability.
- instance_selection_lists Sequence[ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionList]
- List of instance selection options that the group will use when creating new VMs.
- instance_selection_results Sequence[ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionResult]
- A list of instance selection results in the group.
- provisioning_model_mix ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyProvisioningModelMix
- Defines how the Group selects the provisioning model to ensure required reliability.
- instanceSelectionLists List<Property Map>
- List of instance selection options that the group will use when creating new VMs.
- instanceSelectionResults List<Property Map>
- A list of instance selection results in the group.
- provisioningModelMix Property Map
- Defines how the Group selects the provisioning model to ensure required reliability.
ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionList, ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionListArgs                        
- MachineTypes List<string>
- Full machine-type names, e.g. "n1-standard-16".
- Rank int
- Preference of this instance selection. A lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference.
- MachineTypes []string
- Full machine-type names, e.g. "n1-standard-16".
- Rank int
- Preference of this instance selection. A lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference.
- machineTypes List<String>
- Full machine-type names, e.g. "n1-standard-16".
- rank Integer
- Preference of this instance selection. A lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference.
- machineTypes string[]
- Full machine-type names, e.g. "n1-standard-16".
- rank number
- Preference of this instance selection. A lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference.
- machine_types Sequence[str]
- Full machine-type names, e.g. "n1-standard-16".
- rank int
- Preference of this instance selection. A lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference.
- machineTypes List<String>
- Full machine-type names, e.g. "n1-standard-16".
- rank Number
- Preference of this instance selection. A lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference.
ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionResult, ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionResultArgs                        
- MachineType string
- Full machine-type names, e.g. "n1-standard-16".
- VmCount int
- Number of VM provisioned with the machine_type.
- MachineType string
- Full machine-type names, e.g. "n1-standard-16".
- VmCount int
- Number of VM provisioned with the machine_type.
- machineType String
- Full machine-type names, e.g. "n1-standard-16".
- vmCount Integer
- Number of VM provisioned with the machine_type.
- machineType string
- Full machine-type names, e.g. "n1-standard-16".
- vmCount number
- Number of VM provisioned with the machine_type.
- machine_type str
- Full machine-type names, e.g. "n1-standard-16".
- vm_count int
- Number of VM provisioned with the machine_type.
- machineType String
- Full machine-type names, e.g. "n1-standard-16".
- vmCount Number
- Number of VM provisioned with the machine_type.
ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyProvisioningModelMix, ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyProvisioningModelMixArgs                        
- StandardCapacityBase int
- The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standardCapacityBase, then it will start using standardCapacityPercentAboveBase to mix Spot with Standard VMs. eg. If 15 instances are requested and standardCapacityBase is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances.
- StandardCapacityPercentAboveBase int
- The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standardCapacityBase. eg. If 15 instances are requested and standardCapacityBase is 5 and standardCapacityPercentAboveBase is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot.
- StandardCapacityBase int
- The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standardCapacityBase, then it will start using standardCapacityPercentAboveBase to mix Spot with Standard VMs. eg. If 15 instances are requested and standardCapacityBase is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances.
- StandardCapacityPercentAboveBase int
- The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standardCapacityBase. eg. If 15 instances are requested and standardCapacityBase is 5 and standardCapacityPercentAboveBase is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot.
- standardCapacityBase Integer
- The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standardCapacityBase, then it will start using standardCapacityPercentAboveBase to mix Spot with Standard VMs. eg. If 15 instances are requested and standardCapacityBase is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances.
- standardCapacityPercentAboveBase Integer
- The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standardCapacityBase. eg. If 15 instances are requested and standardCapacityBase is 5 and standardCapacityPercentAboveBase is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot.
- standardCapacityBase number
- The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standardCapacityBase, then it will start using standardCapacityPercentAboveBase to mix Spot with Standard VMs. eg. If 15 instances are requested and standardCapacityBase is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances.
- standardCapacityPercentAboveBase number
- The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standardCapacityBase. eg. If 15 instances are requested and standardCapacityBase is 5 and standardCapacityPercentAboveBase is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot.
- standard_capacity_base int
- The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standardCapacityBase, then it will start using standardCapacityPercentAboveBase to mix Spot with Standard VMs. eg. If 15 instances are requested and standardCapacityBase is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances.
- standard_capacity_percent_above_base int
- The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standardCapacityBase. eg. If 15 instances are requested and standardCapacityBase is 5 and standardCapacityPercentAboveBase is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot.
- standardCapacityBase Number
- The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standardCapacityBase, then it will start using standardCapacityPercentAboveBase to mix Spot with Standard VMs. eg. If 15 instances are requested and standardCapacityBase is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances.
- standardCapacityPercentAboveBase Number
- The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standardCapacityBase. eg. If 15 instances are requested and standardCapacityBase is 5 and standardCapacityPercentAboveBase is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot.
ClusterClusterConfigSecurityConfig, ClusterClusterConfigSecurityConfigArgs          
- KerberosConfig ClusterClusterConfigSecurityConfigKerberosConfig
- Kerberos Configuration
- KerberosConfig ClusterClusterConfigSecurityConfigKerberosConfig
- Kerberos Configuration
- kerberosConfig ClusterClusterConfigSecurityConfigKerberosConfig
- Kerberos Configuration
- kerberosConfig ClusterClusterConfigSecurityConfigKerberosConfig
- Kerberos Configuration
- kerberos_config ClusterClusterConfigSecurityConfigKerberosConfig
- Kerberos Configuration
- kerberosConfig Property Map
- Kerberos Configuration
ClusterClusterConfigSecurityConfigKerberosConfig, ClusterClusterConfigSecurityConfigKerberosConfigArgs              
- KmsKeyUri string
- The URI of the KMS key used to encrypt various sensitive files.
- RootPrincipalPasswordUri string
- The Cloud Storage URI of a KMS encrypted file containing the root principal password.
- CrossRealmTrustAdminServer string
- The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
- CrossRealmTrustKdc string
- The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
- CrossRealmTrustRealm string
- The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.
- CrossRealmTrustSharedPasswordUri string
- The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.
- EnableKerberos bool
- Flag to indicate whether to Kerberize the cluster.
- KdcDbKeyUri string
- The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.
- KeyPasswordUri string
- The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.
- KeystorePasswordUri string
- The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, the password is generated by Dataproc.
- KeystoreUri string
- The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
- Realm string
- The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.
- TgtLifetimeHours int
- The lifetime of the ticket granting ticket, in hours.
- TruststorePasswordUri string
- The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.
- TruststoreUri string
- The Cloud Storage URI of the truststore file used for
SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
- KmsKeyUri string
- The URI of the KMS key used to encrypt various sensitive files.
- RootPrincipalPasswordUri string
- The Cloud Storage URI of a KMS encrypted file containing the root principal password.
- CrossRealmTrustAdminServer string
- The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
- CrossRealmTrustKdc string
- The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
- CrossRealmTrustRealm string
- The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.
- CrossRealmTrustSharedPasswordUri string
- The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.
- EnableKerberos bool
- Flag to indicate whether to Kerberize the cluster.
- KdcDbKeyUri string
- The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.
- KeyPasswordUri string
- The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.
- KeystorePasswordUri string
- The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, the password is generated by Dataproc.
- KeystoreUri string
- The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
- Realm string
- The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.
- TgtLifetimeHours int
- The lifetime of the ticket granting ticket, in hours.
- TruststorePasswordUri string
- The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.
- TruststoreUri string
- The Cloud Storage URI of the truststore file used for
SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
- kmsKeyUri String
- The URI of the KMS key used to encrypt various sensitive files.
- rootPrincipalPasswordUri String
- The Cloud Storage URI of a KMS encrypted file containing the root principal password.
- crossRealmTrustAdminServer String
- The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
- crossRealmTrustKdc String
- The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
- crossRealmTrustRealm String
- The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.
- crossRealmTrustSharedPasswordUri String
- The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.
- enableKerberos Boolean
- Flag to indicate whether to Kerberize the cluster.
- kdcDbKeyUri String
- The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.
- keyPasswordUri String
- The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.
- keystorePasswordUri String
- The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, the password is generated by Dataproc.
- keystoreUri String
- The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
- realm String
- The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.
- tgtLifetimeHours Integer
- The lifetime of the ticket granting ticket, in hours.
- truststorePasswordUri String
- The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.
- truststoreUri String
- The Cloud Storage URI of the truststore file used for
SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
- kmsKeyUri string
- The URI of the KMS key used to encrypt various sensitive files.
- rootPrincipalPasswordUri string
- The Cloud Storage URI of a KMS encrypted file containing the root principal password.
- crossRealmTrustAdminServer string
- The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
- crossRealmTrustKdc string
- The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
- crossRealmTrustRealm string
- The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.
- crossRealmTrustSharedPasswordUri string
- The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.
- enableKerberos boolean
- Flag to indicate whether to Kerberize the cluster.
- kdcDbKeyUri string
- The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.
- keyPasswordUri string
- The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.
- keystorePasswordUri string
- The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, the password is generated by Dataproc.
- keystoreUri string
- The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
- realm string
- The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.
- tgtLifetimeHours number
- The lifetime of the ticket granting ticket, in hours.
- truststorePasswordUri string
- The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.
- truststoreUri string
- The Cloud Storage URI of the truststore file used for
SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
- kms_key_uri str
- The URI of the KMS key used to encrypt various sensitive files.
- root_principal_password_uri str
- The Cloud Storage URI of a KMS encrypted file containing the root principal password.
- cross_realm_trust_admin_server str
- The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
- cross_realm_trust_kdc str
- The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
- cross_realm_trust_realm str
- The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.
- cross_realm_trust_shared_password_uri str
- The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.
- enable_kerberos bool
- Flag to indicate whether to Kerberize the cluster.
- kdc_db_key_uri str
- The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.
- key_password_uri str
- The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.
- keystore_password_uri str
- The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, the password is generated by Dataproc.
- keystore_uri str
- The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
- realm str
- The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.
- tgt_lifetime_hours int
- The lifetime of the ticket granting ticket, in hours.
- truststore_password_uri str
- The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.
- truststore_uri str
- The Cloud Storage URI of the truststore file used for
SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
- kmsKeyUri String
- The URI of the KMS key used to encrypt various sensitive files.
- rootPrincipalPasswordUri String
- The Cloud Storage URI of a KMS encrypted file containing the root principal password.
- crossRealmTrustAdminServer String
- The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
- crossRealmTrustKdc String
- The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
- crossRealmTrustRealm String
- The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.
- crossRealmTrustSharedPasswordUri String
- The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.
- enableKerberos Boolean
- Flag to indicate whether to Kerberize the cluster.
- kdcDbKeyUri String
- The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.
- keyPasswordUri String
- The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.
- keystorePasswordUri String
- The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, the password is generated by Dataproc.
- keystoreUri String
- The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
- realm String
- The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.
- tgtLifetimeHours Number
- The lifetime of the ticket granting ticket, in hours.
- truststorePasswordUri String
- The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.
- truststoreUri String
- The Cloud Storage URI of the truststore file used for
SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
ClusterClusterConfigSoftwareConfig, ClusterClusterConfigSoftwareConfigArgs          
- ImageVersion string
- The Cloud Dataproc image version to use for the cluster - this controls the sets of software versions installed onto the nodes when you create clusters. If not specified, defaults to the latest version. For a list of valid versions see Cloud Dataproc versions
- OptionalComponents List<string>
- The set of optional components to activate on the cluster. See Available Optional Components.
- OverrideProperties Dictionary<string, string>
- A list of override and additional properties (key/value pairs) used to modify various aspects of the common configuration files used when creating a cluster. For a list of valid properties please see Cluster properties
- Properties Dictionary<string, string>
- A list of the properties used to set the daemon config files.
This will include any values supplied by the user via cluster_config.software_config.override_properties
- ImageVersion string
- The Cloud Dataproc image version to use for the cluster - this controls the sets of software versions installed onto the nodes when you create clusters. If not specified, defaults to the latest version. For a list of valid versions see Cloud Dataproc versions
- OptionalComponents []string
- The set of optional components to activate on the cluster. See Available Optional Components.
- OverrideProperties map[string]string
- A list of override and additional properties (key/value pairs) used to modify various aspects of the common configuration files used when creating a cluster. For a list of valid properties please see Cluster properties
- Properties map[string]string
- A list of the properties used to set the daemon config files.
This will include any values supplied by the user via cluster_config.software_config.override_properties
- imageVersion String
- The Cloud Dataproc image version to use for the cluster - this controls the sets of software versions installed onto the nodes when you create clusters. If not specified, defaults to the latest version. For a list of valid versions see Cloud Dataproc versions
- optionalComponents List<String>
- The set of optional components to activate on the cluster. See Available Optional Components.
- overrideProperties Map<String,String>
- A list of override and additional properties (key/value pairs) used to modify various aspects of the common configuration files used when creating a cluster. For a list of valid properties please see Cluster properties
- properties Map<String,String>
- A list of the properties used to set the daemon config files.
This will include any values supplied by the user via cluster_config.software_config.override_properties
- imageVersion string
- The Cloud Dataproc image version to use for the cluster - this controls the sets of software versions installed onto the nodes when you create clusters. If not specified, defaults to the latest version. For a list of valid versions see Cloud Dataproc versions
- optionalComponents string[]
- The set of optional components to activate on the cluster. See Available Optional Components.
- overrideProperties {[key: string]: string}
- A list of override and additional properties (key/value pairs) used to modify various aspects of the common configuration files used when creating a cluster. For a list of valid properties please see Cluster properties
- properties {[key: string]: string}
- A list of the properties used to set the daemon config files.
This will include any values supplied by the user via cluster_config.software_config.override_properties
- image_version str
- The Cloud Dataproc image version to use for the cluster - this controls the sets of software versions installed onto the nodes when you create clusters. If not specified, defaults to the latest version. For a list of valid versions see Cloud Dataproc versions
- optional_components Sequence[str]
- The set of optional components to activate on the cluster. See Available Optional Components.
- override_properties Mapping[str, str]
- A list of override and additional properties (key/value pairs) used to modify various aspects of the common configuration files used when creating a cluster. For a list of valid properties please see Cluster properties
- properties Mapping[str, str]
- A list of the properties used to set the daemon config files.
This will include any values supplied by the user via cluster_config.software_config.override_properties
- imageVersion String
- The Cloud Dataproc image version to use for the cluster - this controls the sets of software versions installed onto the nodes when you create clusters. If not specified, defaults to the latest version. For a list of valid versions see Cloud Dataproc versions
- optionalComponents List<String>
- The set of optional components to activate on the cluster. See Available Optional Components.
- overrideProperties Map<String>
- A list of override and additional properties (key/value pairs) used to modify various aspects of the common configuration files used when creating a cluster. For a list of valid properties please see Cluster properties
- properties Map<String>
- A list of the properties used to set the daemon config files.
This will include any values supplied by the user via cluster_config.software_config.override_properties
ClusterClusterConfigWorkerConfig, ClusterClusterConfigWorkerConfigArgs          
- Accelerators
List<ClusterClusterConfigWorkerConfigAccelerator>
- The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
- DiskConfig ClusterClusterConfigWorkerConfigDiskConfig
- Disk Config
- ImageUri string
- The URI for the image to use for this worker. See the guide for more information.
- InstanceNames List<string>
- List of worker instance names which have been assigned to the cluster.
- MachineType string
- The name of a Google Compute Engine machine type
to create for the worker nodes. If not specified, GCP will default to a predetermined
computed value (currently n1-standard-4).
- MinCpuPlatform string
- The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
- MinNumInstances int
- The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.
- NumInstances int
- Specifies the number of worker nodes to create.
If not specified, GCP will default to a predetermined computed value (currently 2).
There is currently a beta feature which allows you to run a
Single Node Cluster.
In order to take advantage of this you need to set
"dataproc:dataproc.allow.zero.workers" = "true" in cluster_config.software_config.properties
- Accelerators
[]ClusterClusterConfigWorkerConfigAccelerator
- The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
- DiskConfig ClusterClusterConfigWorkerConfigDiskConfig
- Disk Config
- ImageUri string
- The URI for the image to use for this worker. See the guide for more information.
- InstanceNames []string
- List of worker instance names which have been assigned to the cluster.
- MachineType string
- The name of a Google Compute Engine machine type
to create for the worker nodes. If not specified, GCP will default to a predetermined
computed value (currently n1-standard-4).
- MinCpuPlatform string
- The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
- MinNumInstances int
- The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.
- NumInstances int
- Specifies the number of worker nodes to create.
If not specified, GCP will default to a predetermined computed value (currently 2).
There is currently a beta feature which allows you to run a
Single Node Cluster.
In order to take advantage of this you need to set
"dataproc:dataproc.allow.zero.workers" = "true" in cluster_config.software_config.properties
- accelerators
List<ClusterClusterConfigWorkerConfigAccelerator>
- The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
- diskConfig ClusterClusterConfigWorkerConfigDiskConfig
- Disk Config
- imageUri String
- The URI for the image to use for this worker. See the guide for more information.
- instanceNames List<String>
- List of worker instance names which have been assigned to the cluster.
- machineType String
- The name of a Google Compute Engine machine type
to create for the worker nodes. If not specified, GCP will default to a predetermined
computed value (currently n1-standard-4).
- minCpuPlatform String
- The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
- minNumInstances Integer
- The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.
- numInstances Integer
- Specifies the number of worker nodes to create.
If not specified, GCP will default to a predetermined computed value (currently 2).
There is currently a beta feature which allows you to run a
Single Node Cluster.
In order to take advantage of this you need to set
"dataproc:dataproc.allow.zero.workers" = "true" in cluster_config.software_config.properties
- accelerators
ClusterClusterConfigWorkerConfigAccelerator[]
- The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
- diskConfig ClusterClusterConfigWorkerConfigDiskConfig
- Disk Config
- imageUri string
- The URI for the image to use for this worker. See the guide for more information.
- instanceNames string[]
- List of worker instance names which have been assigned to the cluster.
- machineType string
- The name of a Google Compute Engine machine type
to create for the worker nodes. If not specified, GCP will default to a predetermined
computed value (currently n1-standard-4).
- minCpuPlatform string
- The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
- minNumInstances number
- The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.
- numInstances number
- Specifies the number of worker nodes to create.
If not specified, GCP will default to a predetermined computed value (currently 2).
There is currently a beta feature which allows you to run a
Single Node Cluster.
In order to take advantage of this you need to set
"dataproc:dataproc.allow.zero.workers" = "true" in cluster_config.software_config.properties
- accelerators
Sequence[ClusterClusterConfigWorkerConfigAccelerator]
- The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
- disk_config ClusterClusterConfigWorkerConfigDiskConfig
- Disk Config
- image_uri str
- The URI for the image to use for this worker. See the guide for more information.
- instance_names Sequence[str]
- List of worker instance names which have been assigned to the cluster.
- machine_type str
- The name of a Google Compute Engine machine type
to create for the worker nodes. If not specified, GCP will default to a predetermined
computed value (currently n1-standard-4).
- min_cpu_platform str
- The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
- min_num_instances int
- The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.
- num_instances int
- Specifies the number of worker nodes to create.
If not specified, GCP will default to a predetermined computed value (currently 2).
There is currently a beta feature which allows you to run a
Single Node Cluster.
In order to take advantage of this you need to set
"dataproc:dataproc.allow.zero.workers" = "true" in cluster_config.software_config.properties
- accelerators List<Property Map>
- The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
- diskConfig Property Map
- Disk Config
- imageUri String
- The URI for the image to use for this worker. See the guide for more information.
- instanceNames List<String>
- List of worker instance names which have been assigned to the cluster.
- machineType String
- The name of a Google Compute Engine machine type
to create for the worker nodes. If not specified, GCP will default to a predetermined
computed value (currently n1-standard-4).
- minCpuPlatform String
- The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
- minNumInstances Number
- The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.
- numInstances Number
- Specifies the number of worker nodes to create.
If not specified, GCP will default to a predetermined computed value (currently 2).
There is currently a beta feature which allows you to run a
Single Node Cluster.
In order to take advantage of this you need to set
"dataproc:dataproc.allow.zero.workers" = "true" in cluster_config.software_config.properties
ClusterClusterConfigWorkerConfigAccelerator, ClusterClusterConfigWorkerConfigAcceleratorArgs            
- AcceleratorCount int
- The number of the accelerator cards of this type exposed to this instance. Often restricted to one of - 1,- 2,- 4, or- 8.- The Cloud Dataproc API can return unintuitive error messages when using accelerators; even when you have defined an accelerator, Auto Zone Placement does not exclusively select zones that have that accelerator available. If you get a 400 error that the accelerator can't be found, this is a likely cause. Make sure you check accelerator availability by zone if you are trying to use accelerators in a given zone. 
- AcceleratorType string
- The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
- AcceleratorCount int
- The number of the accelerator cards of this type exposed to this instance. Often restricted to one of - 1,- 2,- 4, or- 8.- The Cloud Dataproc API can return unintuitive error messages when using accelerators; even when you have defined an accelerator, Auto Zone Placement does not exclusively select zones that have that accelerator available. If you get a 400 error that the accelerator can't be found, this is a likely cause. Make sure you check accelerator availability by zone if you are trying to use accelerators in a given zone. 
- AcceleratorType string
- The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
- acceleratorCount Integer
- The number of the accelerator cards of this type exposed to this instance. Often restricted to one of - 1,- 2,- 4, or- 8.- The Cloud Dataproc API can return unintuitive error messages when using accelerators; even when you have defined an accelerator, Auto Zone Placement does not exclusively select zones that have that accelerator available. If you get a 400 error that the accelerator can't be found, this is a likely cause. Make sure you check accelerator availability by zone if you are trying to use accelerators in a given zone. 
- acceleratorType String
- The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
- acceleratorCount number
- The number of the accelerator cards of this type exposed to this instance. Often restricted to one of - 1,- 2,- 4, or- 8.- The Cloud Dataproc API can return unintuitive error messages when using accelerators; even when you have defined an accelerator, Auto Zone Placement does not exclusively select zones that have that accelerator available. If you get a 400 error that the accelerator can't be found, this is a likely cause. Make sure you check accelerator availability by zone if you are trying to use accelerators in a given zone. 
- acceleratorType string
- The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
- accelerator_count int
- The number of the accelerator cards of this type exposed to this instance. Often restricted to one of - 1,- 2,- 4, or- 8.- The Cloud Dataproc API can return unintuitive error messages when using accelerators; even when you have defined an accelerator, Auto Zone Placement does not exclusively select zones that have that accelerator available. If you get a 400 error that the accelerator can't be found, this is a likely cause. Make sure you check accelerator availability by zone if you are trying to use accelerators in a given zone. 
- accelerator_type str
- The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
- acceleratorCount Number
- The number of the accelerator cards of this type exposed to this instance. Often restricted to one of - 1,- 2,- 4, or- 8.- The Cloud Dataproc API can return unintuitive error messages when using accelerators; even when you have defined an accelerator, Auto Zone Placement does not exclusively select zones that have that accelerator available. If you get a 400 error that the accelerator can't be found, this is a likely cause. Make sure you check accelerator availability by zone if you are trying to use accelerators in a given zone. 
- acceleratorType String
- The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
ClusterClusterConfigWorkerConfigDiskConfig, ClusterClusterConfigWorkerConfigDiskConfigArgs              
- BootDiskSizeGb int
- Size of the primary disk attached to each worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- BootDiskType string
- The disk type of the primary disk attached to each node.
One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
- LocalSsdInterface string
- Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express).
- NumLocalSsds int
- The amount of local SSD disks that will be attached to each worker cluster node. Defaults to 0.
- BootDiskSizeGb int
- Size of the primary disk attached to each worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- BootDiskType string
- The disk type of the primary disk attached to each node.
One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
- LocalSsdInterface string
- Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express).
- NumLocalSsds int
- The amount of local SSD disks that will be attached to each worker cluster node. Defaults to 0.
- bootDiskSizeGb Integer
- Size of the primary disk attached to each worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- bootDiskType String
- The disk type of the primary disk attached to each node.
One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
- localSsdInterface String
- Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express).
- numLocalSsds Integer
- The amount of local SSD disks that will be attached to each worker cluster node. Defaults to 0.
- bootDiskSizeGb number
- Size of the primary disk attached to each worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- bootDiskType string
- The disk type of the primary disk attached to each node.
One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
- localSsdInterface string
- Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express).
- numLocalSsds number
- The amount of local SSD disks that will be attached to each worker cluster node. Defaults to 0.
- boot_disk_size_gb int
- Size of the primary disk attached to each worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- boot_disk_type str
- The disk type of the primary disk attached to each node.
One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
- local_ssd_interface str
- Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express).
- num_local_ssds int
- The amount of local SSD disks that will be attached to each worker cluster node. Defaults to 0.
- bootDiskSizeGb Number
- Size of the primary disk attached to each worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- bootDiskType String
- The disk type of the primary disk attached to each node.
One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
- localSsdInterface String
- Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express).
- numLocalSsds Number
- The amount of local SSD disks that will be attached to each worker cluster node. Defaults to 0.
ClusterVirtualClusterConfig, ClusterVirtualClusterConfigArgs        
- AuxiliaryServicesConfig ClusterVirtualClusterConfigAuxiliaryServicesConfig
- Configuration of auxiliary services used by this cluster. Structure defined below.
- KubernetesClusterConfig ClusterVirtualClusterConfigKubernetesClusterConfig
- The configuration for running the Dataproc cluster on Kubernetes.
Structure defined below.
- StagingBucket string
- The Cloud Storage staging bucket used to stage files,
such as Hadoop jars, between client machines and the cluster.
Note: If you don't explicitly specify a staging_bucket then GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.
- AuxiliaryServicesConfig ClusterVirtualClusterConfigAuxiliaryServicesConfig
- Configuration of auxiliary services used by this cluster. Structure defined below.
- KubernetesClusterConfig ClusterVirtualClusterConfigKubernetesClusterConfig
- The configuration for running the Dataproc cluster on Kubernetes.
Structure defined below.
- StagingBucket string
- The Cloud Storage staging bucket used to stage files,
such as Hadoop jars, between client machines and the cluster.
Note: If you don't explicitly specify a staging_bucket then GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.
- auxiliaryServicesConfig ClusterVirtualClusterConfigAuxiliaryServicesConfig
- Configuration of auxiliary services used by this cluster. Structure defined below.
- kubernetesClusterConfig ClusterVirtualClusterConfigKubernetesClusterConfig
- The configuration for running the Dataproc cluster on Kubernetes.
Structure defined below.
- stagingBucket String
- The Cloud Storage staging bucket used to stage files,
such as Hadoop jars, between client machines and the cluster.
Note: If you don't explicitly specify a staging_bucket then GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.
- auxiliaryServicesConfig ClusterVirtualClusterConfigAuxiliaryServicesConfig
- Configuration of auxiliary services used by this cluster. Structure defined below.
- kubernetesClusterConfig ClusterVirtualClusterConfigKubernetesClusterConfig
- The configuration for running the Dataproc cluster on Kubernetes.
Structure defined below.
- stagingBucket string
- The Cloud Storage staging bucket used to stage files,
such as Hadoop jars, between client machines and the cluster.
Note: If you don't explicitly specify a staging_bucket then GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.
- auxiliary_services_config ClusterVirtualClusterConfigAuxiliaryServicesConfig
- Configuration of auxiliary services used by this cluster. Structure defined below.
- kubernetes_cluster_config ClusterVirtualClusterConfigKubernetesClusterConfig
- The configuration for running the Dataproc cluster on Kubernetes.
Structure defined below.
- staging_bucket str
- The Cloud Storage staging bucket used to stage files,
such as Hadoop jars, between client machines and the cluster.
Note: If you don't explicitly specify a staging_bucket then GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.
- auxiliaryServicesConfig Property Map
- Configuration of auxiliary services used by this cluster. Structure defined below.
- kubernetesClusterConfig Property Map
- The configuration for running the Dataproc cluster on Kubernetes.
Structure defined below.
- stagingBucket String
- The Cloud Storage staging bucket used to stage files,
such as Hadoop jars, between client machines and the cluster.
Note: If you don't explicitly specify a staging_bucket then GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.
ClusterVirtualClusterConfigAuxiliaryServicesConfig, ClusterVirtualClusterConfigAuxiliaryServicesConfigArgs              
- MetastoreConfig ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig
- The Hive Metastore configuration for this workload.
- SparkHistoryServerConfig ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig
- The Spark History Server configuration for the workload.
- MetastoreConfig ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig
- The Hive Metastore configuration for this workload.
- SparkHistoryServerConfig ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig
- The Spark History Server configuration for the workload.
- metastoreConfig ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig
- The Hive Metastore configuration for this workload.
- sparkHistoryServerConfig ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig
- The Spark History Server configuration for the workload.
- metastoreConfig ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig
- The Hive Metastore configuration for this workload.
- sparkHistoryServerConfig ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig
- The Spark History Server configuration for the workload.
- metastore_config ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig
- The Hive Metastore configuration for this workload.
- spark_history_server_config ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig
- The Spark History Server configuration for the workload.
- metastoreConfig Property Map
- The Hive Metastore configuration for this workload.
- sparkHistoryServerConfig Property Map
- The Spark History Server configuration for the workload.
ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig, ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigArgs                  
- DataprocMetastoreService string
- Resource name of an existing Dataproc Metastore service.
- DataprocMetastoreService string
- Resource name of an existing Dataproc Metastore service.
- dataprocMetastoreService String
- Resource name of an existing Dataproc Metastore service.
- dataprocMetastoreService string
- Resource name of an existing Dataproc Metastore service.
- dataproc_metastore_service str
- Resource name of an existing Dataproc Metastore service.
- dataprocMetastoreService String
- Resource name of an existing Dataproc Metastore service.
ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig, ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigArgs                      
- DataprocCluster string
- Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.
- DataprocCluster string
- Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.
- dataprocCluster String
- Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.
- dataprocCluster string
- Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.
- dataproc_cluster str
- Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.
- dataprocCluster String
- Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.
ClusterVirtualClusterConfigKubernetesClusterConfig, ClusterVirtualClusterConfigKubernetesClusterConfigArgs              
- GkeClusterConfig ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig
- The configuration for running the Dataproc cluster on GKE.
- KubernetesSoftwareConfig ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig
- The software configuration for this Dataproc cluster running on Kubernetes.
- KubernetesNamespace string
- A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used.
- GkeClusterConfig ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig
- The configuration for running the Dataproc cluster on GKE.
- KubernetesSoftwareConfig ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig
- The software configuration for this Dataproc cluster running on Kubernetes.
- KubernetesNamespace string
- A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used.
- gkeClusterConfig ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig
- The configuration for running the Dataproc cluster on GKE.
- kubernetesSoftwareConfig ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig
- The software configuration for this Dataproc cluster running on Kubernetes.
- kubernetesNamespace String
- A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used.
- gkeClusterConfig ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig
- The configuration for running the Dataproc cluster on GKE.
- kubernetesSoftwareConfig ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig
- The software configuration for this Dataproc cluster running on Kubernetes.
- kubernetesNamespace string
- A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used.
- gke_cluster_config ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig
- The configuration for running the Dataproc cluster on GKE.
- kubernetes_software_config ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig
- The software configuration for this Dataproc cluster running on Kubernetes.
- kubernetes_namespace str
- A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used.
- gkeClusterConfig Property Map
- The configuration for running the Dataproc cluster on GKE.
- kubernetesSoftwareConfig Property Map
- The software configuration for this Dataproc cluster running on Kubernetes.
- kubernetesNamespace String
- A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used.
ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig, ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigArgs                    
- GkeClusterTarget string
- A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional)
- NodePoolTargets List<ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget>
- GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings.
- GkeClusterTarget string
- A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional)
- NodePoolTargets []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget
- GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings.
- gkeClusterTarget String
- A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional)
- nodePoolTargets List<ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget>
- GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings.
- gkeClusterTarget string
- A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional)
- nodePoolTargets ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget[]
- GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings.
- gke_cluster_target str
- A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional)
- node_pool_targets Sequence[ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget]
- GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings.
- gkeClusterTarget String
- A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional)
- nodePoolTargets List<Property Map>
- GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings.
ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget, ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetArgs                          
- NodePool string
- The target GKE node pool.
- Roles List<string>
- The roles associated with the GKE node pool.
One of "DEFAULT", "CONTROLLER", "SPARK_DRIVER" or "SPARK_EXECUTOR".
- NodePoolConfig ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig
- The configuration for the GKE node pool. If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail.
- NodePool string
- The target GKE node pool.
- Roles []string
- The roles associated with the GKE node pool.
One of "DEFAULT", "CONTROLLER", "SPARK_DRIVER" or "SPARK_EXECUTOR".
- NodePoolConfig ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig
- The configuration for the GKE node pool. If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail.
- nodePool String
- The target GKE node pool.
- roles List<String>
- The roles associated with the GKE node pool.
One of "DEFAULT", "CONTROLLER", "SPARK_DRIVER" or "SPARK_EXECUTOR".
- nodePoolConfig ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig
- The configuration for the GKE node pool. If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail.
- nodePool string
- The target GKE node pool.
- roles string[]
- The roles associated with the GKE node pool.
One of "DEFAULT", "CONTROLLER", "SPARK_DRIVER" or "SPARK_EXECUTOR".
- nodePoolConfig ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig
- The configuration for the GKE node pool. If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail.
- node_pool str
- The target GKE node pool.
- roles Sequence[str]
- The roles associated with the GKE node pool.
One of "DEFAULT", "CONTROLLER", "SPARK_DRIVER" or "SPARK_EXECUTOR".
- node_pool_config ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig
- The configuration for the GKE node pool. If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail.
- nodePool String
- The target GKE node pool.
- roles List<String>
- The roles associated with the GKE node pool.
One of "DEFAULT", "CONTROLLER", "SPARK_DRIVER" or "SPARK_EXECUTOR".
- nodePoolConfig Property Map
- The configuration for the GKE node pool. If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail.
ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig, ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigArgs                                
- Locations List<string>
- The list of Compute Engine zones where node pool nodes associated
with a Dataproc on GKE virtual cluster will be located.
- Autoscaling ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling
- The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present.
- Config ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig
- The node pool configuration.
- Locations []string
- The list of Compute Engine zones where node pool nodes associated
with a Dataproc on GKE virtual cluster will be located.
- Autoscaling ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling
- The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present.
- Config ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig
- The node pool configuration.
- locations List<String>
- The list of Compute Engine zones where node pool nodes associated
with a Dataproc on GKE virtual cluster will be located.
- autoscaling ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling
- The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present.
- config ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig
- The node pool configuration.
- locations string[]
- The list of Compute Engine zones where node pool nodes associated
with a Dataproc on GKE virtual cluster will be located.
- autoscaling ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling
- The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present.
- config ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig
- The node pool configuration.
- locations Sequence[str]
- The list of Compute Engine zones where node pool nodes associated
with a Dataproc on GKE virtual cluster will be located.
- autoscaling ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling
- The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present.
- config ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig
- The node pool configuration.
- locations List<String>
- The list of Compute Engine zones where node pool nodes associated
with a Dataproc on GKE virtual cluster will be located.
- autoscaling Property Map
- The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present.
- config Property Map
- The node pool configuration.
ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling, ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingArgs                                  
- MaxNodeCount int
- The maximum number of nodes in the node pool. Must be >= minNodeCount, and must be > 0.
- MinNodeCount int
- The minimum number of nodes in the node pool. Must be >= 0 and <= maxNodeCount.
- MaxNodeCount int
- The maximum number of nodes in the node pool. Must be >= minNodeCount, and must be > 0.
- MinNodeCount int
- The minimum number of nodes in the node pool. Must be >= 0 and <= maxNodeCount.
- maxNodeCount Integer
- The maximum number of nodes in the node pool. Must be >= minNodeCount, and must be > 0.
- minNodeCount Integer
- The minimum number of nodes in the node pool. Must be >= 0 and <= maxNodeCount.
- maxNodeCount number
- The maximum number of nodes in the node pool. Must be >= minNodeCount, and must be > 0.
- minNodeCount number
- The minimum number of nodes in the node pool. Must be >= 0 and <= maxNodeCount.
- max_node_count int
- The maximum number of nodes in the node pool. Must be >= minNodeCount, and must be > 0.
- min_node_count int
- The minimum number of nodes in the node pool. Must be >= 0 and <= maxNodeCount.
- maxNodeCount Number
- The maximum number of nodes in the node pool. Must be >= minNodeCount, and must be > 0.
- minNodeCount Number
- The minimum number of nodes in the node pool. Must be >= 0 and <= maxNodeCount.
ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig, ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigArgs                                  
- LocalSsdCount int
- The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone.
- MachineType string
- The name of a Compute Engine machine type.
- MinCpuPlatform string
- Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or a newer CPU platform. Specify the friendly names of CPU platforms, such as "Intel Haswell" or "Intel Sandy Bridge".
- Preemptible bool
- Whether the nodes are created as preemptible VM instances. Preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).
- Spot bool
- Spot flag for enabling Spot VM, which is a rebrand of the existing preemptible flag.
- LocalSsdCount int
- The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone.
- MachineType string
- The name of a Compute Engine machine type.
- MinCpuPlatform string
- Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or a newer CPU platform. Specify the friendly names of CPU platforms, such as "Intel Haswell" or "Intel Sandy Bridge".
- Preemptible bool
- Whether the nodes are created as preemptible VM instances. Preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).
- Spot bool
- Spot flag for enabling Spot VM, which is a rebrand of the existing preemptible flag.
- localSsdCount Integer
- The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone.
- machineType String
- The name of a Compute Engine machine type.
- minCpuPlatform String
- Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or a newer CPU platform. Specify the friendly names of CPU platforms, such as "Intel Haswell" or "Intel Sandy Bridge".
- preemptible Boolean
- Whether the nodes are created as preemptible VM instances. Preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).
- spot Boolean
- Spot flag for enabling Spot VM, which is a rebrand of the existing preemptible flag.
- localSsdCount number
- The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone.
- machineType string
- The name of a Compute Engine machine type.
- minCpuPlatform string
- Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or a newer CPU platform. Specify the friendly names of CPU platforms, such as "Intel Haswell" or "Intel Sandy Bridge".
- preemptible boolean
- Whether the nodes are created as preemptible VM instances. Preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).
- spot boolean
- Spot flag for enabling Spot VM, which is a rebrand of the existing preemptible flag.
- local_ssd_count int
- The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone.
- machine_type str
- The name of a Compute Engine machine type.
- min_cpu_platform str
- Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or a newer CPU platform. Specify the friendly names of CPU platforms, such as "Intel Haswell" or "Intel Sandy Bridge".
- preemptible bool
- Whether the nodes are created as preemptible VM instances. Preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).
- spot bool
- Spot flag for enabling Spot VM, which is a rebrand of the existing preemptible flag.
- localSsdCount Number
- The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone.
- machineType String
- The name of a Compute Engine machine type.
- minCpuPlatform String
- Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or a newer CPU platform. Specify the friendly names of CPU platforms, such as "Intel Haswell" or "Intel Sandy Bridge".
- preemptible Boolean
- Whether the nodes are created as preemptible VM instances. Preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).
- spot Boolean
- Spot flag for enabling Spot VM, which is a rebrand of the existing preemptible flag.
ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig, ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigArgs                    
- ComponentVersion Dictionary<string, string>
- The components that should be installed in this Dataproc cluster. The key must be a string from the
 KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified. NOTE: component_version[SPARK] is mandatory to set, or the creation of the cluster will fail.
- Properties Dictionary<string, string>
- The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image.
- ComponentVersion map[string]string
- The components that should be installed in this Dataproc cluster. The key must be a string from the
 KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified. NOTE: component_version[SPARK] is mandatory to set, or the creation of the cluster will fail.
- Properties map[string]string
- The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image.
- componentVersion Map<String,String>
- The components that should be installed in this Dataproc cluster. The key must be a string from the
 KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified. NOTE: component_version[SPARK] is mandatory to set, or the creation of the cluster will fail.
- properties Map<String,String>
- The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image.
- componentVersion {[key: string]: string}
- The components that should be installed in this Dataproc cluster. The key must be a string from the
 KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified. NOTE: component_version[SPARK] is mandatory to set, or the creation of the cluster will fail.
- properties {[key: string]: string}
- The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image.
- component_version Mapping[str, str]
- The components that should be installed in this Dataproc cluster. The key must be a string from the
 KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified. NOTE: component_version[SPARK] is mandatory to set, or the creation of the cluster will fail.
- properties Mapping[str, str]
- The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image.
- componentVersion Map<String>
- The components that should be installed in this Dataproc cluster. The key must be a string from the
 KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified. NOTE: component_version[SPARK] is mandatory to set, or the creation of the cluster will fail.
- properties Map<String>
- The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image.
Import
This resource does not support import.
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the google-beta Terraform Provider.