gcp.dataproc.MetastoreService
A managed metastore service that serves metadata queries.
To get more information about the Dataproc Metastore Service, see:
- API documentation
- How-to Guides
Example Usage
Dataproc Metastore Service Basic
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const _default = new gcp.dataproc.MetastoreService("default", {
    serviceId: "metastore-srv",
    location: "us-central1",
    port: 9080,
    tier: "DEVELOPER",
    maintenanceWindow: {
        hourOfDay: 2,
        dayOfWeek: "SUNDAY",
    },
    hiveMetastoreConfig: {
        version: "2.3.6",
    },
    labels: {
        env: "test",
    },
});
import pulumi
import pulumi_gcp as gcp
default = gcp.dataproc.MetastoreService("default",
    service_id="metastore-srv",
    location="us-central1",
    port=9080,
    tier="DEVELOPER",
    maintenance_window={
        "hour_of_day": 2,
        "day_of_week": "SUNDAY",
    },
    hive_metastore_config={
        "version": "2.3.6",
    },
    labels={
        "env": "test",
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewMetastoreService(ctx, "default", &dataproc.MetastoreServiceArgs{
			ServiceId: pulumi.String("metastore-srv"),
			Location:  pulumi.String("us-central1"),
			Port:      pulumi.Int(9080),
			Tier:      pulumi.String("DEVELOPER"),
			MaintenanceWindow: &dataproc.MetastoreServiceMaintenanceWindowArgs{
				HourOfDay: pulumi.Int(2),
				DayOfWeek: pulumi.String("SUNDAY"),
			},
			HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
				Version: pulumi.String("2.3.6"),
			},
			Labels: pulumi.StringMap{
				"env": pulumi.String("test"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var @default = new Gcp.Dataproc.MetastoreService("default", new()
    {
        ServiceId = "metastore-srv",
        Location = "us-central1",
        Port = 9080,
        Tier = "DEVELOPER",
        MaintenanceWindow = new Gcp.Dataproc.Inputs.MetastoreServiceMaintenanceWindowArgs
        {
            HourOfDay = 2,
            DayOfWeek = "SUNDAY",
        },
        HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
        {
            Version = "2.3.6",
        },
        Labels = 
        {
            { "env", "test" },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataproc.MetastoreService;
import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceMaintenanceWindowArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var default_ = new MetastoreService("default", MetastoreServiceArgs.builder()
            .serviceId("metastore-srv")
            .location("us-central1")
            .port(9080)
            .tier("DEVELOPER")
            .maintenanceWindow(MetastoreServiceMaintenanceWindowArgs.builder()
                .hourOfDay(2)
                .dayOfWeek("SUNDAY")
                .build())
            .hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
                .version("2.3.6")
                .build())
            .labels(Map.of("env", "test"))
            .build());
    }
}
resources:
  default:
    type: gcp:dataproc:MetastoreService
    properties:
      serviceId: metastore-srv
      location: us-central1
      port: 9080
      tier: DEVELOPER
      maintenanceWindow:
        hourOfDay: 2
        dayOfWeek: SUNDAY
      hiveMetastoreConfig:
        version: 2.3.6
      labels:
        env: test
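Downstream stacks and clusters usually need the service's Thrift endpoint and its artifact bucket. As a minimal TypeScript sketch building on the basic example above (and assuming the provider surfaces these as the read-only outputs endpointUri and artifactGcsUri), they can be exported from the stack:
// Read-only outputs populated once the service has been created.
export const metastoreEndpoint = _default.endpointUri;
export const metastoreArtifactsBucket = _default.artifactGcsUri;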
Dataproc Metastore Service Deletion Protection
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const _default = new gcp.dataproc.MetastoreService("default", {
    serviceId: "metastore-srv",
    location: "us-central1",
    port: 9080,
    tier: "DEVELOPER",
    deletionProtection: true,
    maintenanceWindow: {
        hourOfDay: 2,
        dayOfWeek: "SUNDAY",
    },
    hiveMetastoreConfig: {
        version: "2.3.6",
    },
    labels: {
        env: "test",
    },
});
import pulumi
import pulumi_gcp as gcp
default = gcp.dataproc.MetastoreService("default",
    service_id="metastore-srv",
    location="us-central1",
    port=9080,
    tier="DEVELOPER",
    deletion_protection=True,
    maintenance_window={
        "hour_of_day": 2,
        "day_of_week": "SUNDAY",
    },
    hive_metastore_config={
        "version": "2.3.6",
    },
    labels={
        "env": "test",
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewMetastoreService(ctx, "default", &dataproc.MetastoreServiceArgs{
			ServiceId:          pulumi.String("metastore-srv"),
			Location:           pulumi.String("us-central1"),
			Port:               pulumi.Int(9080),
			Tier:               pulumi.String("DEVELOPER"),
			DeletionProtection: pulumi.Bool(true),
			MaintenanceWindow: &dataproc.MetastoreServiceMaintenanceWindowArgs{
				HourOfDay: pulumi.Int(2),
				DayOfWeek: pulumi.String("SUNDAY"),
			},
			HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
				Version: pulumi.String("2.3.6"),
			},
			Labels: pulumi.StringMap{
				"env": pulumi.String("test"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var @default = new Gcp.Dataproc.MetastoreService("default", new()
    {
        ServiceId = "metastore-srv",
        Location = "us-central1",
        Port = 9080,
        Tier = "DEVELOPER",
        DeletionProtection = true,
        MaintenanceWindow = new Gcp.Dataproc.Inputs.MetastoreServiceMaintenanceWindowArgs
        {
            HourOfDay = 2,
            DayOfWeek = "SUNDAY",
        },
        HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
        {
            Version = "2.3.6",
        },
        Labels = 
        {
            { "env", "test" },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataproc.MetastoreService;
import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceMaintenanceWindowArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var default_ = new MetastoreService("default", MetastoreServiceArgs.builder()
            .serviceId("metastore-srv")
            .location("us-central1")
            .port(9080)
            .tier("DEVELOPER")
            .deletionProtection(true)
            .maintenanceWindow(MetastoreServiceMaintenanceWindowArgs.builder()
                .hourOfDay(2)
                .dayOfWeek("SUNDAY")
                .build())
            .hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
                .version("2.3.6")
                .build())
            .labels(Map.of("env", "test"))
            .build());
    }
}
resources:
  default:
    type: gcp:dataproc:MetastoreService
    properties:
      serviceId: metastore-srv
      location: us-central1
      port: 9080
      tier: DEVELOPER
      deletionProtection: true
      maintenanceWindow:
        hourOfDay: 2
        dayOfWeek: SUNDAY
      hiveMetastoreConfig:
        version: 2.3.6
      labels:
        env: test
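With deletionProtection enabled, pulumi destroy fails until the flag is switched off and applied. A hedged TypeScript sketch of driving the flag from stack configuration (the config key protectMetastore is an arbitrary name chosen for this illustration):
const cfg = new pulumi.Config();
// Default to protected; run `pulumi config set protectMetastore false` followed by
// `pulumi up` before attempting to destroy the stack.
const protectMetastore = cfg.getBoolean("protectMetastore") ?? true;
// ...then pass `deletionProtection: protectMetastore` to the MetastoreService above.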
Dataproc Metastore Service Cmek Example
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const keyRing = new gcp.kms.KeyRing("key_ring", {
    name: "example-keyring",
    location: "us-central1",
});
const cryptoKey = new gcp.kms.CryptoKey("crypto_key", {
    name: "example-key",
    keyRing: keyRing.id,
    purpose: "ENCRYPT_DECRYPT",
});
const _default = new gcp.dataproc.MetastoreService("default", {
    serviceId: "example-service",
    location: "us-central1",
    encryptionConfig: {
        kmsKey: cryptoKey.id,
    },
    hiveMetastoreConfig: {
        version: "3.1.2",
    },
});
import pulumi
import pulumi_gcp as gcp
key_ring = gcp.kms.KeyRing("key_ring",
    name="example-keyring",
    location="us-central1")
crypto_key = gcp.kms.CryptoKey("crypto_key",
    name="example-key",
    key_ring=key_ring.id,
    purpose="ENCRYPT_DECRYPT")
default = gcp.dataproc.MetastoreService("default",
    service_id="example-service",
    location="us-central1",
    encryption_config={
        "kms_key": crypto_key.id,
    },
    hive_metastore_config={
        "version": "3.1.2",
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/kms"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		keyRing, err := kms.NewKeyRing(ctx, "key_ring", &kms.KeyRingArgs{
			Name:     pulumi.String("example-keyring"),
			Location: pulumi.String("us-central1"),
		})
		if err != nil {
			return err
		}
		cryptoKey, err := kms.NewCryptoKey(ctx, "crypto_key", &kms.CryptoKeyArgs{
			Name:    pulumi.String("example-key"),
			KeyRing: keyRing.ID(),
			Purpose: pulumi.String("ENCRYPT_DECRYPT"),
		})
		if err != nil {
			return err
		}
		_, err = dataproc.NewMetastoreService(ctx, "default", &dataproc.MetastoreServiceArgs{
			ServiceId: pulumi.String("example-service"),
			Location:  pulumi.String("us-central1"),
			EncryptionConfig: &dataproc.MetastoreServiceEncryptionConfigArgs{
				KmsKey: cryptoKey.ID(),
			},
			HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
				Version: pulumi.String("3.1.2"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var keyRing = new Gcp.Kms.KeyRing("key_ring", new()
    {
        Name = "example-keyring",
        Location = "us-central1",
    });
    var cryptoKey = new Gcp.Kms.CryptoKey("crypto_key", new()
    {
        Name = "example-key",
        KeyRing = keyRing.Id,
        Purpose = "ENCRYPT_DECRYPT",
    });
    var @default = new Gcp.Dataproc.MetastoreService("default", new()
    {
        ServiceId = "example-service",
        Location = "us-central1",
        EncryptionConfig = new Gcp.Dataproc.Inputs.MetastoreServiceEncryptionConfigArgs
        {
            KmsKey = cryptoKey.Id,
        },
        HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
        {
            Version = "3.1.2",
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.kms.KeyRing;
import com.pulumi.gcp.kms.KeyRingArgs;
import com.pulumi.gcp.kms.CryptoKey;
import com.pulumi.gcp.kms.CryptoKeyArgs;
import com.pulumi.gcp.dataproc.MetastoreService;
import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceEncryptionConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var keyRing = new KeyRing("keyRing", KeyRingArgs.builder()
            .name("example-keyring")
            .location("us-central1")
            .build());
        var cryptoKey = new CryptoKey("cryptoKey", CryptoKeyArgs.builder()
            .name("example-key")
            .keyRing(keyRing.id())
            .purpose("ENCRYPT_DECRYPT")
            .build());
        var default_ = new MetastoreService("default", MetastoreServiceArgs.builder()
            .serviceId("example-service")
            .location("us-central1")
            .encryptionConfig(MetastoreServiceEncryptionConfigArgs.builder()
                .kmsKey(cryptoKey.id())
                .build())
            .hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
                .version("3.1.2")
                .build())
            .build());
    }
}
resources:
  default:
    type: gcp:dataproc:MetastoreService
    properties:
      serviceId: example-service
      location: us-central1
      encryptionConfig:
        kmsKey: ${cryptoKey.id}
      hiveMetastoreConfig:
        version: 3.1.2
  cryptoKey:
    type: gcp:kms:CryptoKey
    name: crypto_key
    properties:
      name: example-key
      keyRing: ${keyRing.id}
      purpose: ENCRYPT_DECRYPT
  keyRing:
    type: gcp:kms:KeyRing
    name: key_ring
    properties:
      name: example-keyring
      location: us-central1
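CMEK only works if the Dataproc Metastore service agent is allowed to use the key. A hedged TypeScript sketch of that grant, assuming the usual service-PROJECT_NUMBER@gcp-sa-metastore.iam.gserviceaccount.com service-agent address (verify it for your project):
const project = gcp.organizations.getProject({});
// Allow the Dataproc Metastore service agent to encrypt/decrypt with the key.
const metastoreKeyUser = new gcp.kms.CryptoKeyIAMMember("metastore_key_user", {
    cryptoKeyId: cryptoKey.id,
    role: "roles/cloudkms.cryptoKeyEncrypterDecrypter",
    member: project.then(p => `serviceAccount:service-${p.number}@gcp-sa-metastore.iam.gserviceaccount.com`),
});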
Dataproc Metastore Service Private Service Connect
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const net = new gcp.compute.Network("net", {
    name: "my-network",
    autoCreateSubnetworks: false,
});
const subnet = new gcp.compute.Subnetwork("subnet", {
    name: "my-subnetwork",
    region: "us-central1",
    network: net.id,
    ipCidrRange: "10.0.0.0/22",
    privateIpGoogleAccess: true,
});
const _default = new gcp.dataproc.MetastoreService("default", {
    serviceId: "metastore-srv",
    location: "us-central1",
    tier: "DEVELOPER",
    hiveMetastoreConfig: {
        version: "3.1.2",
    },
    networkConfig: {
        consumers: [{
            subnetwork: subnet.id,
        }],
    },
});
import pulumi
import pulumi_gcp as gcp
net = gcp.compute.Network("net",
    name="my-network",
    auto_create_subnetworks=False)
subnet = gcp.compute.Subnetwork("subnet",
    name="my-subnetwork",
    region="us-central1",
    network=net.id,
    ip_cidr_range="10.0.0.0/22",
    private_ip_google_access=True)
default = gcp.dataproc.MetastoreService("default",
    service_id="metastore-srv",
    location="us-central1",
    tier="DEVELOPER",
    hive_metastore_config={
        "version": "3.1.2",
    },
    network_config={
        "consumers": [{
            "subnetwork": subnet.id,
        }],
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		net, err := compute.NewNetwork(ctx, "net", &compute.NetworkArgs{
			Name:                  pulumi.String("my-network"),
			AutoCreateSubnetworks: pulumi.Bool(false),
		})
		if err != nil {
			return err
		}
		subnet, err := compute.NewSubnetwork(ctx, "subnet", &compute.SubnetworkArgs{
			Name:                  pulumi.String("my-subnetwork"),
			Region:                pulumi.String("us-central1"),
			Network:               net.ID(),
			IpCidrRange:           pulumi.String("10.0.0.0/22"),
			PrivateIpGoogleAccess: pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		_, err = dataproc.NewMetastoreService(ctx, "default", &dataproc.MetastoreServiceArgs{
			ServiceId: pulumi.String("metastore-srv"),
			Location:  pulumi.String("us-central1"),
			Tier:      pulumi.String("DEVELOPER"),
			HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
				Version: pulumi.String("3.1.2"),
			},
			NetworkConfig: &dataproc.MetastoreServiceNetworkConfigArgs{
				Consumers: dataproc.MetastoreServiceNetworkConfigConsumerArray{
					&dataproc.MetastoreServiceNetworkConfigConsumerArgs{
						Subnetwork: subnet.ID(),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var net = new Gcp.Compute.Network("net", new()
    {
        Name = "my-network",
        AutoCreateSubnetworks = false,
    });
    var subnet = new Gcp.Compute.Subnetwork("subnet", new()
    {
        Name = "my-subnetwork",
        Region = "us-central1",
        Network = net.Id,
        IpCidrRange = "10.0.0.0/22",
        PrivateIpGoogleAccess = true,
    });
    var @default = new Gcp.Dataproc.MetastoreService("default", new()
    {
        ServiceId = "metastore-srv",
        Location = "us-central1",
        Tier = "DEVELOPER",
        HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
        {
            Version = "3.1.2",
        },
        NetworkConfig = new Gcp.Dataproc.Inputs.MetastoreServiceNetworkConfigArgs
        {
            Consumers = new[]
            {
                new Gcp.Dataproc.Inputs.MetastoreServiceNetworkConfigConsumerArgs
                {
                    Subnetwork = subnet.Id,
                },
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.Network;
import com.pulumi.gcp.compute.NetworkArgs;
import com.pulumi.gcp.compute.Subnetwork;
import com.pulumi.gcp.compute.SubnetworkArgs;
import com.pulumi.gcp.dataproc.MetastoreService;
import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceNetworkConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceNetworkConfigConsumerArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var net = new Network("net", NetworkArgs.builder()
            .name("my-network")
            .autoCreateSubnetworks(false)
            .build());
        var subnet = new Subnetwork("subnet", SubnetworkArgs.builder()
            .name("my-subnetwork")
            .region("us-central1")
            .network(net.id())
            .ipCidrRange("10.0.0.0/22")
            .privateIpGoogleAccess(true)
            .build());
        var default_ = new MetastoreService("default", MetastoreServiceArgs.builder()
            .serviceId("metastore-srv")
            .location("us-central1")
            .tier("DEVELOPER")
            .hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
                .version("3.1.2")
                .build())
            .networkConfig(MetastoreServiceNetworkConfigArgs.builder()
                .consumers(MetastoreServiceNetworkConfigConsumerArgs.builder()
                    .subnetwork(subnet.id())
                    .build())
                .build())
            .build());
    }
}
resources:
  net:
    type: gcp:compute:Network
    properties:
      name: my-network
      autoCreateSubnetworks: false
  subnet:
    type: gcp:compute:Subnetwork
    properties:
      name: my-subnetwork
      region: us-central1
      network: ${net.id}
      ipCidrRange: 10.0.0.0/22
      privateIpGoogleAccess: true
  default:
    type: gcp:dataproc:MetastoreService
    properties:
      serviceId: metastore-srv
      location: us-central1
      tier: DEVELOPER
      hiveMetastoreConfig:
        version: 3.1.2
      networkConfig:
        consumers:
          - subnetwork: ${subnet.id}
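A metastore exposed over Private Service Connect is typically consumed by a Dataproc cluster. A hedged TypeScript sketch, assuming clusterConfig.metastoreConfig.dataprocMetastoreService accepts the service's relative resource name (the resource's name output):
const cluster = new gcp.dataproc.Cluster("cluster", {
    name: "my-cluster",
    region: "us-central1",
    clusterConfig: {
        metastoreConfig: {
            // e.g. projects/<project>/locations/us-central1/services/metastore-srv
            dataprocMetastoreService: _default.name,
        },
    },
});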
Dataproc Metastore Service Private Service Connect Custom Routes
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const net = new gcp.compute.Network("net", {
    name: "my-network",
    autoCreateSubnetworks: false,
});
const subnet = new gcp.compute.Subnetwork("subnet", {
    name: "my-subnetwork",
    region: "us-central1",
    network: net.id,
    ipCidrRange: "10.0.0.0/22",
    privateIpGoogleAccess: true,
});
const _default = new gcp.dataproc.MetastoreService("default", {
    serviceId: "metastore-srv",
    location: "us-central1",
    hiveMetastoreConfig: {
        version: "3.1.2",
    },
    networkConfig: {
        consumers: [{
            subnetwork: subnet.id,
        }],
        customRoutesEnabled: true,
    },
});
import pulumi
import pulumi_gcp as gcp
net = gcp.compute.Network("net",
    name="my-network",
    auto_create_subnetworks=False)
subnet = gcp.compute.Subnetwork("subnet",
    name="my-subnetwork",
    region="us-central1",
    network=net.id,
    ip_cidr_range="10.0.0.0/22",
    private_ip_google_access=True)
default = gcp.dataproc.MetastoreService("default",
    service_id="metastore-srv",
    location="us-central1",
    hive_metastore_config={
        "version": "3.1.2",
    },
    network_config={
        "consumers": [{
            "subnetwork": subnet.id,
        }],
        "custom_routes_enabled": True,
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		net, err := compute.NewNetwork(ctx, "net", &compute.NetworkArgs{
			Name:                  pulumi.String("my-network"),
			AutoCreateSubnetworks: pulumi.Bool(false),
		})
		if err != nil {
			return err
		}
		subnet, err := compute.NewSubnetwork(ctx, "subnet", &compute.SubnetworkArgs{
			Name:                  pulumi.String("my-subnetwork"),
			Region:                pulumi.String("us-central1"),
			Network:               net.ID(),
			IpCidrRange:           pulumi.String("10.0.0.0/22"),
			PrivateIpGoogleAccess: pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		_, err = dataproc.NewMetastoreService(ctx, "default", &dataproc.MetastoreServiceArgs{
			ServiceId: pulumi.String("metastore-srv"),
			Location:  pulumi.String("us-central1"),
			HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
				Version: pulumi.String("3.1.2"),
			},
			NetworkConfig: &dataproc.MetastoreServiceNetworkConfigArgs{
				Consumers: dataproc.MetastoreServiceNetworkConfigConsumerArray{
					&dataproc.MetastoreServiceNetworkConfigConsumerArgs{
						Subnetwork: subnet.ID(),
					},
				},
				CustomRoutesEnabled: pulumi.Bool(true),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var net = new Gcp.Compute.Network("net", new()
    {
        Name = "my-network",
        AutoCreateSubnetworks = false,
    });
    var subnet = new Gcp.Compute.Subnetwork("subnet", new()
    {
        Name = "my-subnetwork",
        Region = "us-central1",
        Network = net.Id,
        IpCidrRange = "10.0.0.0/22",
        PrivateIpGoogleAccess = true,
    });
    var @default = new Gcp.Dataproc.MetastoreService("default", new()
    {
        ServiceId = "metastore-srv",
        Location = "us-central1",
        HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
        {
            Version = "3.1.2",
        },
        NetworkConfig = new Gcp.Dataproc.Inputs.MetastoreServiceNetworkConfigArgs
        {
            Consumers = new[]
            {
                new Gcp.Dataproc.Inputs.MetastoreServiceNetworkConfigConsumerArgs
                {
                    Subnetwork = subnet.Id,
                },
            },
            CustomRoutesEnabled = true,
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.Network;
import com.pulumi.gcp.compute.NetworkArgs;
import com.pulumi.gcp.compute.Subnetwork;
import com.pulumi.gcp.compute.SubnetworkArgs;
import com.pulumi.gcp.dataproc.MetastoreService;
import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceNetworkConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceNetworkConfigConsumerArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var net = new Network("net", NetworkArgs.builder()
            .name("my-network")
            .autoCreateSubnetworks(false)
            .build());
        var subnet = new Subnetwork("subnet", SubnetworkArgs.builder()
            .name("my-subnetwork")
            .region("us-central1")
            .network(net.id())
            .ipCidrRange("10.0.0.0/22")
            .privateIpGoogleAccess(true)
            .build());
        var default_ = new MetastoreService("default", MetastoreServiceArgs.builder()
            .serviceId("metastore-srv")
            .location("us-central1")
            .hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
                .version("3.1.2")
                .build())
            .networkConfig(MetastoreServiceNetworkConfigArgs.builder()
                .consumers(MetastoreServiceNetworkConfigConsumerArgs.builder()
                    .subnetwork(subnet.id())
                    .build())
                .customRoutesEnabled(true)
                .build())
            .build());
    }
}
resources:
  net:
    type: gcp:compute:Network
    properties:
      name: my-network
      autoCreateSubnetworks: false
  subnet:
    type: gcp:compute:Subnetwork
    properties:
      name: my-subnetwork
      region: us-central1
      network: ${net.id}
      ipCidrRange: 10.0.0.0/22
      privateIpGoogleAccess: true
  default:
    type: gcp:dataproc:MetastoreService
    properties:
      serviceId: metastore-srv
      location: us-central1
      hiveMetastoreConfig:
        version: 3.1.2
      networkConfig:
        consumers:
          - subnetwork: ${subnet.id}
        customRoutesEnabled: true
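Each consumer subnetwork receives its own Private Service Connect endpoint. A hedged TypeScript sketch of surfacing it, assuming the provider echoes an endpointUri field on the consumer entries of the networkConfig output:
export const pscEndpoint = _default.networkConfig.apply(nc => nc?.consumers?.[0]?.endpointUri);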
Dataproc Metastore Service Dpms2
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const dpms2 = new gcp.dataproc.MetastoreService("dpms2", {
    serviceId: "ms-dpms2",
    location: "us-central1",
    databaseType: "SPANNER",
    hiveMetastoreConfig: {
        version: "3.1.2",
    },
    scalingConfig: {
        instanceSize: "EXTRA_SMALL",
    },
});
import pulumi
import pulumi_gcp as gcp
dpms2 = gcp.dataproc.MetastoreService("dpms2",
    service_id="ms-dpms2",
    location="us-central1",
    database_type="SPANNER",
    hive_metastore_config={
        "version": "3.1.2",
    },
    scaling_config={
        "instance_size": "EXTRA_SMALL",
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewMetastoreService(ctx, "dpms2", &dataproc.MetastoreServiceArgs{
			ServiceId:    pulumi.String("ms-dpms2"),
			Location:     pulumi.String("us-central1"),
			DatabaseType: pulumi.String("SPANNER"),
			HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
				Version: pulumi.String("3.1.2"),
			},
			ScalingConfig: &dataproc.MetastoreServiceScalingConfigArgs{
				InstanceSize: pulumi.String("EXTRA_SMALL"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var dpms2 = new Gcp.Dataproc.MetastoreService("dpms2", new()
    {
        ServiceId = "ms-dpms2",
        Location = "us-central1",
        DatabaseType = "SPANNER",
        HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
        {
            Version = "3.1.2",
        },
        ScalingConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigArgs
        {
            InstanceSize = "EXTRA_SMALL",
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataproc.MetastoreService;
import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var dpms2 = new MetastoreService("dpms2", MetastoreServiceArgs.builder()
            .serviceId("ms-dpms2")
            .location("us-central1")
            .databaseType("SPANNER")
            .hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
                .version("3.1.2")
                .build())
            .scalingConfig(MetastoreServiceScalingConfigArgs.builder()
                .instanceSize("EXTRA_SMALL")
                .build())
            .build());
    }
}
resources:
  dpms2:
    type: gcp:dataproc:MetastoreService
    properties:
      serviceId: ms-dpms2
      location: us-central1
      databaseType: SPANNER
      hiveMetastoreConfig:
        version: 3.1.2
      scalingConfig:
        instanceSize: EXTRA_SMALL
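To point other resources at a metastore that already exists (for example, one created in another stack) rather than creating it, the standard static get lookup works. A hedged TypeScript sketch; the project ID below is a placeholder:
const existing = gcp.dataproc.MetastoreService.get(
    "existing",
    "projects/my-project/locations/us-central1/services/ms-dpms2");
export const existingEndpoint = existing.endpointUri;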
Dataproc Metastore Service Dpms2 Scaling Factor
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const dpms2ScalingFactor = new gcp.dataproc.MetastoreService("dpms2_scaling_factor", {
    serviceId: "ms-dpms2sf",
    location: "us-central1",
    databaseType: "SPANNER",
    hiveMetastoreConfig: {
        version: "3.1.2",
    },
    scalingConfig: {
        scalingFactor: 2,
    },
});
import pulumi
import pulumi_gcp as gcp
dpms2_scaling_factor = gcp.dataproc.MetastoreService("dpms2_scaling_factor",
    service_id="ms-dpms2sf",
    location="us-central1",
    database_type="SPANNER",
    hive_metastore_config={
        "version": "3.1.2",
    },
    scaling_config={
        "scaling_factor": 2,
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewMetastoreService(ctx, "dpms2_scaling_factor", &dataproc.MetastoreServiceArgs{
			ServiceId:    pulumi.String("ms-dpms2sf"),
			Location:     pulumi.String("us-central1"),
			DatabaseType: pulumi.String("SPANNER"),
			HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
				Version: pulumi.String("3.1.2"),
			},
			ScalingConfig: &dataproc.MetastoreServiceScalingConfigArgs{
				ScalingFactor: pulumi.Float64(2),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var dpms2ScalingFactor = new Gcp.Dataproc.MetastoreService("dpms2_scaling_factor", new()
    {
        ServiceId = "ms-dpms2sf",
        Location = "us-central1",
        DatabaseType = "SPANNER",
        HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
        {
            Version = "3.1.2",
        },
        ScalingConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigArgs
        {
            ScalingFactor = 2,
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataproc.MetastoreService;
import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var dpms2ScalingFactor = new MetastoreService("dpms2ScalingFactor", MetastoreServiceArgs.builder()
            .serviceId("ms-dpms2sf")
            .location("us-central1")
            .databaseType("SPANNER")
            .hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
                .version("3.1.2")
                .build())
            .scalingConfig(MetastoreServiceScalingConfigArgs.builder()
                .scalingFactor("2")
                .build())
            .build());
    }
}
resources:
  dpms2ScalingFactor:
    type: gcp:dataproc:MetastoreService
    name: dpms2_scaling_factor
    properties:
      serviceId: ms-dpms2sf
      location: us-central1
      databaseType: SPANNER
      hiveMetastoreConfig:
        version: 3.1.2
      scalingConfig:
        scalingFactor: 2
Dataproc Metastore Service Scheduled Backup
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const bucket = new gcp.storage.Bucket("bucket", {
    name: "backup",
    location: "us-central1",
});
const backup = new gcp.dataproc.MetastoreService("backup", {
    serviceId: "backup",
    location: "us-central1",
    port: 9080,
    tier: "DEVELOPER",
    maintenanceWindow: {
        hourOfDay: 2,
        dayOfWeek: "SUNDAY",
    },
    hiveMetastoreConfig: {
        version: "2.3.6",
    },
    scheduledBackup: {
        enabled: true,
        cronSchedule: "0 0 * * *",
        timeZone: "UTC",
        backupLocation: pulumi.interpolate`gs://${bucket.name}`,
    },
    labels: {
        env: "test",
    },
});
import pulumi
import pulumi_gcp as gcp
bucket = gcp.storage.Bucket("bucket",
    name="backup",
    location="us-central1")
backup = gcp.dataproc.MetastoreService("backup",
    service_id="backup",
    location="us-central1",
    port=9080,
    tier="DEVELOPER",
    maintenance_window={
        "hour_of_day": 2,
        "day_of_week": "SUNDAY",
    },
    hive_metastore_config={
        "version": "2.3.6",
    },
    scheduled_backup={
        "enabled": True,
        "cron_schedule": "0 0 * * *",
        "time_zone": "UTC",
        "backup_location": bucket.name.apply(lambda name: f"gs://{name}"),
    },
    labels={
        "env": "test",
    })
package main
import (
	"fmt"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/storage"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		bucket, err := storage.NewBucket(ctx, "bucket", &storage.BucketArgs{
			Name:     pulumi.String("backup"),
			Location: pulumi.String("us-central1"),
		})
		if err != nil {
			return err
		}
		_, err = dataproc.NewMetastoreService(ctx, "backup", &dataproc.MetastoreServiceArgs{
			ServiceId: pulumi.String("backup"),
			Location:  pulumi.String("us-central1"),
			Port:      pulumi.Int(9080),
			Tier:      pulumi.String("DEVELOPER"),
			MaintenanceWindow: &dataproc.MetastoreServiceMaintenanceWindowArgs{
				HourOfDay: pulumi.Int(2),
				DayOfWeek: pulumi.String("SUNDAY"),
			},
			HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
				Version: pulumi.String("2.3.6"),
			},
			ScheduledBackup: &dataproc.MetastoreServiceScheduledBackupArgs{
				Enabled:      pulumi.Bool(true),
				CronSchedule: pulumi.String("0 0 * * *"),
				TimeZone:     pulumi.String("UTC"),
				BackupLocation: bucket.Name.ApplyT(func(name string) (string, error) {
					return fmt.Sprintf("gs://%v", name), nil
				}).(pulumi.StringOutput),
			},
			Labels: pulumi.StringMap{
				"env": pulumi.String("test"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var bucket = new Gcp.Storage.Bucket("bucket", new()
    {
        Name = "backup",
        Location = "us-central1",
    });
    var backup = new Gcp.Dataproc.MetastoreService("backup", new()
    {
        ServiceId = "backup",
        Location = "us-central1",
        Port = 9080,
        Tier = "DEVELOPER",
        MaintenanceWindow = new Gcp.Dataproc.Inputs.MetastoreServiceMaintenanceWindowArgs
        {
            HourOfDay = 2,
            DayOfWeek = "SUNDAY",
        },
        HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
        {
            Version = "2.3.6",
        },
        ScheduledBackup = new Gcp.Dataproc.Inputs.MetastoreServiceScheduledBackupArgs
        {
            Enabled = true,
            CronSchedule = "0 0 * * *",
            TimeZone = "UTC",
            BackupLocation = bucket.Name.Apply(name => $"gs://{name}"),
        },
        Labels = 
        {
            { "env", "test" },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.storage.Bucket;
import com.pulumi.gcp.storage.BucketArgs;
import com.pulumi.gcp.dataproc.MetastoreService;
import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceMaintenanceWindowArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScheduledBackupArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var bucket = new Bucket("bucket", BucketArgs.builder()
            .name("backup")
            .location("us-central1")
            .build());
        var backup = new MetastoreService("backup", MetastoreServiceArgs.builder()
            .serviceId("backup")
            .location("us-central1")
            .port(9080)
            .tier("DEVELOPER")
            .maintenanceWindow(MetastoreServiceMaintenanceWindowArgs.builder()
                .hourOfDay(2)
                .dayOfWeek("SUNDAY")
                .build())
            .hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
                .version("2.3.6")
                .build())
            .scheduledBackup(MetastoreServiceScheduledBackupArgs.builder()
                .enabled(true)
                .cronSchedule("0 0 * * *")
                .timeZone("UTC")
                .backupLocation(bucket.name().applyValue(name -> String.format("gs://%s", name)))
                .build())
            .labels(Map.of("env", "test"))
            .build());
    }
}
resources:
  backup:
    type: gcp:dataproc:MetastoreService
    properties:
      serviceId: backup
      location: us-central1
      port: 9080
      tier: DEVELOPER
      maintenanceWindow:
        hourOfDay: 2
        dayOfWeek: SUNDAY
      hiveMetastoreConfig:
        version: 2.3.6
      scheduledBackup:
        enabled: true
        cronSchedule: 0 0 * * *
        timeZone: UTC
        backupLocation: gs://${bucket.name}
      labels:
        env: test
  bucket:
    type: gcp:storage:Bucket
    properties:
      name: backup
      location: us-central1
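Scheduled backups are written by the Dataproc Metastore service agent, so that agent needs write access to the bucket. A hedged TypeScript sketch of the grant; the role and service-agent address are assumptions, so check the product documentation for the exact permissions required:
const project = gcp.organizations.getProject({});
// Let the Dataproc Metastore service agent write backup objects into the bucket.
const backupWriter = new gcp.storage.BucketIAMMember("backup_writer", {
    bucket: bucket.name,
    role: "roles/storage.objectAdmin",
    member: project.then(p => `serviceAccount:service-${p.number}@gcp-sa-metastore.iam.gserviceaccount.com`),
});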
Dataproc Metastore Service Autoscaling Max Scaling Factor
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const testResource = new gcp.dataproc.MetastoreService("test_resource", {
    serviceId: "test-service",
    location: "us-central1",
    databaseType: "SPANNER",
    hiveMetastoreConfig: {
        version: "3.1.2",
    },
    scalingConfig: {
        autoscalingConfig: {
            autoscalingEnabled: true,
            limitConfig: {
                maxScalingFactor: 1,
            },
        },
    },
});
import pulumi
import pulumi_gcp as gcp
test_resource = gcp.dataproc.MetastoreService("test_resource",
    service_id="test-service",
    location="us-central1",
    database_type="SPANNER",
    hive_metastore_config={
        "version": "3.1.2",
    },
    scaling_config={
        "autoscaling_config": {
            "autoscaling_enabled": True,
            "limit_config": {
                "max_scaling_factor": 1,
            },
        },
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewMetastoreService(ctx, "test_resource", &dataproc.MetastoreServiceArgs{
			ServiceId:    pulumi.String("test-service"),
			Location:     pulumi.String("us-central1"),
			DatabaseType: pulumi.String("SPANNER"),
			HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
				Version: pulumi.String("3.1.2"),
			},
			ScalingConfig: &dataproc.MetastoreServiceScalingConfigArgs{
				AutoscalingConfig: &dataproc.MetastoreServiceScalingConfigAutoscalingConfigArgs{
					AutoscalingEnabled: pulumi.Bool(true),
					LimitConfig: &dataproc.MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs{
						MaxScalingFactor: pulumi.Float64(1),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var testResource = new Gcp.Dataproc.MetastoreService("test_resource", new()
    {
        ServiceId = "test-service",
        Location = "us-central1",
        DatabaseType = "SPANNER",
        HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
        {
            Version = "3.1.2",
        },
        ScalingConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigArgs
        {
            AutoscalingConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigAutoscalingConfigArgs
            {
                AutoscalingEnabled = true,
                LimitConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs
                {
                    MaxScalingFactor = 1,
                },
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataproc.MetastoreService;
import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigAutoscalingConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var testResource = new MetastoreService("testResource", MetastoreServiceArgs.builder()
            .serviceId("test-service")
            .location("us-central1")
            .databaseType("SPANNER")
            .hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
                .version("3.1.2")
                .build())
            .scalingConfig(MetastoreServiceScalingConfigArgs.builder()
                .autoscalingConfig(MetastoreServiceScalingConfigAutoscalingConfigArgs.builder()
                    .autoscalingEnabled(true)
                    .limitConfig(MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs.builder()
                        .maxScalingFactor(1)
                        .build())
                    .build())
                .build())
            .build());
    }
}
resources:
  testResource:
    type: gcp:dataproc:MetastoreService
    name: test_resource
    properties:
      serviceId: test-service
      location: us-central1
      databaseType: SPANNER
      hiveMetastoreConfig:
        version: 3.1.2
      scalingConfig:
        autoscalingConfig:
          autoscalingEnabled: true
          limitConfig:
            maxScalingFactor: 1
Dataproc Metastore Service Autoscaling Min And Max Scaling Factor
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const testResource = new gcp.dataproc.MetastoreService("test_resource", {
    serviceId: "test-service",
    location: "us-central1",
    databaseType: "SPANNER",
    hiveMetastoreConfig: {
        version: "3.1.2",
    },
    scalingConfig: {
        autoscalingConfig: {
            autoscalingEnabled: true,
            limitConfig: {
                minScalingFactor: 0.1,
                maxScalingFactor: 1,
            },
        },
    },
});
import pulumi
import pulumi_gcp as gcp
test_resource = gcp.dataproc.MetastoreService("test_resource",
    service_id="test-service",
    location="us-central1",
    database_type="SPANNER",
    hive_metastore_config={
        "version": "3.1.2",
    },
    scaling_config={
        "autoscaling_config": {
            "autoscaling_enabled": True,
            "limit_config": {
                "min_scaling_factor": 0.1,
                "max_scaling_factor": 1,
            },
        },
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewMetastoreService(ctx, "test_resource", &dataproc.MetastoreServiceArgs{
			ServiceId:    pulumi.String("test-service"),
			Location:     pulumi.String("us-central1"),
			DatabaseType: pulumi.String("SPANNER"),
			HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
				Version: pulumi.String("3.1.2"),
			},
			ScalingConfig: &dataproc.MetastoreServiceScalingConfigArgs{
				AutoscalingConfig: &dataproc.MetastoreServiceScalingConfigAutoscalingConfigArgs{
					AutoscalingEnabled: pulumi.Bool(true),
					LimitConfig: &dataproc.MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs{
						MinScalingFactor: pulumi.Float64(0.1),
						MaxScalingFactor: pulumi.Float64(1),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var testResource = new Gcp.Dataproc.MetastoreService("test_resource", new()
    {
        ServiceId = "test-service",
        Location = "us-central1",
        DatabaseType = "SPANNER",
        HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
        {
            Version = "3.1.2",
        },
        ScalingConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigArgs
        {
            AutoscalingConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigAutoscalingConfigArgs
            {
                AutoscalingEnabled = true,
                LimitConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs
                {
                    MinScalingFactor = 0.1,
                    MaxScalingFactor = 1,
                },
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataproc.MetastoreService;
import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigAutoscalingConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var testResource = new MetastoreService("testResource", MetastoreServiceArgs.builder()
            .serviceId("test-service")
            .location("us-central1")
            .databaseType("SPANNER")
            .hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
                .version("3.1.2")
                .build())
            .scalingConfig(MetastoreServiceScalingConfigArgs.builder()
                .autoscalingConfig(MetastoreServiceScalingConfigAutoscalingConfigArgs.builder()
                    .autoscalingEnabled(true)
                    .limitConfig(MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs.builder()
                        .minScalingFactor(0.1)
                        .maxScalingFactor(1)
                        .build())
                    .build())
                .build())
            .build());
    }
}
resources:
  testResource:
    type: gcp:dataproc:MetastoreService
    name: test_resource
    properties:
      serviceId: test-service
      location: us-central1
      databaseType: SPANNER
      hiveMetastoreConfig:
        version: 3.1.2
      scalingConfig:
        autoscalingConfig:
          autoscalingEnabled: true
          limitConfig:
            minScalingFactor: 0.1
            maxScalingFactor: 1
Dataproc Metastore Service Autoscaling Min Scaling Factor
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const testResource = new gcp.dataproc.MetastoreService("test_resource", {
    serviceId: "test-service",
    location: "us-central1",
    databaseType: "SPANNER",
    hiveMetastoreConfig: {
        version: "3.1.2",
    },
    scalingConfig: {
        autoscalingConfig: {
            autoscalingEnabled: true,
            limitConfig: {
                minScalingFactor: 0.1,
            },
        },
    },
});
import pulumi
import pulumi_gcp as gcp
test_resource = gcp.dataproc.MetastoreService("test_resource",
    service_id="test-service",
    location="us-central1",
    database_type="SPANNER",
    hive_metastore_config={
        "version": "3.1.2",
    },
    scaling_config={
        "autoscaling_config": {
            "autoscaling_enabled": True,
            "limit_config": {
                "min_scaling_factor": 0.1,
            },
        },
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewMetastoreService(ctx, "test_resource", &dataproc.MetastoreServiceArgs{
			ServiceId:    pulumi.String("test-service"),
			Location:     pulumi.String("us-central1"),
			DatabaseType: pulumi.String("SPANNER"),
			HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
				Version: pulumi.String("3.1.2"),
			},
			ScalingConfig: &dataproc.MetastoreServiceScalingConfigArgs{
				AutoscalingConfig: &dataproc.MetastoreServiceScalingConfigAutoscalingConfigArgs{
					AutoscalingEnabled: pulumi.Bool(true),
					LimitConfig: &dataproc.MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs{
						MinScalingFactor: pulumi.Float64(0.1),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var testResource = new Gcp.Dataproc.MetastoreService("test_resource", new()
    {
        ServiceId = "test-service",
        Location = "us-central1",
        DatabaseType = "SPANNER",
        HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
        {
            Version = "3.1.2",
        },
        ScalingConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigArgs
        {
            AutoscalingConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigAutoscalingConfigArgs
            {
                AutoscalingEnabled = true,
                LimitConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs
                {
                    MinScalingFactor = 0.1,
                },
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataproc.MetastoreService;
import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigAutoscalingConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var testResource = new MetastoreService("testResource", MetastoreServiceArgs.builder()
            .serviceId("test-service")
            .location("us-central1")
            .databaseType("SPANNER")
            .hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
                .version("3.1.2")
                .build())
            .scalingConfig(MetastoreServiceScalingConfigArgs.builder()
                .autoscalingConfig(MetastoreServiceScalingConfigAutoscalingConfigArgs.builder()
                    .autoscalingEnabled(true)
                    .limitConfig(MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs.builder()
                        .minScalingFactor(0.1)
                        .build())
                    .build())
                .build())
            .build());
    }
}
resources:
  testResource:
    type: gcp:dataproc:MetastoreService
    name: test_resource
    properties:
      serviceId: test-service
      location: us-central1
      databaseType: SPANNER
      hiveMetastoreConfig:
        version: 3.1.2
      scalingConfig:
        autoscalingConfig:
          autoscalingEnabled: true
          limitConfig:
            minScalingFactor: 0.1
Dataproc Metastore Service Autoscaling No Limit Config
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const testResource = new gcp.dataproc.MetastoreService("test_resource", {
    serviceId: "test-service",
    location: "us-central1",
    databaseType: "SPANNER",
    hiveMetastoreConfig: {
        version: "3.1.2",
    },
    scalingConfig: {
        autoscalingConfig: {
            autoscalingEnabled: true,
        },
    },
});
import pulumi
import pulumi_gcp as gcp
test_resource = gcp.dataproc.MetastoreService("test_resource",
    service_id="test-service",
    location="us-central1",
    database_type="SPANNER",
    hive_metastore_config={
        "version": "3.1.2",
    },
    scaling_config={
        "autoscaling_config": {
            "autoscaling_enabled": True,
        },
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewMetastoreService(ctx, "test_resource", &dataproc.MetastoreServiceArgs{
			ServiceId:    pulumi.String("test-service"),
			Location:     pulumi.String("us-central1"),
			DatabaseType: pulumi.String("SPANNER"),
			HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
				Version: pulumi.String("3.1.2"),
			},
			ScalingConfig: &dataproc.MetastoreServiceScalingConfigArgs{
				AutoscalingConfig: &dataproc.MetastoreServiceScalingConfigAutoscalingConfigArgs{
					AutoscalingEnabled: pulumi.Bool(true),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var testResource = new Gcp.Dataproc.MetastoreService("test_resource", new()
    {
        ServiceId = "test-service",
        Location = "us-central1",
        DatabaseType = "SPANNER",
        HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
        {
            Version = "3.1.2",
        },
        ScalingConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigArgs
        {
            AutoscalingConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigAutoscalingConfigArgs
            {
                AutoscalingEnabled = true,
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataproc.MetastoreService;
import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigAutoscalingConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var testResource = new MetastoreService("testResource", MetastoreServiceArgs.builder()
            .serviceId("test-service")
            .location("us-central1")
            .databaseType("SPANNER")
            .hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
                .version("3.1.2")
                .build())
            .scalingConfig(MetastoreServiceScalingConfigArgs.builder()
                .autoscalingConfig(MetastoreServiceScalingConfigAutoscalingConfigArgs.builder()
                    .autoscalingEnabled(true)
                    .build())
                .build())
            .build());
    }
}
resources:
  testResource:
    type: gcp:dataproc:MetastoreService
    name: test_resource
    properties:
      serviceId: test-service
      location: us-central1
      databaseType: SPANNER
      hiveMetastoreConfig:
        version: 3.1.2
      scalingConfig:
        autoscalingConfig:
          autoscalingEnabled: true
Create MetastoreService Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new MetastoreService(name: string, args?: MetastoreServiceArgs, opts?: CustomResourceOptions);
@overload
def MetastoreService(resource_name: str,
                     args: Optional[MetastoreServiceArgs] = None,
                     opts: Optional[ResourceOptions] = None)
@overload
def MetastoreService(resource_name: str,
                     opts: Optional[ResourceOptions] = None,
                     database_type: Optional[str] = None,
                     deletion_protection: Optional[bool] = None,
                     encryption_config: Optional[MetastoreServiceEncryptionConfigArgs] = None,
                     hive_metastore_config: Optional[MetastoreServiceHiveMetastoreConfigArgs] = None,
                     labels: Optional[Mapping[str, str]] = None,
                     location: Optional[str] = None,
                     maintenance_window: Optional[MetastoreServiceMaintenanceWindowArgs] = None,
                     metadata_integration: Optional[MetastoreServiceMetadataIntegrationArgs] = None,
                     network: Optional[str] = None,
                     network_config: Optional[MetastoreServiceNetworkConfigArgs] = None,
                     port: Optional[int] = None,
                     project: Optional[str] = None,
                     release_channel: Optional[str] = None,
                     scaling_config: Optional[MetastoreServiceScalingConfigArgs] = None,
                     scheduled_backup: Optional[MetastoreServiceScheduledBackupArgs] = None,
                     service_id: Optional[str] = None,
                     telemetry_config: Optional[MetastoreServiceTelemetryConfigArgs] = None,
                     tier: Optional[str] = None)
func NewMetastoreService(ctx *Context, name string, args *MetastoreServiceArgs, opts ...ResourceOption) (*MetastoreService, error)
public MetastoreService(string name, MetastoreServiceArgs? args = null, CustomResourceOptions? opts = null)
public MetastoreService(String name, MetastoreServiceArgs args)
public MetastoreService(String name, MetastoreServiceArgs args, CustomResourceOptions options)
type: gcp:dataproc:MetastoreService
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args MetastoreServiceArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args MetastoreServiceArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args MetastoreServiceArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args MetastoreServiceArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args MetastoreServiceArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var metastoreServiceResource = new Gcp.Dataproc.MetastoreService("metastoreServiceResource", new()
{
    DatabaseType = "string",
    DeletionProtection = false,
    EncryptionConfig = new Gcp.Dataproc.Inputs.MetastoreServiceEncryptionConfigArgs
    {
        KmsKey = "string",
    },
    HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
    {
        Version = "string",
        AuxiliaryVersions = new[]
        {
            new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigAuxiliaryVersionArgs
            {
                Key = "string",
                Version = "string",
                ConfigOverrides = 
                {
                    { "string", "string" },
                },
            },
        },
        ConfigOverrides = 
        {
            { "string", "string" },
        },
        EndpointProtocol = "string",
        KerberosConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigKerberosConfigArgs
        {
            Keytab = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs
            {
                CloudSecret = "string",
            },
            Krb5ConfigGcsUri = "string",
            Principal = "string",
        },
    },
    Labels = 
    {
        { "string", "string" },
    },
    Location = "string",
    MaintenanceWindow = new Gcp.Dataproc.Inputs.MetastoreServiceMaintenanceWindowArgs
    {
        DayOfWeek = "string",
        HourOfDay = 0,
    },
    MetadataIntegration = new Gcp.Dataproc.Inputs.MetastoreServiceMetadataIntegrationArgs
    {
        DataCatalogConfig = new Gcp.Dataproc.Inputs.MetastoreServiceMetadataIntegrationDataCatalogConfigArgs
        {
            Enabled = false,
        },
    },
    Network = "string",
    NetworkConfig = new Gcp.Dataproc.Inputs.MetastoreServiceNetworkConfigArgs
    {
        Consumers = new[]
        {
            new Gcp.Dataproc.Inputs.MetastoreServiceNetworkConfigConsumerArgs
            {
                Subnetwork = "string",
                EndpointUri = "string",
            },
        },
        CustomRoutesEnabled = false,
    },
    Port = 0,
    Project = "string",
    ReleaseChannel = "string",
    ScalingConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigArgs
    {
        AutoscalingConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigAutoscalingConfigArgs
        {
            AutoscalingEnabled = false,
            LimitConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs
            {
                MaxScalingFactor = 0,
                MinScalingFactor = 0,
            },
        },
        InstanceSize = "string",
        ScalingFactor = 0,
    },
    ScheduledBackup = new Gcp.Dataproc.Inputs.MetastoreServiceScheduledBackupArgs
    {
        BackupLocation = "string",
        CronSchedule = "string",
        Enabled = false,
        TimeZone = "string",
    },
    ServiceId = "string",
    TelemetryConfig = new Gcp.Dataproc.Inputs.MetastoreServiceTelemetryConfigArgs
    {
        LogFormat = "string",
    },
    Tier = "string",
});
example, err := dataproc.NewMetastoreService(ctx, "metastoreServiceResource", &dataproc.MetastoreServiceArgs{
	DatabaseType:       pulumi.String("string"),
	DeletionProtection: pulumi.Bool(false),
	EncryptionConfig: &dataproc.MetastoreServiceEncryptionConfigArgs{
		KmsKey: pulumi.String("string"),
	},
	HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
		Version: pulumi.String("string"),
		AuxiliaryVersions: dataproc.MetastoreServiceHiveMetastoreConfigAuxiliaryVersionArray{
			&dataproc.MetastoreServiceHiveMetastoreConfigAuxiliaryVersionArgs{
				Key:     pulumi.String("string"),
				Version: pulumi.String("string"),
				ConfigOverrides: pulumi.StringMap{
					"string": pulumi.String("string"),
				},
			},
		},
		ConfigOverrides: pulumi.StringMap{
			"string": pulumi.String("string"),
		},
		EndpointProtocol: pulumi.String("string"),
		KerberosConfig: &dataproc.MetastoreServiceHiveMetastoreConfigKerberosConfigArgs{
			Keytab: &dataproc.MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs{
				CloudSecret: pulumi.String("string"),
			},
			Krb5ConfigGcsUri: pulumi.String("string"),
			Principal:        pulumi.String("string"),
		},
	},
	Labels: pulumi.StringMap{
		"string": pulumi.String("string"),
	},
	Location: pulumi.String("string"),
	MaintenanceWindow: &dataproc.MetastoreServiceMaintenanceWindowArgs{
		DayOfWeek: pulumi.String("string"),
		HourOfDay: pulumi.Int(0),
	},
	MetadataIntegration: &dataproc.MetastoreServiceMetadataIntegrationArgs{
		DataCatalogConfig: &dataproc.MetastoreServiceMetadataIntegrationDataCatalogConfigArgs{
			Enabled: pulumi.Bool(false),
		},
	},
	Network: pulumi.String("string"),
	NetworkConfig: &dataproc.MetastoreServiceNetworkConfigArgs{
		Consumers: dataproc.MetastoreServiceNetworkConfigConsumerArray{
			&dataproc.MetastoreServiceNetworkConfigConsumerArgs{
				Subnetwork:  pulumi.String("string"),
				EndpointUri: pulumi.String("string"),
			},
		},
		CustomRoutesEnabled: pulumi.Bool(false),
	},
	Port:           pulumi.Int(0),
	Project:        pulumi.String("string"),
	ReleaseChannel: pulumi.String("string"),
	ScalingConfig: &dataproc.MetastoreServiceScalingConfigArgs{
		AutoscalingConfig: &dataproc.MetastoreServiceScalingConfigAutoscalingConfigArgs{
			AutoscalingEnabled: pulumi.Bool(false),
			LimitConfig: &dataproc.MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs{
				MaxScalingFactor: pulumi.Float64(0),
				MinScalingFactor: pulumi.Float64(0),
			},
		},
		InstanceSize:  pulumi.String("string"),
		ScalingFactor: pulumi.Float64(0),
	},
	ScheduledBackup: &dataproc.MetastoreServiceScheduledBackupArgs{
		BackupLocation: pulumi.String("string"),
		CronSchedule:   pulumi.String("string"),
		Enabled:        pulumi.Bool(false),
		TimeZone:       pulumi.String("string"),
	},
	ServiceId: pulumi.String("string"),
	TelemetryConfig: &dataproc.MetastoreServiceTelemetryConfigArgs{
		LogFormat: pulumi.String("string"),
	},
	Tier: pulumi.String("string"),
})
var metastoreServiceResource = new MetastoreService("metastoreServiceResource", MetastoreServiceArgs.builder()
    .databaseType("string")
    .deletionProtection(false)
    .encryptionConfig(MetastoreServiceEncryptionConfigArgs.builder()
        .kmsKey("string")
        .build())
    .hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
        .version("string")
        .auxiliaryVersions(MetastoreServiceHiveMetastoreConfigAuxiliaryVersionArgs.builder()
            .key("string")
            .version("string")
            .configOverrides(Map.of("string", "string"))
            .build())
        .configOverrides(Map.of("string", "string"))
        .endpointProtocol("string")
        .kerberosConfig(MetastoreServiceHiveMetastoreConfigKerberosConfigArgs.builder()
            .keytab(MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs.builder()
                .cloudSecret("string")
                .build())
            .krb5ConfigGcsUri("string")
            .principal("string")
            .build())
        .build())
    .labels(Map.of("string", "string"))
    .location("string")
    .maintenanceWindow(MetastoreServiceMaintenanceWindowArgs.builder()
        .dayOfWeek("string")
        .hourOfDay(0)
        .build())
    .metadataIntegration(MetastoreServiceMetadataIntegrationArgs.builder()
        .dataCatalogConfig(MetastoreServiceMetadataIntegrationDataCatalogConfigArgs.builder()
            .enabled(false)
            .build())
        .build())
    .network("string")
    .networkConfig(MetastoreServiceNetworkConfigArgs.builder()
        .consumers(MetastoreServiceNetworkConfigConsumerArgs.builder()
            .subnetwork("string")
            .endpointUri("string")
            .build())
        .customRoutesEnabled(false)
        .build())
    .port(0)
    .project("string")
    .releaseChannel("string")
    .scalingConfig(MetastoreServiceScalingConfigArgs.builder()
        .autoscalingConfig(MetastoreServiceScalingConfigAutoscalingConfigArgs.builder()
            .autoscalingEnabled(false)
            .limitConfig(MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs.builder()
                .maxScalingFactor(0)
                .minScalingFactor(0)
                .build())
            .build())
        .instanceSize("string")
        .scalingFactor(0)
        .build())
    .scheduledBackup(MetastoreServiceScheduledBackupArgs.builder()
        .backupLocation("string")
        .cronSchedule("string")
        .enabled(false)
        .timeZone("string")
        .build())
    .serviceId("string")
    .telemetryConfig(MetastoreServiceTelemetryConfigArgs.builder()
        .logFormat("string")
        .build())
    .tier("string")
    .build());
metastore_service_resource = gcp.dataproc.MetastoreService("metastoreServiceResource",
    database_type="string",
    deletion_protection=False,
    encryption_config={
        "kms_key": "string",
    },
    hive_metastore_config={
        "version": "string",
        "auxiliary_versions": [{
            "key": "string",
            "version": "string",
            "config_overrides": {
                "string": "string",
            },
        }],
        "config_overrides": {
            "string": "string",
        },
        "endpoint_protocol": "string",
        "kerberos_config": {
            "keytab": {
                "cloud_secret": "string",
            },
            "krb5_config_gcs_uri": "string",
            "principal": "string",
        },
    },
    labels={
        "string": "string",
    },
    location="string",
    maintenance_window={
        "day_of_week": "string",
        "hour_of_day": 0,
    },
    metadata_integration={
        "data_catalog_config": {
            "enabled": False,
        },
    },
    network="string",
    network_config={
        "consumers": [{
            "subnetwork": "string",
            "endpoint_uri": "string",
        }],
        "custom_routes_enabled": False,
    },
    port=0,
    project="string",
    release_channel="string",
    scaling_config={
        "autoscaling_config": {
            "autoscaling_enabled": False,
            "limit_config": {
                "max_scaling_factor": 0,
                "min_scaling_factor": 0,
            },
        },
        "instance_size": "string",
        "scaling_factor": 0,
    },
    scheduled_backup={
        "backup_location": "string",
        "cron_schedule": "string",
        "enabled": False,
        "time_zone": "string",
    },
    service_id="string",
    telemetry_config={
        "log_format": "string",
    },
    tier="string")
const metastoreServiceResource = new gcp.dataproc.MetastoreService("metastoreServiceResource", {
    databaseType: "string",
    deletionProtection: false,
    encryptionConfig: {
        kmsKey: "string",
    },
    hiveMetastoreConfig: {
        version: "string",
        auxiliaryVersions: [{
            key: "string",
            version: "string",
            configOverrides: {
                string: "string",
            },
        }],
        configOverrides: {
            string: "string",
        },
        endpointProtocol: "string",
        kerberosConfig: {
            keytab: {
                cloudSecret: "string",
            },
            krb5ConfigGcsUri: "string",
            principal: "string",
        },
    },
    labels: {
        string: "string",
    },
    location: "string",
    maintenanceWindow: {
        dayOfWeek: "string",
        hourOfDay: 0,
    },
    metadataIntegration: {
        dataCatalogConfig: {
            enabled: false,
        },
    },
    network: "string",
    networkConfig: {
        consumers: [{
            subnetwork: "string",
            endpointUri: "string",
        }],
        customRoutesEnabled: false,
    },
    port: 0,
    project: "string",
    releaseChannel: "string",
    scalingConfig: {
        autoscalingConfig: {
            autoscalingEnabled: false,
            limitConfig: {
                maxScalingFactor: 0,
                minScalingFactor: 0,
            },
        },
        instanceSize: "string",
        scalingFactor: 0,
    },
    scheduledBackup: {
        backupLocation: "string",
        cronSchedule: "string",
        enabled: false,
        timeZone: "string",
    },
    serviceId: "string",
    telemetryConfig: {
        logFormat: "string",
    },
    tier: "string",
});
type: gcp:dataproc:MetastoreService
properties:
    databaseType: string
    deletionProtection: false
    encryptionConfig:
        kmsKey: string
    hiveMetastoreConfig:
        auxiliaryVersions:
            - configOverrides:
                string: string
              key: string
              version: string
        configOverrides:
            string: string
        endpointProtocol: string
        kerberosConfig:
            keytab:
                cloudSecret: string
            krb5ConfigGcsUri: string
            principal: string
        version: string
    labels:
        string: string
    location: string
    maintenanceWindow:
        dayOfWeek: string
        hourOfDay: 0
    metadataIntegration:
        dataCatalogConfig:
            enabled: false
    network: string
    networkConfig:
        consumers:
            - endpointUri: string
              subnetwork: string
        customRoutesEnabled: false
    port: 0
    project: string
    releaseChannel: string
    scalingConfig:
        autoscalingConfig:
            autoscalingEnabled: false
            limitConfig:
                maxScalingFactor: 0
                minScalingFactor: 0
        instanceSize: string
        scalingFactor: 0
    scheduledBackup:
        backupLocation: string
        cronSchedule: string
        enabled: false
        timeZone: string
    serviceId: string
    telemetryConfig:
        logFormat: string
    tier: string
MetastoreService Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
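For example, the following Python sketch (the resource names and argument values are illustrative placeholders, not taken from the examples above) passes the same nested maintenance_window and hive_metastore_config inputs first as typed args classes and then as dictionary literals:
import pulumi_gcp as gcp
# Typed args classes, as documented on this page:
args_style = gcp.dataproc.MetastoreService("args-style",
    service_id="metastore-args-style",
    location="us-central1",
    tier="DEVELOPER",
    maintenance_window=gcp.dataproc.MetastoreServiceMaintenanceWindowArgs(
        hour_of_day=2,
        day_of_week="SUNDAY",
    ),
    hive_metastore_config=gcp.dataproc.MetastoreServiceHiveMetastoreConfigArgs(
        version="3.1.2",
    ))
# Equivalent dictionary literals with snake_case keys:
dict_style = gcp.dataproc.MetastoreService("dict-style",
    service_id="metastore-dict-style",
    location="us-central1",
    tier="DEVELOPER",
    maintenance_window={
        "hour_of_day": 2,
        "day_of_week": "SUNDAY",
    },
    hive_metastore_config={
        "version": "3.1.2",
    })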
The MetastoreService resource accepts the following input properties:
- DatabaseType string
- The database type that the Metastore service stores its data.
Default value is MYSQL. Possible values are: MYSQL, SPANNER.
- DeletionProtection bool
- Indicates if the dataproc metastore should be protected against accidental deletions.
- EncryptionConfig MetastoreServiceEncryptionConfig
- Information used to configure the Dataproc Metastore service to encrypt customer data at rest. Structure is documented below.
- HiveMetastoreConfig MetastoreServiceHiveMetastoreConfig
- Configuration information specific to running Hive metastore software as the metastore service. Structure is documented below.
- Labels Dictionary<string, string>
- User-defined labels for the metastore service.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field effective_labels for all of the labels present on the resource.
- Location string
- The location where the metastore service should reside.
The default value is global.
- MaintenanceWindow MetastoreServiceMaintenanceWindow
- The one hour maintenance window of the metastore service.
This specifies when the service can be restarted for maintenance purposes in UTC time.
Maintenance window is not needed for services with the SPANNER database type. Structure is documented below.
- MetadataIntegration MetastoreServiceMetadataIntegration
- The setting that defines how metastore metadata should be integrated with external services and systems. Structure is documented below.
- Network string
- The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}".
- NetworkConfig MetastoreServiceNetworkConfig
- The configuration specifying the network settings for the Dataproc Metastore service. Structure is documented below.
- Port int
- The TCP port at which the metastore service is reached. Default: 9083.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- ReleaseChannel string
- The release channel of the service. If unspecified, defaults to STABLE. Default value is STABLE. Possible values are: CANARY, STABLE.
- ScalingConfig MetastoreServiceScalingConfig
- Represents the scaling configuration of a metastore service. Structure is documented below.
- ScheduledBackup MetastoreServiceScheduledBackup
- The configuration of scheduled backup for the metastore service. Structure is documented below.
- ServiceId string
- The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
3 and 63 characters.
- TelemetryConfig MetastoreServiceTelemetryConfig
- The configuration specifying telemetry settings for the Dataproc Metastore service. If unspecified defaults to JSON. Structure is documented below.
- Tier string
- The tier of the service.
Possible values are: DEVELOPER, ENTERPRISE.
- DatabaseType string
- The database type that the Metastore service stores its data.
Default value is MYSQL. Possible values are: MYSQL, SPANNER.
- DeletionProtection bool
- Indicates if the dataproc metastore should be protected against accidental deletions.
- EncryptionConfig MetastoreServiceEncryptionConfigArgs
- Information used to configure the Dataproc Metastore service to encrypt customer data at rest. Structure is documented below.
- HiveMetastoreConfig MetastoreServiceHiveMetastoreConfigArgs
- Configuration information specific to running Hive metastore software as the metastore service. Structure is documented below.
- Labels map[string]string
- User-defined labels for the metastore service.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field effective_labels for all of the labels present on the resource.
- Location string
- The location where the metastore service should reside.
The default value is global.
- MaintenanceWindow MetastoreServiceMaintenanceWindowArgs
- The one hour maintenance window of the metastore service.
This specifies when the service can be restarted for maintenance purposes in UTC time.
Maintenance window is not needed for services with the SPANNER database type. Structure is documented below.
- MetadataIntegration MetastoreServiceMetadataIntegrationArgs
- The setting that defines how metastore metadata should be integrated with external services and systems. Structure is documented below.
- Network string
- The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}".
- NetworkConfig MetastoreServiceNetworkConfigArgs
- The configuration specifying the network settings for the Dataproc Metastore service. Structure is documented below.
- Port int
- The TCP port at which the metastore service is reached. Default: 9083.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- ReleaseChannel string
- The release channel of the service. If unspecified, defaults to STABLE. Default value is STABLE. Possible values are: CANARY, STABLE.
- ScalingConfig MetastoreServiceScalingConfigArgs
- Represents the scaling configuration of a metastore service. Structure is documented below.
- ScheduledBackup MetastoreServiceScheduledBackupArgs
- The configuration of scheduled backup for the metastore service. Structure is documented below.
- ServiceId string
- The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
3 and 63 characters.
- TelemetryConfig MetastoreServiceTelemetryConfigArgs
- The configuration specifying telemetry settings for the Dataproc Metastore service. If unspecified defaults to JSON. Structure is documented below.
- Tier string
- The tier of the service.
Possible values are: DEVELOPER, ENTERPRISE.
- databaseType String
- The database type that the Metastore service stores its data.
Default value is MYSQL. Possible values are: MYSQL, SPANNER.
- deletionProtection Boolean
- Indicates if the dataproc metastore should be protected against accidental deletions.
- encryptionConfig MetastoreServiceEncryptionConfig
- Information used to configure the Dataproc Metastore service to encrypt customer data at rest. Structure is documented below.
- hiveMetastoreConfig MetastoreServiceHiveMetastoreConfig
- Configuration information specific to running Hive metastore software as the metastore service. Structure is documented below.
- labels Map<String,String>
- User-defined labels for the metastore service.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field effective_labels for all of the labels present on the resource.
- location String
- The location where the metastore service should reside.
The default value is global.
- maintenanceWindow MetastoreServiceMaintenanceWindow
- The one hour maintenance window of the metastore service.
This specifies when the service can be restarted for maintenance purposes in UTC time.
Maintenance window is not needed for services with the SPANNER database type. Structure is documented below.
- metadataIntegration MetastoreServiceMetadataIntegration
- The setting that defines how metastore metadata should be integrated with external services and systems. Structure is documented below.
- network String
- The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}".
- networkConfig MetastoreServiceNetworkConfig
- The configuration specifying the network settings for the Dataproc Metastore service. Structure is documented below.
- port Integer
- The TCP port at which the metastore service is reached. Default: 9083.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- releaseChannel String
- The release channel of the service. If unspecified, defaults to STABLE. Default value is STABLE. Possible values are: CANARY, STABLE.
- scalingConfig MetastoreServiceScalingConfig
- Represents the scaling configuration of a metastore service. Structure is documented below.
- scheduledBackup MetastoreServiceScheduledBackup
- The configuration of scheduled backup for the metastore service. Structure is documented below.
- serviceId String
- The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
3 and 63 characters.
- telemetryConfig MetastoreServiceTelemetryConfig
- The configuration specifying telemetry settings for the Dataproc Metastore service. If unspecified defaults to JSON. Structure is documented below.
- tier String
- The tier of the service.
Possible values are: DEVELOPER, ENTERPRISE.
- databaseType string
- The database type that the Metastore service stores its data.
Default value is MYSQL. Possible values are: MYSQL, SPANNER.
- deletionProtection boolean
- Indicates if the dataproc metastore should be protected against accidental deletions.
- encryptionConfig MetastoreServiceEncryptionConfig
- Information used to configure the Dataproc Metastore service to encrypt customer data at rest. Structure is documented below.
- hiveMetastoreConfig MetastoreServiceHiveMetastoreConfig
- Configuration information specific to running Hive metastore software as the metastore service. Structure is documented below.
- labels {[key: string]: string}
- User-defined labels for the metastore service.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field effective_labels for all of the labels present on the resource.
- location string
- The location where the metastore service should reside.
The default value is global.
- maintenanceWindow MetastoreServiceMaintenanceWindow
- The one hour maintenance window of the metastore service.
This specifies when the service can be restarted for maintenance purposes in UTC time.
Maintenance window is not needed for services with the SPANNER database type. Structure is documented below.
- metadataIntegration MetastoreServiceMetadataIntegration
- The setting that defines how metastore metadata should be integrated with external services and systems. Structure is documented below.
- network string
- The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}".
- networkConfig MetastoreServiceNetworkConfig
- The configuration specifying the network settings for the Dataproc Metastore service. Structure is documented below.
- port number
- The TCP port at which the metastore service is reached. Default: 9083.
- project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- releaseChannel string
- The release channel of the service. If unspecified, defaults to STABLE. Default value is STABLE. Possible values are: CANARY, STABLE.
- scalingConfig MetastoreServiceScalingConfig
- Represents the scaling configuration of a metastore service. Structure is documented below.
- scheduledBackup MetastoreServiceScheduledBackup
- The configuration of scheduled backup for the metastore service. Structure is documented below.
- serviceId string
- The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
3 and 63 characters.
- telemetryConfig MetastoreServiceTelemetryConfig
- The configuration specifying telemetry settings for the Dataproc Metastore service. If unspecified defaults to JSON. Structure is documented below.
- tier string
- The tier of the service.
Possible values are: DEVELOPER, ENTERPRISE.
- database_type str
- The database type that the Metastore service stores its data.
Default value is MYSQL. Possible values are: MYSQL, SPANNER.
- deletion_protection bool
- Indicates if the dataproc metastore should be protected against accidental deletions.
- encryption_config MetastoreServiceEncryptionConfigArgs
- Information used to configure the Dataproc Metastore service to encrypt customer data at rest. Structure is documented below.
- hive_metastore_config MetastoreServiceHiveMetastoreConfigArgs
- Configuration information specific to running Hive metastore software as the metastore service. Structure is documented below.
- labels Mapping[str, str]
- User-defined labels for the metastore service.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field effective_labels for all of the labels present on the resource.
- location str
- The location where the metastore service should reside.
The default value is global.
- maintenance_window MetastoreServiceMaintenanceWindowArgs
- The one hour maintenance window of the metastore service.
This specifies when the service can be restarted for maintenance purposes in UTC time.
Maintenance window is not needed for services with the SPANNER database type. Structure is documented below.
- metadata_integration MetastoreServiceMetadataIntegrationArgs
- The setting that defines how metastore metadata should be integrated with external services and systems. Structure is documented below.
- network str
- The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}".
- network_config MetastoreServiceNetworkConfigArgs
- The configuration specifying the network settings for the Dataproc Metastore service. Structure is documented below.
- port int
- The TCP port at which the metastore service is reached. Default: 9083.
- project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- release_channel str
- The release channel of the service. If unspecified, defaults to STABLE. Default value is STABLE. Possible values are: CANARY, STABLE.
- scaling_config MetastoreServiceScalingConfigArgs
- Represents the scaling configuration of a metastore service. Structure is documented below.
- scheduled_backup MetastoreServiceScheduledBackupArgs
- The configuration of scheduled backup for the metastore service. Structure is documented below.
- service_id str
- The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
3 and 63 characters.
- telemetry_config MetastoreServiceTelemetryConfigArgs
- The configuration specifying telemetry settings for the Dataproc Metastore service. If unspecified defaults to JSON. Structure is documented below.
- tier str
- The tier of the service.
Possible values are: DEVELOPER, ENTERPRISE.
- databaseType String
- The database type that the Metastore service stores its data.
Default value is MYSQL. Possible values are: MYSQL, SPANNER.
- deletionProtection Boolean
- Indicates if the dataproc metastore should be protected against accidental deletions.
- encryptionConfig Property Map
- Information used to configure the Dataproc Metastore service to encrypt customer data at rest. Structure is documented below.
- hiveMetastoreConfig Property Map
- Configuration information specific to running Hive metastore software as the metastore service. Structure is documented below.
- labels Map<String>
- User-defined labels for the metastore service.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field effective_labels for all of the labels present on the resource.
- location String
- The location where the metastore service should reside.
The default value is global.
- maintenanceWindow Property Map
- The one hour maintenance window of the metastore service.
This specifies when the service can be restarted for maintenance purposes in UTC time.
Maintenance window is not needed for services with the SPANNER database type. Structure is documented below.
- metadataIntegration Property Map
- The setting that defines how metastore metadata should be integrated with external services and systems. Structure is documented below.
- network String
- The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}".
- networkConfig Property Map
- The configuration specifying the network settings for the Dataproc Metastore service. Structure is documented below.
- port Number
- The TCP port at which the metastore service is reached. Default: 9083.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- releaseChannel String
- The release channel of the service. If unspecified, defaults to STABLE. Default value is STABLE. Possible values are: CANARY, STABLE.
- scalingConfig Property Map
- Represents the scaling configuration of a metastore service. Structure is documented below.
- scheduledBackup Property Map
- The configuration of scheduled backup for the metastore service. Structure is documented below.
- serviceId String
- The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
3 and 63 characters.
- telemetryConfig Property Map
- The configuration specifying telemetry settings for the Dataproc Metastore service. If unspecified defaults to JSON. Structure is documented below.
- tier String
- The tier of the service.
Possible values are: DEVELOPER, ENTERPRISE.
Outputs
All input properties are implicitly available as output properties. Additionally, the MetastoreService resource produces the following output properties:
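As a brief, hedged illustration in Python (the resource name and arguments below are placeholders), output properties such as endpoint_uri, state, and artifact_gcs_uri can be exported from the stack or passed to other resources:
import pulumi
import pulumi_gcp as gcp
# Placeholder resource; any of the page's examples would work equally well here.
svc = gcp.dataproc.MetastoreService("svc",
    service_id="metastore-outputs-sketch",
    location="us-central1",
    tier="DEVELOPER",
    hive_metastore_config={"version": "3.1.2"})
# Output properties are Pulumi Outputs: export them or wire them into other resources.
pulumi.export("metastoreEndpointUri", svc.endpoint_uri)
pulumi.export("metastoreState", svc.state)
pulumi.export("metastoreArtifactGcsUri", svc.artifact_gcs_uri)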
- ArtifactGcsUri string
- A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
- EffectiveLabels Dictionary<string, string>
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- EndpointUri string
- The URI of the endpoint used to access the metastore service.
- Id string
- The provider-assigned unique ID for this managed resource.
- Name string
- The relative resource name of the metastore service.
- PulumiLabels Dictionary<string, string>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- State string
- The current state of the metastore service.
- StateMessage string
- Additional information about the current state of the metastore service, if available.
- Uid string
- The globally unique resource identifier of the metastore service.
- ArtifactGcsUri string
- A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
- EffectiveLabels map[string]string
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- EndpointUri string
- The URI of the endpoint used to access the metastore service.
- Id string
- The provider-assigned unique ID for this managed resource.
- Name string
- The relative resource name of the metastore service.
- PulumiLabels map[string]string
- The combination of labels configured directly on the resource and default labels configured on the provider.
- State string
- The current state of the metastore service.
- StateMessage string
- Additional information about the current state of the metastore service, if available.
- Uid string
- The globally unique resource identifier of the metastore service.
- artifactGcsUri String
- A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
- effectiveLabels Map<String,String>
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- endpointUri String
- The URI of the endpoint used to access the metastore service.
- id String
- The provider-assigned unique ID for this managed resource.
- name String
- The relative resource name of the metastore service.
- pulumiLabels Map<String,String>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- state String
- The current state of the metastore service.
- stateMessage String
- Additional information about the current state of the metastore service, if available.
- uid String
- The globally unique resource identifier of the metastore service.
- artifactGcsUri string
- A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
- effectiveLabels {[key: string]: string}
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- endpointUri string
- The URI of the endpoint used to access the metastore service.
- id string
- The provider-assigned unique ID for this managed resource.
- name string
- The relative resource name of the metastore service.
- pulumiLabels {[key: string]: string}
- The combination of labels configured directly on the resource and default labels configured on the provider.
- state string
- The current state of the metastore service.
- stateMessage string
- Additional information about the current state of the metastore service, if available.
- uid string
- The globally unique resource identifier of the metastore service.
- artifact_gcs_uri str
- A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
- effective_labels Mapping[str, str]
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- endpoint_uri str
- The URI of the endpoint used to access the metastore service.
- id str
- The provider-assigned unique ID for this managed resource.
- name str
- The relative resource name of the metastore service.
- pulumi_labels Mapping[str, str]
- The combination of labels configured directly on the resource and default labels configured on the provider.
- state str
- The current state of the metastore service.
- state_message str
- Additional information about the current state of the metastore service, if available.
- uid str
- The globally unique resource identifier of the metastore service.
- artifactGcsUri String
- A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
- effectiveLabels Map<String>
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- endpointUri String
- The URI of the endpoint used to access the metastore service.
- id String
- The provider-assigned unique ID for this managed resource.
- name String
- The relative resource name of the metastore service.
- pulumiLabels Map<String>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- state String
- The current state of the metastore service.
- stateMessage String
- Additional information about the current state of the metastore service, if available.
- uid String
- The globally unique resource identifier of the metastore service.
Look up Existing MetastoreService Resource
Get an existing MetastoreService resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: MetastoreServiceState, opts?: CustomResourceOptions): MetastoreService
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        artifact_gcs_uri: Optional[str] = None,
        database_type: Optional[str] = None,
        deletion_protection: Optional[bool] = None,
        effective_labels: Optional[Mapping[str, str]] = None,
        encryption_config: Optional[MetastoreServiceEncryptionConfigArgs] = None,
        endpoint_uri: Optional[str] = None,
        hive_metastore_config: Optional[MetastoreServiceHiveMetastoreConfigArgs] = None,
        labels: Optional[Mapping[str, str]] = None,
        location: Optional[str] = None,
        maintenance_window: Optional[MetastoreServiceMaintenanceWindowArgs] = None,
        metadata_integration: Optional[MetastoreServiceMetadataIntegrationArgs] = None,
        name: Optional[str] = None,
        network: Optional[str] = None,
        network_config: Optional[MetastoreServiceNetworkConfigArgs] = None,
        port: Optional[int] = None,
        project: Optional[str] = None,
        pulumi_labels: Optional[Mapping[str, str]] = None,
        release_channel: Optional[str] = None,
        scaling_config: Optional[MetastoreServiceScalingConfigArgs] = None,
        scheduled_backup: Optional[MetastoreServiceScheduledBackupArgs] = None,
        service_id: Optional[str] = None,
        state: Optional[str] = None,
        state_message: Optional[str] = None,
        telemetry_config: Optional[MetastoreServiceTelemetryConfigArgs] = None,
        tier: Optional[str] = None,
        uid: Optional[str] = None) -> MetastoreService
func GetMetastoreService(ctx *Context, name string, id IDInput, state *MetastoreServiceState, opts ...ResourceOption) (*MetastoreService, error)
public static MetastoreService Get(string name, Input<string> id, MetastoreServiceState? state, CustomResourceOptions? opts = null)
public static MetastoreService get(String name, Output<String> id, MetastoreServiceState state, CustomResourceOptions options)
resources:
  _:
    type: gcp:dataproc:MetastoreService
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
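A minimal Python sketch of the lookup, assuming an existing service whose full resource ID is known (the project, location, and service ID below are placeholders, and the ID format is an assumption for illustration); the returned resource exposes the state properties listed below:
import pulumi
import pulumi_gcp as gcp
# Look up an existing Dataproc Metastore service by name and resource ID;
# substitute the actual ID of the service you want to reference.
existing = gcp.dataproc.MetastoreService.get("existing-metastore",
    "projects/my-project/locations/us-central1/services/metastore-srv")
pulumi.export("existingEndpointUri", existing.endpoint_uri)
pulumi.export("existingState", existing.state)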
- ArtifactGcsUri string
- A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
- DatabaseType string
- The database type that the Metastore service stores its data.
Default value is MYSQL. Possible values are: MYSQL, SPANNER.
- DeletionProtection bool
- Indicates if the dataproc metastore should be protected against accidental deletions.
- EffectiveLabels Dictionary<string, string>
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- EncryptionConfig MetastoreServiceEncryptionConfig
- Information used to configure the Dataproc Metastore service to encrypt customer data at rest. Structure is documented below.
- EndpointUri string
- The URI of the endpoint used to access the metastore service.
- HiveMetastoreConfig MetastoreServiceHiveMetastoreConfig
- Configuration information specific to running Hive metastore software as the metastore service. Structure is documented below.
- Labels Dictionary<string, string>
- User-defined labels for the metastore service.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field effective_labels for all of the labels present on the resource.
- Location string
- The location where the metastore service should reside.
The default value is global.
- MaintenanceWindow MetastoreServiceMaintenanceWindow
- The one hour maintenance window of the metastore service.
This specifies when the service can be restarted for maintenance purposes in UTC time.
Maintenance window is not needed for services with the SPANNER database type. Structure is documented below.
- MetadataIntegration MetastoreServiceMetadataIntegration
- The setting that defines how metastore metadata should be integrated with external services and systems. Structure is documented below.
- Name string
- The relative resource name of the metastore service.
- Network string
- The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}".
- NetworkConfig MetastoreServiceNetworkConfig
- The configuration specifying the network settings for the Dataproc Metastore service. Structure is documented below.
- Port int
- The TCP port at which the metastore service is reached. Default: 9083.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- PulumiLabels Dictionary<string, string>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- ReleaseChannel string
- The release channel of the service. If unspecified, defaults to STABLE. Default value is STABLE. Possible values are: CANARY, STABLE.
- ScalingConfig MetastoreServiceScalingConfig
- Represents the scaling configuration of a metastore service. Structure is documented below.
- ScheduledBackup MetastoreServiceScheduledBackup
- The configuration of scheduled backup for the metastore service. Structure is documented below.
- ServiceId string
- The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
3 and 63 characters.
- State string
- The current state of the metastore service.
- StateMessage string
- Additional information about the current state of the metastore service, if available.
- TelemetryConfig MetastoreServiceTelemetryConfig
- The configuration specifying telemetry settings for the Dataproc Metastore service. If unspecified defaults to JSON. Structure is documented below.
- Tier string
- The tier of the service.
Possible values are: DEVELOPER, ENTERPRISE.
- Uid string
- The globally unique resource identifier of the metastore service.
- ArtifactGcsUri string
- A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
- DatabaseType string
- The database type that the Metastore service stores its data.
Default value is MYSQL. Possible values are: MYSQL, SPANNER.
- DeletionProtection bool
- Indicates if the dataproc metastore should be protected against accidental deletions.
- EffectiveLabels map[string]string
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- EncryptionConfig MetastoreServiceEncryptionConfigArgs
- Information used to configure the Dataproc Metastore service to encrypt customer data at rest. Structure is documented below.
- EndpointUri string
- The URI of the endpoint used to access the metastore service.
- HiveMetastoreConfig MetastoreServiceHiveMetastoreConfigArgs
- Configuration information specific to running Hive metastore software as the metastore service. Structure is documented below.
- Labels map[string]string
- User-defined labels for the metastore service.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field effective_labels for all of the labels present on the resource.
- Location string
- The location where the metastore service should reside.
The default value is global.
- MaintenanceWindow MetastoreServiceMaintenanceWindowArgs
- The one hour maintenance window of the metastore service.
This specifies when the service can be restarted for maintenance purposes in UTC time.
Maintenance window is not needed for services with the SPANNER database type. Structure is documented below.
- MetadataIntegration MetastoreServiceMetadataIntegrationArgs
- The setting that defines how metastore metadata should be integrated with external services and systems. Structure is documented below.
- Name string
- The relative resource name of the metastore service.
- Network string
- The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}".
- NetworkConfig MetastoreServiceNetworkConfigArgs
- The configuration specifying the network settings for the Dataproc Metastore service. Structure is documented below.
- Port int
- The TCP port at which the metastore service is reached. Default: 9083.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- PulumiLabels map[string]string
- The combination of labels configured directly on the resource and default labels configured on the provider.
- ReleaseChannel string
- The release channel of the service. If unspecified, defaults to STABLE. Default value is STABLE. Possible values are: CANARY, STABLE.
- ScalingConfig MetastoreServiceScalingConfigArgs
- Represents the scaling configuration of a metastore service. Structure is documented below.
- ScheduledBackup MetastoreServiceScheduledBackupArgs
- The configuration of scheduled backup for the metastore service. Structure is documented below.
- ServiceId string
- The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
3 and 63 characters.
- State string
- The current state of the metastore service.
- StateMessage string
- Additional information about the current state of the metastore service, if available.
- TelemetryConfig MetastoreServiceTelemetryConfigArgs
- The configuration specifying telemetry settings for the Dataproc Metastore service. If unspecified defaults to JSON. Structure is documented below.
- Tier string
- The tier of the service.
Possible values are: DEVELOPER, ENTERPRISE.
- Uid string
- The globally unique resource identifier of the metastore service.
- artifactGcsUri String
- A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
- databaseType String
- The database type that the Metastore service stores its data.
Default value is MYSQL. Possible values are: MYSQL, SPANNER.
- deletionProtection Boolean
- Indicates if the dataproc metastore should be protected against accidental deletions.
- effectiveLabels Map<String,String>
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- encryptionConfig MetastoreServiceEncryptionConfig
- Information used to configure the Dataproc Metastore service to encrypt customer data at rest. Structure is documented below.
- endpointUri String
- The URI of the endpoint used to access the metastore service.
- hiveMetastoreConfig MetastoreServiceHiveMetastoreConfig
- Configuration information specific to running Hive metastore software as the metastore service. Structure is documented below.
- labels Map<String,String>
- User-defined labels for the metastore service.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field effective_labels for all of the labels present on the resource.
- location String
- The location where the metastore service should reside.
The default value is global.
- maintenanceWindow MetastoreService Maintenance Window 
- The one hour maintenance window of the metastore service.
This specifies when the service can be restarted for maintenance purposes in UTC time.
Maintenance window is not needed for services with the SPANNER database type. Structure is documented below.
- metadataIntegration MetastoreService Metadata Integration 
- The setting that defines how metastore metadata should be integrated with external services and systems. Structure is documented below.
- name String
- The relative resource name of the metastore service.
- network String
- The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}".
- networkConfig MetastoreService Network Config 
- The configuration specifying the network settings for the Dataproc Metastore service. Structure is documented below.
- port Integer
- The TCP port at which the metastore service is reached. Default: 9083.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- pulumiLabels Map<String,String>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- releaseChannel String
- The release channel of the service. If unspecified, defaults to STABLE. Default value is STABLE. Possible values are: CANARY, STABLE.
- scalingConfig MetastoreService Scaling Config 
- Represents the scaling configuration of a metastore service. Structure is documented below.
- scheduledBackup MetastoreService Scheduled Backup 
- The configuration of scheduled backup for the metastore service. Structure is documented below.
- serviceId String
- The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
3 and 63 characters.
- state String
- The current state of the metastore service.
- stateMessage String
- Additional information about the current state of the metastore service, if available.
- telemetryConfig MetastoreService Telemetry Config 
- The configuration specifying telemetry settings for the Dataproc Metastore service. If unspecified defaults to JSON. Structure is documented below.
- tier String
- The tier of the service.
Possible values are: DEVELOPER,ENTERPRISE.
- uid String
- The globally unique resource identifier of the metastore service.
- artifactGcsUri string
- A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
- databaseType string
- The database type in which the Metastore service stores its data.
Default value is MYSQL. Possible values are: MYSQL, SPANNER.
- deletionProtection boolean
- Indicates if the dataproc metastore should be protected against accidental deletions.
- effectiveLabels {[key: string]: string}
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- encryptionConfig MetastoreService Encryption Config 
- Information used to configure the Dataproc Metastore service to encrypt customer data at rest. Structure is documented below.
- endpointUri string
- The URI of the endpoint used to access the metastore service.
- hiveMetastoreConfig MetastoreService Hive Metastore Config
- Configuration information specific to running Hive metastore software as the metastore service. Structure is documented below.
- labels {[key: string]: string}
- User-defined labels for the metastore service.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field effective_labels for all of the labels present on the resource.
- location string
- The location where the metastore service should reside.
The default value is global.
- maintenanceWindow MetastoreService Maintenance Window 
- The one hour maintenance window of the metastore service.
This specifies when the service can be restarted for maintenance purposes in UTC time.
Maintenance window is not needed for services with the SPANNER database type. Structure is documented below.
- metadataIntegration MetastoreService Metadata Integration 
- The setting that defines how metastore metadata should be integrated with external services and systems. Structure is documented below.
- name string
- The relative resource name of the metastore service.
- network string
- The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}".
- networkConfig MetastoreService Network Config 
- The configuration specifying the network settings for the Dataproc Metastore service. Structure is documented below.
- port number
- The TCP port at which the metastore service is reached. Default: 9083.
- project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- pulumiLabels {[key: string]: string}
- The combination of labels configured directly on the resource and default labels configured on the provider.
- releaseChannel string
- The release channel of the service. If unspecified, defaults to STABLE. Default value is STABLE. Possible values are: CANARY, STABLE.
- scalingConfig MetastoreService Scaling Config 
- Represents the scaling configuration of a metastore service. Structure is documented below.
- scheduledBackup MetastoreService Scheduled Backup 
- The configuration of scheduled backup for the metastore service. Structure is documented below.
- serviceId string
- The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
3 and 63 characters.
- state string
- The current state of the metastore service.
- stateMessage string
- Additional information about the current state of the metastore service, if available.
- telemetryConfig MetastoreService Telemetry Config 
- The configuration specifying telemetry settings for the Dataproc Metastore service. If unspecified defaults to JSON. Structure is documented below.
- tier string
- The tier of the service.
Possible values are: DEVELOPER,ENTERPRISE.
- uid string
- The globally unique resource identifier of the metastore service.
- artifact_gcs_uri str
- A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
- database_type str
- The database type in which the Metastore service stores its data.
Default value is MYSQL. Possible values are: MYSQL, SPANNER.
- deletion_protection bool
- Indicates if the dataproc metastore should be protected against accidental deletions.
- effective_labels Mapping[str, str]
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- encryption_config MetastoreService Encryption Config Args 
- Information used to configure the Dataproc Metastore service to encrypt customer data at rest. Structure is documented below.
- endpoint_uri str
- The URI of the endpoint used to access the metastore service.
- hive_metastore_config MetastoreService Hive Metastore Config Args
- Configuration information specific to running Hive metastore software as the metastore service. Structure is documented below.
- labels Mapping[str, str]
- User-defined labels for the metastore service.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field effective_labels for all of the labels present on the resource.
- location str
- The location where the metastore service should reside.
The default value is global.
- maintenance_window MetastoreService Maintenance Window Args 
- The one hour maintenance window of the metastore service.
This specifies when the service can be restarted for maintenance purposes in UTC time.
Maintenance window is not needed for services with the SPANNER database type. Structure is documented below.
- metadata_integration MetastoreService Metadata Integration Args 
- The setting that defines how metastore metadata should be integrated with external services and systems. Structure is documented below.
- name str
- The relative resource name of the metastore service.
- network str
- The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}".
- network_config MetastoreService Network Config Args 
- The configuration specifying the network settings for the Dataproc Metastore service. Structure is documented below.
- port int
- The TCP port at which the metastore service is reached. Default: 9083.
- project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- pulumi_labels Mapping[str, str]
- The combination of labels configured directly on the resource and default labels configured on the provider.
- release_channel str
- The release channel of the service. If unspecified, defaults to STABLE. Default value is STABLE. Possible values are: CANARY, STABLE.
- scaling_config MetastoreService Scaling Config Args 
- Represents the scaling configuration of a metastore service. Structure is documented below.
- scheduled_backup MetastoreService Scheduled Backup Args 
- The configuration of scheduled backup for the metastore service. Structure is documented below.
- service_id str
- The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
3 and 63 characters.
- state str
- The current state of the metastore service.
- state_message str
- Additional information about the current state of the metastore service, if available.
- telemetry_config MetastoreService Telemetry Config Args 
- The configuration specifying telemetry settings for the Dataproc Metastore service. If unspecified defaults to JSON. Structure is documented below.
- tier str
- The tier of the service.
Possible values are: DEVELOPER,ENTERPRISE.
- uid str
- The globally unique resource identifier of the metastore service.
- artifactGcsUri String
- A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
- databaseType String
- The database type in which the Metastore service stores its data.
Default value is MYSQL. Possible values are: MYSQL, SPANNER.
- deletionProtection Boolean
- Indicates if the dataproc metastore should be protected against accidental deletions.
- effectiveLabels Map<String>
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- encryptionConfig Property Map
- Information used to configure the Dataproc Metastore service to encrypt customer data at rest. Structure is documented below.
- endpointUri String
- The URI of the endpoint used to access the metastore service.
- hiveMetastoreConfig Property Map
- Configuration information specific to running Hive metastore software as the metastore service. Structure is documented below.
- labels Map<String>
- User-defined labels for the metastore service.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field effective_labels for all of the labels present on the resource.
- location String
- The location where the metastore service should reside.
The default value is global.
- maintenanceWindow Property Map
- The one hour maintenance window of the metastore service.
This specifies when the service can be restarted for maintenance purposes in UTC time.
Maintenance window is not needed for services with the SPANNER database type. Structure is documented below.
- metadataIntegration Property Map
- The setting that defines how metastore metadata should be integrated with external services and systems. Structure is documented below.
- name String
- The relative resource name of the metastore service.
- network String
- The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}".
- networkConfig Property Map
- The configuration specifying the network settings for the Dataproc Metastore service. Structure is documented below.
- port Number
- The TCP port at which the metastore service is reached. Default: 9083.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- pulumiLabels Map<String>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- releaseChannel String
- The release channel of the service. If unspecified, defaults to STABLE. Default value is STABLE. Possible values are: CANARY, STABLE.
- scalingConfig Property Map
- Represents the scaling configuration of a metastore service. Structure is documented below.
- scheduledBackup Property Map
- The configuration of scheduled backup for the metastore service. Structure is documented below.
- serviceId String
- The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
3 and 63 characters.
- state String
- The current state of the metastore service.
- stateMessage String
- Additional information about the current state of the metastore service, if available.
- telemetryConfig Property Map
- The configuration specifying telemetry settings for the Dataproc Metastore service. If unspecified defaults to JSON. Structure is documented below.
- tier String
- The tier of the service.
Possible values are: DEVELOPER,ENTERPRISE.
- uid String
- The globally unique resource identifier of the metastore service.
Supporting Types
MetastoreServiceEncryptionConfig, MetastoreServiceEncryptionConfigArgs        
- KmsKey string
- The fully qualified customer provided Cloud KMS key name to use for customer data encryption.
Use the following format: projects/([^/]+)/locations/([^/]+)/keyRings/([^/]+)/cryptoKeys/([^/]+)
- KmsKey string
- The fully qualified customer provided Cloud KMS key name to use for customer data encryption.
Use the following format: projects/([^/]+)/locations/([^/]+)/keyRings/([^/]+)/cryptoKeys/([^/]+)
- kmsKey String
- The fully qualified customer provided Cloud KMS key name to use for customer data encryption.
Use the following format: projects/([^/]+)/locations/([^/]+)/keyRings/([^/]+)/cryptoKeys/([^/]+)
- kmsKey string
- The fully qualified customer provided Cloud KMS key name to use for customer data encryption.
Use the following format: projects/([^/]+)/locations/([^/]+)/keyRings/([^/]+)/cryptoKeys/([^/]+)
- kms_key str
- The fully qualified customer provided Cloud KMS key name to use for customer data encryption.
Use the following format: projects/([^/]+)/locations/([^/]+)/keyRings/([^/]+)/cryptoKeys/([^/]+)
- kmsKey String
- The fully qualified customer provided Cloud KMS key name to use for customer data encryption.
Use the following format: projects/([^/]+)/locations/([^/]+)/keyRings/([^/]+)/cryptoKeys/([^/]+)
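As a quick illustration of the encryption settings above, here is a minimal TypeScript sketch. The project, key ring, and key names are placeholders, and the key typically also needs to be usable by the Dataproc Metastore service agent.
import * as gcp from "@pulumi/gcp";

// Minimal CMEK sketch: the KMS key name below is a placeholder, not a real key.
const cmek = new gcp.dataproc.MetastoreService("cmek", {
    serviceId: "metastore-cmek",
    location: "us-central1",
    hiveMetastoreConfig: {
        version: "3.1.2",
    },
    encryptionConfig: {
        kmsKey: "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key",
    },
});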
MetastoreServiceHiveMetastoreConfig, MetastoreServiceHiveMetastoreConfigArgs          
- Version string
- The Hive metastore schema version.
- AuxiliaryVersions List<MetastoreService Hive Metastore Config Auxiliary Version> 
- A mapping of Hive metastore version to the auxiliary version configuration. When specified, a secondary Hive metastore service is created along with the primary service. All auxiliary versions must be less than the service's primary version. The key is the auxiliary service name and it must match the regular expression a-z?. This means that the first character must be a lowercase letter, and all the following characters must be hyphens, lowercase letters, or digits, except the last character, which cannot be a hyphen. Structure is documented below.
- ConfigOverrides Dictionary<string, string>
- A mapping of Hive metastore configuration key-value pairs to apply to the Hive metastore (configured in hive-site.xml). The mappings override system defaults (some keys cannot be overridden)
- EndpointProtocol string
- The protocol to use for the metastore service endpoint. If unspecified, defaults to THRIFT. Default value is THRIFT. Possible values are: THRIFT, GRPC.
- KerberosConfig MetastoreService Hive Metastore Config Kerberos Config 
- Information used to configure the Hive metastore service as a service principal in a Kerberos realm. Structure is documented below.
- Version string
- The Hive metastore schema version.
- AuxiliaryVersions []MetastoreService Hive Metastore Config Auxiliary Version 
- A mapping of Hive metastore version to the auxiliary version configuration. When specified, a secondary Hive metastore service is created along with the primary service. All auxiliary versions must be less than the service's primary version. The key is the auxiliary service name and it must match the regular expression a-z?. This means that the first character must be a lowercase letter, and all the following characters must be hyphens, lowercase letters, or digits, except the last character, which cannot be a hyphen. Structure is documented below.
- ConfigOverrides map[string]string
- A mapping of Hive metastore configuration key-value pairs to apply to the Hive metastore (configured in hive-site.xml). The mappings override system defaults (some keys cannot be overridden)
- EndpointProtocol string
- The protocol to use for the metastore service endpoint. If unspecified, defaults to THRIFT. Default value is THRIFT. Possible values are: THRIFT, GRPC.
- KerberosConfig MetastoreService Hive Metastore Config Kerberos Config 
- Information used to configure the Hive metastore service as a service principal in a Kerberos realm. Structure is documented below.
- version String
- The Hive metastore schema version.
- auxiliaryVersions List<MetastoreService Hive Metastore Config Auxiliary Version> 
- A mapping of Hive metastore version to the auxiliary version configuration. When specified, a secondary Hive metastore service is created along with the primary service. All auxiliary versions must be less than the service's primary version. The key is the auxiliary service name and it must match the regular expression a-z?. This means that the first character must be a lowercase letter, and all the following characters must be hyphens, lowercase letters, or digits, except the last character, which cannot be a hyphen. Structure is documented below.
- configOverrides Map<String,String>
- A mapping of Hive metastore configuration key-value pairs to apply to the Hive metastore (configured in hive-site.xml). The mappings override system defaults (some keys cannot be overridden)
- endpointProtocol String
- The protocol to use for the metastore service endpoint. If unspecified, defaults to THRIFT. Default value is THRIFT. Possible values are: THRIFT, GRPC.
- kerberosConfig MetastoreService Hive Metastore Config Kerberos Config 
- Information used to configure the Hive metastore service as a service principal in a Kerberos realm. Structure is documented below.
- version string
- The Hive metastore schema version.
- auxiliaryVersions MetastoreService Hive Metastore Config Auxiliary Version[] 
- A mapping of Hive metastore version to the auxiliary version configuration. When specified, a secondary Hive metastore service is created along with the primary service. All auxiliary versions must be less than the service's primary version. The key is the auxiliary service name and it must match the regular expression a-z?. This means that the first character must be a lowercase letter, and all the following characters must be hyphens, lowercase letters, or digits, except the last character, which cannot be a hyphen. Structure is documented below.
- configOverrides {[key: string]: string}
- A mapping of Hive metastore configuration key-value pairs to apply to the Hive metastore (configured in hive-site.xml). The mappings override system defaults (some keys cannot be overridden)
- endpointProtocol string
- The protocol to use for the metastore service endpoint. If unspecified, defaults to THRIFT. Default value is THRIFT. Possible values are: THRIFT, GRPC.
- kerberosConfig MetastoreService Hive Metastore Config Kerberos Config 
- Information used to configure the Hive metastore service as a service principal in a Kerberos realm. Structure is documented below.
- version str
- The Hive metastore schema version.
- auxiliary_versions Sequence[MetastoreService Hive Metastore Config Auxiliary Version] 
- A mapping of Hive metastore version to the auxiliary version configuration. When specified, a secondary Hive metastore service is created along with the primary service. All auxiliary versions must be less than the service's primary version. The key is the auxiliary service name and it must match the regular expression a-z?. This means that the first character must be a lowercase letter, and all the following characters must be hyphens, lowercase letters, or digits, except the last character, which cannot be a hyphen. Structure is documented below.
- config_overrides Mapping[str, str]
- A mapping of Hive metastore configuration key-value pairs to apply to the Hive metastore (configured in hive-site.xml). The mappings override system defaults (some keys cannot be overridden)
- endpoint_protocol str
- The protocol to use for the metastore service endpoint. If unspecified, defaults to THRIFT. Default value is THRIFT. Possible values are: THRIFT, GRPC.
- kerberos_config MetastoreService Hive Metastore Config Kerberos Config 
- Information used to configure the Hive metastore service as a service principal in a Kerberos realm. Structure is documented below.
- version String
- The Hive metastore schema version.
- auxiliaryVersions List<Property Map>
- A mapping of Hive metastore version to the auxiliary version configuration. When specified, a secondary Hive metastore service is created along with the primary service. All auxiliary versions must be less than the service's primary version. The key is the auxiliary service name and it must match the regular expression a-z?. This means that the first character must be a lowercase letter, and all the following characters must be hyphens, lowercase letters, or digits, except the last character, which cannot be a hyphen. Structure is documented below.
- configOverrides Map<String>
- A mapping of Hive metastore configuration key-value pairs to apply to the Hive metastore (configured in hive-site.xml). The mappings override system defaults (some keys cannot be overridden)
- endpointProtocol String
- The protocol to use for the metastore service endpoint. If unspecified, defaults to THRIFT. Default value is THRIFT. Possible values are: THRIFT, GRPC.
- kerberosConfig Property Map
- Information used to configure the Hive metastore service as a service principal in a Kerberos realm. Structure is documented below.
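For reference, a minimal TypeScript sketch that sets the Hive metastore version, a gRPC endpoint, and a hive-site.xml override; the override key and bucket name are illustrative assumptions, not required settings.
import * as gcp from "@pulumi/gcp";

// Sketch of hiveMetastoreConfig: the warehouse bucket and override value are placeholders.
const hiveTuned = new gcp.dataproc.MetastoreService("hive-tuned", {
    serviceId: "metastore-hive",
    location: "us-central1",
    hiveMetastoreConfig: {
        version: "3.1.2",
        endpointProtocol: "GRPC",
        configOverrides: {
            "hive.metastore.warehouse.dir": "gs://my-warehouse-bucket/hive-warehouse",
        },
    },
});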
MetastoreServiceHiveMetastoreConfigAuxiliaryVersion, MetastoreServiceHiveMetastoreConfigAuxiliaryVersionArgs              
- Key string
- The identifier for this object. Format specified above.
- Version string
- The Hive metastore version of the auxiliary service. It must be less than the primary Hive metastore service's version.
- ConfigOverrides Dictionary<string, string>
- A mapping of Hive metastore configuration key-value pairs to apply to the auxiliary Hive metastore (configured in hive-site.xml) in addition to the primary version's overrides. If keys are present in both the auxiliary version's overrides and the primary version's overrides, the value from the auxiliary version's overrides takes precedence.
- Key string
- The identifier for this object. Format specified above.
- Version string
- The Hive metastore version of the auxiliary service. It must be less than the primary Hive metastore service's version.
- ConfigOverrides map[string]string
- A mapping of Hive metastore configuration key-value pairs to apply to the auxiliary Hive metastore (configured in hive-site.xml) in addition to the primary version's overrides. If keys are present in both the auxiliary version's overrides and the primary version's overrides, the value from the auxiliary version's overrides takes precedence.
- key String
- The identifier for this object. Format specified above.
- version String
- The Hive metastore version of the auxiliary service. It must be less than the primary Hive metastore service's version.
- configOverrides Map<String,String>
- A mapping of Hive metastore configuration key-value pairs to apply to the auxiliary Hive metastore (configured in hive-site.xml) in addition to the primary version's overrides. If keys are present in both the auxiliary version's overrides and the primary version's overrides, the value from the auxiliary version's overrides takes precedence.
- key string
- The identifier for this object. Format specified above.
- version string
- The Hive metastore version of the auxiliary service. It must be less than the primary Hive metastore service's version.
- configOverrides {[key: string]: string}
- A mapping of Hive metastore configuration key-value pairs to apply to the auxiliary Hive metastore (configured in hive-site.xml) in addition to the primary version's overrides. If keys are present in both the auxiliary version's overrides and the primary version's overrides, the value from the auxiliary version's overrides takes precedence.
- key str
- The identifier for this object. Format specified above.
- version str
- The Hive metastore version of the auxiliary service. It must be less than the primary Hive metastore service's version.
- config_overrides Mapping[str, str]
- A mapping of Hive metastore configuration key-value pairs to apply to the auxiliary Hive metastore (configured in hive-site.xml) in addition to the primary version's overrides. If keys are present in both the auxiliary version's overrides and the primary version's overrides, the value from the auxiliary version's overrides takes precedence.
- key String
- The identifier for this object. Format specified above.
- version String
- The Hive metastore version of the auxiliary service. It must be less than the primary Hive metastore service's version.
- configOverrides Map<String>
- A mapping of Hive metastore configuration key-value pairs to apply to the auxiliary Hive metastore (configured in hive-site.xml) in addition to the primary version's overrides. If keys are present in both the auxiliary version's overrides and the primary version's overrides, the value from the auxiliary version's overrides takes precedence.
MetastoreServiceHiveMetastoreConfigKerberosConfig, MetastoreServiceHiveMetastoreConfigKerberosConfigArgs              
- Keytab MetastoreService Hive Metastore Config Kerberos Config Keytab
- A Kerberos keytab file that can be used to authenticate a service principal with a Kerberos Key Distribution Center (KDC). Structure is documented below.
- Krb5ConfigGcsUri string
- A Cloud Storage URI that specifies the path to a krb5.conf file. It is of the form gs://{bucket_name}/path/to/krb5.conf, although the file does not need to be named krb5.conf explicitly.
- Principal string
- A Kerberos principal that exists in both the keytab and the KDC to authenticate as. A typical principal is of the form "primary/instance@REALM", but there is no exact format.
- Keytab MetastoreService Hive Metastore Config Kerberos Config Keytab
- A Kerberos keytab file that can be used to authenticate a service principal with a Kerberos Key Distribution Center (KDC). Structure is documented below.
- Krb5ConfigGcsUri string
- A Cloud Storage URI that specifies the path to a krb5.conf file. It is of the form gs://{bucket_name}/path/to/krb5.conf, although the file does not need to be named krb5.conf explicitly.
- Principal string
- A Kerberos principal that exists in both the keytab and the KDC to authenticate as. A typical principal is of the form "primary/instance@REALM", but there is no exact format.
- keytab MetastoreService Hive Metastore Config Kerberos Config Keytab
- A Kerberos keytab file that can be used to authenticate a service principal with a Kerberos Key Distribution Center (KDC). Structure is documented below.
- krb5ConfigGcsUri String
- A Cloud Storage URI that specifies the path to a krb5.conf file. It is of the form gs://{bucket_name}/path/to/krb5.conf, although the file does not need to be named krb5.conf explicitly.
- principal String
- A Kerberos principal that exists in both the keytab and the KDC to authenticate as. A typical principal is of the form "primary/instance@REALM", but there is no exact format.
- keytab MetastoreService Hive Metastore Config Kerberos Config Keytab
- A Kerberos keytab file that can be used to authenticate a service principal with a Kerberos Key Distribution Center (KDC). Structure is documented below.
- krb5ConfigGcsUri string
- A Cloud Storage URI that specifies the path to a krb5.conf file. It is of the form gs://{bucket_name}/path/to/krb5.conf, although the file does not need to be named krb5.conf explicitly.
- principal string
- A Kerberos principal that exists in both the keytab and the KDC to authenticate as. A typical principal is of the form "primary/instance@REALM", but there is no exact format.
- keytab MetastoreService Hive Metastore Config Kerberos Config Keytab
- A Kerberos keytab file that can be used to authenticate a service principal with a Kerberos Key Distribution Center (KDC). Structure is documented below.
- krb5_config_gcs_uri str
- A Cloud Storage URI that specifies the path to a krb5.conf file. It is of the form gs://{bucket_name}/path/to/krb5.conf, although the file does not need to be named krb5.conf explicitly.
- principal str
- A Kerberos principal that exists in both the keytab and the KDC to authenticate as. A typical principal is of the form "primary/instance@REALM", but there is no exact format.
- keytab Property Map
- A Kerberos keytab file that can be used to authenticate a service principal with a Kerberos Key Distribution Center (KDC). Structure is documented below.
- krb5ConfigGcsUri String
- A Cloud Storage URI that specifies the path to a krb5.conf file. It is of the form gs://{bucket_name}/path/to/krb5.conf, although the file does not need to be named krb5.conf explicitly.
- principal String
- A Kerberos principal that exists in both the keytab and the KDC to authenticate as. A typical principal is of the form "primary/instance@REALM", but there is no exact format.
MetastoreServiceHiveMetastoreConfigKerberosConfigKeytab, MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs                
- CloudSecret string
- The relative resource name of a Secret Manager secret version, in the following form: "projects/{projectNumber}/secrets/{secret_id}/versions/{version_id}".
- CloudSecret string
- The relative resource name of a Secret Manager secret version, in the following form: "projects/{projectNumber}/secrets/{secret_id}/versions/{version_id}".
- cloudSecret String
- The relative resource name of a Secret Manager secret version, in the following form: "projects/{projectNumber}/secrets/{secret_id}/versions/{version_id}".
- cloudSecret string
- The relative resource name of a Secret Manager secret version, in the following form: "projects/{projectNumber}/secrets/{secret_id}/versions/{version_id}".
- cloud_secret str
- The relative resource name of a Secret Manager secret version, in the following form: "projects/{projectNumber}/secrets/{secret_id}/versions/{version_id}".
- cloudSecret String
- The relative resource name of a Secret Manager secret version, in the following form: "projects/{projectNumber}/secrets/{secret_id}/versions/{version_id}".
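To tie the Kerberos pieces together, here is a minimal TypeScript sketch; the Secret Manager secret version, principal, and krb5.conf location are placeholders for resources assumed to exist already.
import * as gcp from "@pulumi/gcp";

// Kerberos sketch: the secret version, principal, and krb5.conf URI are placeholders.
const kerberized = new gcp.dataproc.MetastoreService("kerberized", {
    serviceId: "metastore-krb",
    location: "us-central1",
    hiveMetastoreConfig: {
        version: "3.1.2",
        kerberosConfig: {
            keytab: {
                cloudSecret: "projects/1234567890/secrets/keytab-secret/versions/1",
            },
            principal: "hive/metastore@EXAMPLE.REALM",
            krb5ConfigGcsUri: "gs://my-bucket/krb5/krb5.conf",
        },
    },
});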
MetastoreServiceMaintenanceWindow, MetastoreServiceMaintenanceWindowArgs        
- day_of_week str
- The day of week, when the window starts.
Possible values are: MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY.
- hour_of_day int
- The hour of day (0-23) when the window starts.
MetastoreServiceMetadataIntegration, MetastoreServiceMetadataIntegrationArgs        
- DataCatalogConfig MetastoreService Metadata Integration Data Catalog Config
- The integration config for the Data Catalog service. Structure is documented below.
- DataCatalogConfig MetastoreService Metadata Integration Data Catalog Config
- The integration config for the Data Catalog service. Structure is documented below.
- dataCatalogConfig MetastoreService Metadata Integration Data Catalog Config
- The integration config for the Data Catalog service. Structure is documented below.
- dataCatalogConfig MetastoreService Metadata Integration Data Catalog Config
- The integration config for the Data Catalog service. Structure is documented below.
- data_catalog_config MetastoreService Metadata Integration Data Catalog Config
- The integration config for the Data Catalog service. Structure is documented below.
- dataCatalogConfig Property Map
- The integration config for the Data Catalog service. Structure is documented below.
MetastoreServiceMetadataIntegrationDataCatalogConfig, MetastoreServiceMetadataIntegrationDataCatalogConfigArgs              
- Enabled bool
- Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog.
- Enabled bool
- Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog.
- enabled Boolean
- Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog.
- enabled boolean
- Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog.
- enabled bool
- Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog.
- enabled Boolean
- Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog.
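For example, a minimal TypeScript sketch that turns on Data Catalog sync; everything other than metadataIntegration is illustrative.
import * as gcp from "@pulumi/gcp";

// Sketch: sync metastore metadata to Data Catalog.
const catalogSynced = new gcp.dataproc.MetastoreService("catalog-synced", {
    serviceId: "metastore-catalog",
    location: "us-central1",
    hiveMetastoreConfig: {
        version: "3.1.2",
    },
    metadataIntegration: {
        dataCatalogConfig: {
            enabled: true,
        },
    },
});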
MetastoreServiceNetworkConfig, MetastoreServiceNetworkConfigArgs        
- Consumers List<MetastoreService Network Config Consumer>
- The consumer-side network configuration for the Dataproc Metastore instance. Structure is documented below.
- CustomRoutesEnabled bool
- Enables custom routes to be imported and exported for the Dataproc Metastore service's peered VPC network.
- Consumers []MetastoreService Network Config Consumer
- The consumer-side network configuration for the Dataproc Metastore instance. Structure is documented below.
- CustomRoutesEnabled bool
- Enables custom routes to be imported and exported for the Dataproc Metastore service's peered VPC network.
- consumers List<MetastoreService Network Config Consumer>
- The consumer-side network configuration for the Dataproc Metastore instance. Structure is documented below.
- customRoutesEnabled Boolean
- Enables custom routes to be imported and exported for the Dataproc Metastore service's peered VPC network.
- consumers MetastoreService Network Config Consumer[]
- The consumer-side network configuration for the Dataproc Metastore instance. Structure is documented below.
- customRoutesEnabled boolean
- Enables custom routes to be imported and exported for the Dataproc Metastore service's peered VPC network.
- consumers Sequence[MetastoreService Network Config Consumer]
- The consumer-side network configuration for the Dataproc Metastore instance. Structure is documented below.
- custom_routes_enabled bool
- Enables custom routes to be imported and exported for the Dataproc Metastore service's peered VPC network.
- consumers List<Property Map>
- The consumer-side network configuration for the Dataproc Metastore instance. Structure is documented below.
- customRoutesEnabled Boolean
- Enables custom routes to be imported and exported for the Dataproc Metastore service's peered VPC network.
MetastoreServiceNetworkConfigConsumer, MetastoreServiceNetworkConfigConsumerArgs          
- Subnetwork string
- The subnetwork of the customer project from which an IP address is reserved and used as the Dataproc Metastore service's endpoint. It is accessible to hosts in the subnet and to all hosts in a subnet in the same region and same network. There must be at least one IP address available in the subnet's primary range. The subnet is specified in the following form: projects/{projectNumber}/regions/{region_id}/subnetworks/{subnetwork_id}
- EndpointUri string
- (Output) The URI of the endpoint used to access the metastore service.
- Subnetwork string
- The subnetwork of the customer project from which an IP address is reserved and used as the Dataproc Metastore service's endpoint. It is accessible to hosts in the subnet and to all hosts in a subnet in the same region and same network. There must be at least one IP address available in the subnet's primary range. The subnet is specified in the following form: projects/{projectNumber}/regions/{region_id}/subnetworks/{subnetwork_id}
- EndpointUri string
- (Output) The URI of the endpoint used to access the metastore service.
- subnetwork String
- The subnetwork of the customer project from which an IP address is reserved and used as the Dataproc Metastore service's endpoint. It is accessible to hosts in the subnet and to all hosts in a subnet in the same region and same network. There must be at least one IP address available in the subnet's primary range. The subnet is specified in the following form: projects/{projectNumber}/regions/{region_id}/subnetworks/{subnetwork_id}
- endpointUri String
- (Output) The URI of the endpoint used to access the metastore service.
- subnetwork string
- The subnetwork of the customer project from which an IP address is reserved and used as the Dataproc Metastore service's endpoint. It is accessible to hosts in the subnet and to all hosts in a subnet in the same region and same network. There must be at least one IP address available in the subnet's primary range. The subnet is specified in the following form: projects/{projectNumber}/regions/{region_id}/subnetworks/{subnetwork_id}
- endpointUri string
- (Output) The URI of the endpoint used to access the metastore service.
- subnetwork str
- The subnetwork of the customer project from which an IP address is reserved and used as the Dataproc Metastore service's endpoint. It is accessible to hosts in the subnet and to all hosts in a subnet in the same region and same network. There must be at least one IP address available in the subnet's primary range. The subnet is specified in the following form: projects/{projectNumber}/regions/{region_id}/subnetworks/{subnetwork_id}
- endpoint_uri str
- (Output) The URI of the endpoint used to access the metastore service.
- subnetwork String
- The subnetwork of the customer project from which an IP address is reserved and used as the Dataproc Metastore service's endpoint. It is accessible to hosts in the subnet and to all hosts in a subnet in the same region and same network. There must be at least one IP address available in the subnet's primary range. The subnet is specified in the following form: projects/{projectNumber}/regions/{region_id}/subnetworks/{subnetwork_id}
- endpointUri String
- (Output) The URI of the endpoint used to access the metastore service.
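For example, a minimal TypeScript sketch of a consumer-side endpoint configured through networkConfig; the project number and subnetwork name are placeholders, and the subnetwork is assumed to already exist.
import * as gcp from "@pulumi/gcp";

// Sketch: expose the service on a consumer subnetwork; the subnetwork path is a placeholder.
const consumerEndpoint = new gcp.dataproc.MetastoreService("consumer-endpoint", {
    serviceId: "metastore-consumer",
    location: "us-central1",
    hiveMetastoreConfig: {
        version: "3.1.2",
    },
    networkConfig: {
        consumers: [{
            subnetwork: "projects/1234567890/regions/us-central1/subnetworks/my-subnet",
        }],
    },
});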
MetastoreServiceScalingConfig, MetastoreServiceScalingConfigArgs        
- AutoscalingConfig MetastoreService Scaling Config Autoscaling Config 
- Represents the autoscaling configuration of a metastore service. Structure is documented below.
- InstanceSize string
- Metastore instance sizes.
Possible values are: EXTRA_SMALL,SMALL,MEDIUM,LARGE,EXTRA_LARGE.
- ScalingFactor double
- Scaling factor, in increments of 0.1 for values less than 1.0, and increments of 1.0 for values greater than 1.0.
- AutoscalingConfig MetastoreService Scaling Config Autoscaling Config 
- Represents the autoscaling configuration of a metastore service. Structure is documented below.
- InstanceSize string
- Metastore instance sizes.
Possible values are: EXTRA_SMALL,SMALL,MEDIUM,LARGE,EXTRA_LARGE.
- ScalingFactor float64
- Scaling factor, in increments of 0.1 for values less than 1.0, and increments of 1.0 for values greater than 1.0.
- autoscalingConfig MetastoreService Scaling Config Autoscaling Config 
- Represents the autoscaling configuration of a metastore service. Structure is documented below.
- instanceSize String
- Metastore instance sizes.
Possible values are: EXTRA_SMALL,SMALL,MEDIUM,LARGE,EXTRA_LARGE.
- scalingFactor Double
- Scaling factor, in increments of 0.1 for values less than 1.0, and increments of 1.0 for values greater than 1.0.
- autoscalingConfig MetastoreService Scaling Config Autoscaling Config 
- Represents the autoscaling configuration of a metastore service. Structure is documented below.
- instanceSize string
- Metastore instance sizes.
Possible values are: EXTRA_SMALL,SMALL,MEDIUM,LARGE,EXTRA_LARGE.
- scalingFactor number
- Scaling factor, in increments of 0.1 for values less than 1.0, and increments of 1.0 for values greater than 1.0.
- autoscaling_config MetastoreService Scaling Config Autoscaling Config 
- Represents the autoscaling configuration of a metastore service. Structure is documented below.
- instance_size str
- Metastore instance sizes.
Possible values are: EXTRA_SMALL,SMALL,MEDIUM,LARGE,EXTRA_LARGE.
- scaling_factor float
- Scaling factor, in increments of 0.1 for values less than 1.0, and increments of 1.0 for values greater than 1.0.
- autoscalingConfig Property Map
- Represents the autoscaling configuration of a metastore service. Structure is documented below.
- instanceSize String
- Metastore instance sizes.
Possible values are: EXTRA_SMALL,SMALL,MEDIUM,LARGE,EXTRA_LARGE.
- scalingFactor Number
- Scaling factor, in increments of 0.1 for values less than 1.0, and increments of 1.0 for values greater than 1.0.
MetastoreServiceScalingConfigAutoscalingConfig, MetastoreServiceScalingConfigAutoscalingConfigArgs            
- AutoscalingEnabled bool
- Defines whether autoscaling is enabled. The default value is false.
- LimitConfig MetastoreService Scaling Config Autoscaling Config Limit Config 
- Represents the limit configuration of a metastore service. Structure is documented below.
- AutoscalingEnabled bool
- Defines whether autoscaling is enabled. The default value is false.
- LimitConfig MetastoreService Scaling Config Autoscaling Config Limit Config 
- Represents the limit configuration of a metastore service. Structure is documented below.
- autoscalingEnabled Boolean
- Defines whether autoscaling is enabled. The default value is false.
- limitConfig MetastoreService Scaling Config Autoscaling Config Limit Config 
- Represents the limit configuration of a metastore service. Structure is documented below.
- autoscalingEnabled boolean
- Defines whether autoscaling is enabled. The default value is false.
- limitConfig MetastoreService Scaling Config Autoscaling Config Limit Config 
- Represents the limit configuration of a metastore service. Structure is documented below.
- autoscaling_enabled bool
- Defines whether autoscaling is enabled. The default value is false.
- limit_config MetastoreService Scaling Config Autoscaling Config Limit Config 
- Represents the limit configuration of a metastore service. Structure is documented below.
- autoscalingEnabled Boolean
- Defines whether autoscaling is enabled. The default value is false.
- limitConfig Property Map
- Represents the limit configuration of a metastore service. Structure is documented below.
MetastoreServiceScalingConfigAutoscalingConfigLimitConfig, MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs                
- MaxScalingFactor double
- The maximum scaling factor that the service will autoscale to. The default value is 6.0.
- MinScalingFactor double
- The minimum scaling factor that the service will autoscale to. The default value is 0.1.
- MaxScalingFactor float64
- The maximum scaling factor that the service will autoscale to. The default value is 6.0.
- MinScalingFactor float64
- The minimum scaling factor that the service will autoscale to. The default value is 0.1.
- maxScalingFactor Double
- The maximum scaling factor that the service will autoscale to. The default value is 6.0.
- minScalingFactor Double
- The minimum scaling factor that the service will autoscale to. The default value is 0.1.
- maxScalingFactor number
- The maximum scaling factor that the service will autoscale to. The default value is 6.0.
- minScalingFactor number
- The minimum scaling factor that the service will autoscale to. The default value is 0.1.
- max_scaling_factor float
- The maximum scaling factor that the service will autoscale to. The default value is 6.0.
- min_scaling_factor float
- The minimum scaling factor that the service will autoscale to. The default value is 0.1.
- maxScalingFactor Number
- The maximum scaling factor that the service will autoscale to. The default value is 6.0.
- minScalingFactor Number
- The minimum scaling factor that the service will autoscale to. The default value is 0.1.
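Putting the scaling pieces together, a minimal TypeScript sketch that enables autoscaling bounded by a limit config; the factors shown are illustrative, and autoscaling availability depends on your service configuration.
import * as gcp from "@pulumi/gcp";

// Sketch: enable autoscaling between scaling factors 0.1 and 2.0 (illustrative values).
const autoscaled = new gcp.dataproc.MetastoreService("autoscaled", {
    serviceId: "metastore-autoscaled",
    location: "us-central1",
    hiveMetastoreConfig: {
        version: "3.1.2",
    },
    scalingConfig: {
        autoscalingConfig: {
            autoscalingEnabled: true,
            limitConfig: {
                minScalingFactor: 0.1,
                maxScalingFactor: 2.0,
            },
        },
    },
});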
MetastoreServiceScheduledBackup, MetastoreServiceScheduledBackupArgs        
- BackupLocation string
- A Cloud Storage URI of a folder, in the format gs://<bucket_name>/<path_inside_bucket>. A sub-folder <backup_folder> containing backup files will be stored below it.
- CronSchedule string
- The scheduled interval in Cron format, see https://en.wikipedia.org/wiki/Cron The default is empty: scheduled backup is not enabled. Must be specified to enable scheduled backups.
- Enabled bool
- Defines whether the scheduled backup is enabled. The default value is false.
- TimeZone string
- Specifies the time zone to be used when interpreting cronSchedule. Must be a time zone name from the time zone database (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), e.g. America/Los_Angeles or Africa/Abidjan. If left unspecified, the default is UTC.
- BackupLocation string
- A Cloud Storage URI of a folder, in the format gs://<bucket_name>/<path_inside_bucket>. A sub-folder <backup_folder> containing backup files will be stored below it.
- CronSchedule string
- The scheduled interval in Cron format, see https://en.wikipedia.org/wiki/Cron The default is empty: scheduled backup is not enabled. Must be specified to enable scheduled backups.
- Enabled bool
- Defines whether the scheduled backup is enabled. The default value is false.
- TimeZone string
- Specifies the time zone to be used when interpreting cronSchedule. Must be a time zone name from the time zone database (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), e.g. America/Los_Angeles or Africa/Abidjan. If left unspecified, the default is UTC.
- backupLocation String
- A Cloud Storage URI of a folder, in the format gs://<bucket_name>/<path_inside_bucket>. A sub-folder <backup_folder> containing backup files will be stored below it.
- cronSchedule String
- The scheduled interval in Cron format, see https://en.wikipedia.org/wiki/Cron The default is empty: scheduled backup is not enabled. Must be specified to enable scheduled backups.
- enabled Boolean
- Defines whether the scheduled backup is enabled. The default value is false.
- timeZone String
- Specifies the time zone to be used when interpreting cronSchedule. Must be a time zone name from the time zone database (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), e.g. America/Los_Angeles or Africa/Abidjan. If left unspecified, the default is UTC.
- backupLocation string
- A Cloud Storage URI of a folder, in the format gs://<bucket_name>/<path_inside_bucket>. A sub-folder <backup_folder> containing backup files will be stored below it.
- cronSchedule string
- The scheduled interval in Cron format, see https://en.wikipedia.org/wiki/Cron The default is empty: scheduled backup is not enabled. Must be specified to enable scheduled backups.
- enabled boolean
- Defines whether the scheduled backup is enabled. The default value is false.
- timeZone string
- Specifies the time zone to be used when interpreting cronSchedule. Must be a time zone name from the time zone database (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), e.g. America/Los_Angeles or Africa/Abidjan. If left unspecified, the default is UTC.
- backup_location str
- A Cloud Storage URI of a folder, in the format gs://<bucket_name>/<path_inside_bucket>. A sub-folder <backup_folder> containing backup files will be stored below it.
- cron_schedule str
- The scheduled interval in Cron format, see https://en.wikipedia.org/wiki/Cron The default is empty: scheduled backup is not enabled. Must be specified to enable scheduled backups.
- enabled bool
- Defines whether the scheduled backup is enabled. The default value is false.
- time_zone str
- Specifies the time zone to be used when interpreting cronSchedule. Must be a time zone name from the time zone database (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), e.g. America/Los_Angeles or Africa/Abidjan. If left unspecified, the default is UTC.
- backupLocation String
- A Cloud Storage URI of a folder, in the format gs://<bucket_name>/<path_inside_bucket>. A sub-folder <backup_folder> containing backup files will be stored below it.
- cronSchedule String
- The scheduled interval in Cron format, see https://en.wikipedia.org/wiki/Cron The default is empty: scheduled backup is not enabled. Must be specified to enable scheduled backups.
- enabled Boolean
- Defines whether the scheduled backup is enabled. The default value is false.
- timeZone String
- Specifies the time zone to be used when interpreting cronSchedule. Must be a time zone name from the time zone database (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), e.g. America/Los_Angeles or Africa/Abidjan. If left unspecified, the default is UTC.
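For example, a minimal TypeScript sketch of a nightly backup at 03:00 UTC; the Cloud Storage folder is a placeholder.
import * as gcp from "@pulumi/gcp";

// Sketch: nightly scheduled backup; the backup bucket path is a placeholder.
const backedUp = new gcp.dataproc.MetastoreService("backed-up", {
    serviceId: "metastore-backup",
    location: "us-central1",
    hiveMetastoreConfig: {
        version: "3.1.2",
    },
    scheduledBackup: {
        enabled: true,
        cronSchedule: "0 3 * * *",
        timeZone: "UTC",
        backupLocation: "gs://my-backup-bucket/metastore-backups",
    },
});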
MetastoreServiceTelemetryConfig, MetastoreServiceTelemetryConfigArgs        
- LogFormat string
- The output format of the Dataproc Metastore service's logs.
Default value is JSON. Possible values are: LEGACY, JSON.
- LogFormat string
- The output format of the Dataproc Metastore service's logs.
Default value is JSON. Possible values are: LEGACY, JSON.
- logFormat String
- The output format of the Dataproc Metastore service's logs.
Default value is JSON. Possible values are: LEGACY, JSON.
- logFormat string
- The output format of the Dataproc Metastore service's logs.
Default value is JSON. Possible values are: LEGACY, JSON.
- log_format str
- The output format of the Dataproc Metastore service's logs.
Default value is JSON. Possible values are: LEGACY, JSON.
- logFormat String
- The output format of the Dataproc Metastore service's logs.
Default value is JSON. Possible values are: LEGACY, JSON.
Import
Service can be imported using any of these accepted formats:
- projects/{{project}}/locations/{{location}}/services/{{service_id}}
- {{project}}/{{location}}/{{service_id}}
- {{location}}/{{service_id}}
When using the pulumi import command, Service can be imported using one of the formats above. For example:
$ pulumi import gcp:dataproc/metastoreService:MetastoreService default projects/{{project}}/locations/{{location}}/services/{{service_id}}
$ pulumi import gcp:dataproc/metastoreService:MetastoreService default {{project}}/{{location}}/{{service_id}}
$ pulumi import gcp:dataproc/metastoreService:MetastoreService default {{location}}/{{service_id}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the google-beta Terraform Provider.