gcp.compute.NodeGroup
Represents a NodeGroup resource to manage a group of sole-tenant nodes.
To get more information about NodeGroup, see:
- API documentation
- How-to Guides
Warning: Due to limitations of the API, this provider cannot update the number of nodes in a node group. Any change to node group size, whether made through provider configuration or externally, will cause the provider to delete and recreate the node group.
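If you need to keep an existing group from being replaced by accident, Pulumi's generic resource options can help. Below is a minimal TypeScript sketch (not part of the official examples); the names and template URL are hypothetical, and it relies only on the standard protect and ignoreChanges resource options:

import * as gcp from "@pulumi/gcp";

// Hypothetical node group guarded against accidental replacement:
// "protect" makes Pulumi refuse to delete (and therefore replace) the resource,
// and "ignoreChanges" ignores drift in initialSize caused by external resizing.
const guarded = new gcp.compute.NodeGroup("guarded", {
    zone: "us-central1-a",
    initialSize: 1,
    nodeTemplate: "projects/my-project/regions/us-central1/nodeTemplates/soletenant-tmpl",
}, {
    protect: true,
    ignoreChanges: ["initialSize"],
});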
Example Usage
Node Group Basic
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const soletenant_tmpl = new gcp.compute.NodeTemplate("soletenant-tmpl", {
    name: "soletenant-tmpl",
    region: "us-central1",
    nodeType: "n1-node-96-624",
});
const nodes = new gcp.compute.NodeGroup("nodes", {
    name: "soletenant-group",
    zone: "us-central1-a",
    description: "example google_compute_node_group for the Google Provider",
    initialSize: 1,
    nodeTemplate: soletenant_tmpl.id,
});
import pulumi
import pulumi_gcp as gcp
soletenant_tmpl = gcp.compute.NodeTemplate("soletenant-tmpl",
    name="soletenant-tmpl",
    region="us-central1",
    node_type="n1-node-96-624")
nodes = gcp.compute.NodeGroup("nodes",
    name="soletenant-group",
    zone="us-central1-a",
    description="example google_compute_node_group for the Google Provider",
    initial_size=1,
    node_template=soletenant_tmpl.id)
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		soletenant_tmpl, err := compute.NewNodeTemplate(ctx, "soletenant-tmpl", &compute.NodeTemplateArgs{
			Name:     pulumi.String("soletenant-tmpl"),
			Region:   pulumi.String("us-central1"),
			NodeType: pulumi.String("n1-node-96-624"),
		})
		if err != nil {
			return err
		}
		_, err = compute.NewNodeGroup(ctx, "nodes", &compute.NodeGroupArgs{
			Name:         pulumi.String("soletenant-group"),
			Zone:         pulumi.String("us-central1-a"),
			Description:  pulumi.String("example google_compute_node_group for the Google Provider"),
			InitialSize:  pulumi.Int(1),
			NodeTemplate: soletenant_tmpl.ID(),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var soletenant_tmpl = new Gcp.Compute.NodeTemplate("soletenant-tmpl", new()
    {
        Name = "soletenant-tmpl",
        Region = "us-central1",
        NodeType = "n1-node-96-624",
    });
    var nodes = new Gcp.Compute.NodeGroup("nodes", new()
    {
        Name = "soletenant-group",
        Zone = "us-central1-a",
        Description = "example google_compute_node_group for the Google Provider",
        InitialSize = 1,
        NodeTemplate = soletenant_tmpl.Id,
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.NodeTemplate;
import com.pulumi.gcp.compute.NodeTemplateArgs;
import com.pulumi.gcp.compute.NodeGroup;
import com.pulumi.gcp.compute.NodeGroupArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var soletenant_tmpl = new NodeTemplate("soletenant-tmpl", NodeTemplateArgs.builder()
            .name("soletenant-tmpl")
            .region("us-central1")
            .nodeType("n1-node-96-624")
            .build());
        var nodes = new NodeGroup("nodes", NodeGroupArgs.builder()
            .name("soletenant-group")
            .zone("us-central1-a")
            .description("example google_compute_node_group for the Google Provider")
            .initialSize(1)
            .nodeTemplate(soletenant_tmpl.id())
            .build());
    }
}
resources:
  soletenant-tmpl:
    type: gcp:compute:NodeTemplate
    properties:
      name: soletenant-tmpl
      region: us-central1
      nodeType: n1-node-96-624
  nodes:
    type: gcp:compute:NodeGroup
    properties:
      name: soletenant-group
      zone: us-central1-a
      description: example google_compute_node_group for the Google Provider
      initialSize: 1
      nodeTemplate: ${["soletenant-tmpl"].id}
Node Group Maintenance Interval
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const soletenant_tmpl = new gcp.compute.NodeTemplate("soletenant-tmpl", {
    name: "soletenant-tmpl",
    region: "us-central1",
    nodeType: "c2-node-60-240",
});
const nodes = new gcp.compute.NodeGroup("nodes", {
    name: "soletenant-group",
    zone: "us-central1-a",
    description: "example google_compute_node_group for Terraform Google Provider",
    initialSize: 1,
    nodeTemplate: soletenant_tmpl.id,
    maintenanceInterval: "RECURRENT",
});
import pulumi
import pulumi_gcp as gcp
soletenant_tmpl = gcp.compute.NodeTemplate("soletenant-tmpl",
    name="soletenant-tmpl",
    region="us-central1",
    node_type="c2-node-60-240")
nodes = gcp.compute.NodeGroup("nodes",
    name="soletenant-group",
    zone="us-central1-a",
    description="example google_compute_node_group for Terraform Google Provider",
    initial_size=1,
    node_template=soletenant_tmpl.id,
    maintenance_interval="RECURRENT")
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		soletenant_tmpl, err := compute.NewNodeTemplate(ctx, "soletenant-tmpl", &compute.NodeTemplateArgs{
			Name:     pulumi.String("soletenant-tmpl"),
			Region:   pulumi.String("us-central1"),
			NodeType: pulumi.String("c2-node-60-240"),
		})
		if err != nil {
			return err
		}
		_, err = compute.NewNodeGroup(ctx, "nodes", &compute.NodeGroupArgs{
			Name:                pulumi.String("soletenant-group"),
			Zone:                pulumi.String("us-central1-a"),
			Description:         pulumi.String("example google_compute_node_group for Terraform Google Provider"),
			InitialSize:         pulumi.Int(1),
			NodeTemplate:        soletenant_tmpl.ID(),
			MaintenanceInterval: pulumi.String("RECURRENT"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var soletenant_tmpl = new Gcp.Compute.NodeTemplate("soletenant-tmpl", new()
    {
        Name = "soletenant-tmpl",
        Region = "us-central1",
        NodeType = "c2-node-60-240",
    });
    var nodes = new Gcp.Compute.NodeGroup("nodes", new()
    {
        Name = "soletenant-group",
        Zone = "us-central1-a",
        Description = "example google_compute_node_group for Terraform Google Provider",
        InitialSize = 1,
        NodeTemplate = soletenant_tmpl.Id,
        MaintenanceInterval = "RECURRENT",
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.NodeTemplate;
import com.pulumi.gcp.compute.NodeTemplateArgs;
import com.pulumi.gcp.compute.NodeGroup;
import com.pulumi.gcp.compute.NodeGroupArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var soletenant_tmpl = new NodeTemplate("soletenant-tmpl", NodeTemplateArgs.builder()
            .name("soletenant-tmpl")
            .region("us-central1")
            .nodeType("c2-node-60-240")
            .build());
        var nodes = new NodeGroup("nodes", NodeGroupArgs.builder()
            .name("soletenant-group")
            .zone("us-central1-a")
            .description("example google_compute_node_group for Terraform Google Provider")
            .initialSize(1)
            .nodeTemplate(soletenant_tmpl.id())
            .maintenanceInterval("RECURRENT")
            .build());
    }
}
resources:
  soletenant-tmpl:
    type: gcp:compute:NodeTemplate
    properties:
      name: soletenant-tmpl
      region: us-central1
      nodeType: c2-node-60-240
  nodes:
    type: gcp:compute:NodeGroup
    properties:
      name: soletenant-group
      zone: us-central1-a
      description: example google_compute_node_group for Terraform Google Provider
      initialSize: 1
      nodeTemplate: ${["soletenant-tmpl"].id}
      maintenanceInterval: RECURRENT
Node Group Autoscaling Policy
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const soletenant_tmpl = new gcp.compute.NodeTemplate("soletenant-tmpl", {
    name: "soletenant-tmpl",
    region: "us-central1",
    nodeType: "n1-node-96-624",
});
const nodes = new gcp.compute.NodeGroup("nodes", {
    name: "soletenant-group",
    zone: "us-central1-a",
    description: "example google_compute_node_group for Google Provider",
    maintenancePolicy: "RESTART_IN_PLACE",
    maintenanceWindow: {
        startTime: "08:00",
    },
    initialSize: 1,
    nodeTemplate: soletenant_tmpl.id,
    autoscalingPolicy: {
        mode: "ONLY_SCALE_OUT",
        minNodes: 1,
        maxNodes: 10,
    },
});
import pulumi
import pulumi_gcp as gcp
soletenant_tmpl = gcp.compute.NodeTemplate("soletenant-tmpl",
    name="soletenant-tmpl",
    region="us-central1",
    node_type="n1-node-96-624")
nodes = gcp.compute.NodeGroup("nodes",
    name="soletenant-group",
    zone="us-central1-a",
    description="example google_compute_node_group for Google Provider",
    maintenance_policy="RESTART_IN_PLACE",
    maintenance_window={
        "start_time": "08:00",
    },
    initial_size=1,
    node_template=soletenant_tmpl.id,
    autoscaling_policy={
        "mode": "ONLY_SCALE_OUT",
        "min_nodes": 1,
        "max_nodes": 10,
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		soletenant_tmpl, err := compute.NewNodeTemplate(ctx, "soletenant-tmpl", &compute.NodeTemplateArgs{
			Name:     pulumi.String("soletenant-tmpl"),
			Region:   pulumi.String("us-central1"),
			NodeType: pulumi.String("n1-node-96-624"),
		})
		if err != nil {
			return err
		}
		_, err = compute.NewNodeGroup(ctx, "nodes", &compute.NodeGroupArgs{
			Name:              pulumi.String("soletenant-group"),
			Zone:              pulumi.String("us-central1-a"),
			Description:       pulumi.String("example google_compute_node_group for Google Provider"),
			MaintenancePolicy: pulumi.String("RESTART_IN_PLACE"),
			MaintenanceWindow: &compute.NodeGroupMaintenanceWindowArgs{
				StartTime: pulumi.String("08:00"),
			},
			InitialSize:  pulumi.Int(1),
			NodeTemplate: soletenant_tmpl.ID(),
			AutoscalingPolicy: &compute.NodeGroupAutoscalingPolicyArgs{
				Mode:     pulumi.String("ONLY_SCALE_OUT"),
				MinNodes: pulumi.Int(1),
				MaxNodes: pulumi.Int(10),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var soletenant_tmpl = new Gcp.Compute.NodeTemplate("soletenant-tmpl", new()
    {
        Name = "soletenant-tmpl",
        Region = "us-central1",
        NodeType = "n1-node-96-624",
    });
    var nodes = new Gcp.Compute.NodeGroup("nodes", new()
    {
        Name = "soletenant-group",
        Zone = "us-central1-a",
        Description = "example google_compute_node_group for Google Provider",
        MaintenancePolicy = "RESTART_IN_PLACE",
        MaintenanceWindow = new Gcp.Compute.Inputs.NodeGroupMaintenanceWindowArgs
        {
            StartTime = "08:00",
        },
        InitialSize = 1,
        NodeTemplate = soletenant_tmpl.Id,
        AutoscalingPolicy = new Gcp.Compute.Inputs.NodeGroupAutoscalingPolicyArgs
        {
            Mode = "ONLY_SCALE_OUT",
            MinNodes = 1,
            MaxNodes = 10,
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.NodeTemplate;
import com.pulumi.gcp.compute.NodeTemplateArgs;
import com.pulumi.gcp.compute.NodeGroup;
import com.pulumi.gcp.compute.NodeGroupArgs;
import com.pulumi.gcp.compute.inputs.NodeGroupMaintenanceWindowArgs;
import com.pulumi.gcp.compute.inputs.NodeGroupAutoscalingPolicyArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var soletenant_tmpl = new NodeTemplate("soletenant-tmpl", NodeTemplateArgs.builder()
            .name("soletenant-tmpl")
            .region("us-central1")
            .nodeType("n1-node-96-624")
            .build());
        var nodes = new NodeGroup("nodes", NodeGroupArgs.builder()
            .name("soletenant-group")
            .zone("us-central1-a")
            .description("example google_compute_node_group for Google Provider")
            .maintenancePolicy("RESTART_IN_PLACE")
            .maintenanceWindow(NodeGroupMaintenanceWindowArgs.builder()
                .startTime("08:00")
                .build())
            .initialSize(1)
            .nodeTemplate(soletenant_tmpl.id())
            .autoscalingPolicy(NodeGroupAutoscalingPolicyArgs.builder()
                .mode("ONLY_SCALE_OUT")
                .minNodes(1)
                .maxNodes(10)
                .build())
            .build());
    }
}
resources:
  soletenant-tmpl:
    type: gcp:compute:NodeTemplate
    properties:
      name: soletenant-tmpl
      region: us-central1
      nodeType: n1-node-96-624
  nodes:
    type: gcp:compute:NodeGroup
    properties:
      name: soletenant-group
      zone: us-central1-a
      description: example google_compute_node_group for Google Provider
      maintenancePolicy: RESTART_IN_PLACE
      maintenanceWindow:
        startTime: "08:00"
      initialSize: 1
      nodeTemplate: ${["soletenant-tmpl"].id}
      autoscalingPolicy:
        mode: ONLY_SCALE_OUT
        minNodes: 1
        maxNodes: 10
Node Group Share Settings
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const guestProject = new gcp.organizations.Project("guest_project", {
    projectId: "project-id",
    name: "project-name",
    orgId: "123456789",
    deletionPolicy: "DELETE",
});
const soletenant_tmpl = new gcp.compute.NodeTemplate("soletenant-tmpl", {
    name: "soletenant-tmpl",
    region: "us-central1",
    nodeType: "n1-node-96-624",
});
const nodes = new gcp.compute.NodeGroup("nodes", {
    name: "soletenant-group",
    zone: "us-central1-f",
    description: "example google_compute_node_group for Terraform Google Provider",
    initialSize: 1,
    nodeTemplate: soletenant_tmpl.id,
    shareSettings: {
        shareType: "SPECIFIC_PROJECTS",
        projectMaps: [{
            id: guestProject.projectId,
            projectId: guestProject.projectId,
        }],
    },
});
import pulumi
import pulumi_gcp as gcp
guest_project = gcp.organizations.Project("guest_project",
    project_id="project-id",
    name="project-name",
    org_id="123456789",
    deletion_policy="DELETE")
soletenant_tmpl = gcp.compute.NodeTemplate("soletenant-tmpl",
    name="soletenant-tmpl",
    region="us-central1",
    node_type="n1-node-96-624")
nodes = gcp.compute.NodeGroup("nodes",
    name="soletenant-group",
    zone="us-central1-f",
    description="example google_compute_node_group for Terraform Google Provider",
    initial_size=1,
    node_template=soletenant_tmpl.id,
    share_settings={
        "share_type": "SPECIFIC_PROJECTS",
        "project_maps": [{
            "id": guest_project.project_id,
            "project_id": guest_project.project_id,
        }],
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		guestProject, err := organizations.NewProject(ctx, "guest_project", &organizations.ProjectArgs{
			ProjectId:      pulumi.String("project-id"),
			Name:           pulumi.String("project-name"),
			OrgId:          pulumi.String("123456789"),
			DeletionPolicy: pulumi.String("DELETE"),
		})
		if err != nil {
			return err
		}
		soletenant_tmpl, err := compute.NewNodeTemplate(ctx, "soletenant-tmpl", &compute.NodeTemplateArgs{
			Name:     pulumi.String("soletenant-tmpl"),
			Region:   pulumi.String("us-central1"),
			NodeType: pulumi.String("n1-node-96-624"),
		})
		if err != nil {
			return err
		}
		_, err = compute.NewNodeGroup(ctx, "nodes", &compute.NodeGroupArgs{
			Name:         pulumi.String("soletenant-group"),
			Zone:         pulumi.String("us-central1-f"),
			Description:  pulumi.String("example google_compute_node_group for Terraform Google Provider"),
			InitialSize:  pulumi.Int(1),
			NodeTemplate: soletenant_tmpl.ID(),
			ShareSettings: &compute.NodeGroupShareSettingsArgs{
				ShareType: pulumi.String("SPECIFIC_PROJECTS"),
				ProjectMaps: compute.NodeGroupShareSettingsProjectMapArray{
					&compute.NodeGroupShareSettingsProjectMapArgs{
						Id:        guestProject.ProjectId,
						ProjectId: guestProject.ProjectId,
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var guestProject = new Gcp.Organizations.Project("guest_project", new()
    {
        ProjectId = "project-id",
        Name = "project-name",
        OrgId = "123456789",
        DeletionPolicy = "DELETE",
    });
    var soletenant_tmpl = new Gcp.Compute.NodeTemplate("soletenant-tmpl", new()
    {
        Name = "soletenant-tmpl",
        Region = "us-central1",
        NodeType = "n1-node-96-624",
    });
    var nodes = new Gcp.Compute.NodeGroup("nodes", new()
    {
        Name = "soletenant-group",
        Zone = "us-central1-f",
        Description = "example google_compute_node_group for Terraform Google Provider",
        InitialSize = 1,
        NodeTemplate = soletenant_tmpl.Id,
        ShareSettings = new Gcp.Compute.Inputs.NodeGroupShareSettingsArgs
        {
            ShareType = "SPECIFIC_PROJECTS",
            ProjectMaps = new[]
            {
                new Gcp.Compute.Inputs.NodeGroupShareSettingsProjectMapArgs
                {
                    Id = guestProject.ProjectId,
                    ProjectId = guestProject.ProjectId,
                },
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.organizations.Project;
import com.pulumi.gcp.organizations.ProjectArgs;
import com.pulumi.gcp.compute.NodeTemplate;
import com.pulumi.gcp.compute.NodeTemplateArgs;
import com.pulumi.gcp.compute.NodeGroup;
import com.pulumi.gcp.compute.NodeGroupArgs;
import com.pulumi.gcp.compute.inputs.NodeGroupShareSettingsArgs;
import com.pulumi.gcp.compute.inputs.NodeGroupShareSettingsProjectMapArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var guestProject = new Project("guestProject", ProjectArgs.builder()
            .projectId("project-id")
            .name("project-name")
            .orgId("123456789")
            .deletionPolicy("DELETE")
            .build());
        var soletenant_tmpl = new NodeTemplate("soletenant-tmpl", NodeTemplateArgs.builder()
            .name("soletenant-tmpl")
            .region("us-central1")
            .nodeType("n1-node-96-624")
            .build());
        var nodes = new NodeGroup("nodes", NodeGroupArgs.builder()
            .name("soletenant-group")
            .zone("us-central1-f")
            .description("example google_compute_node_group for Terraform Google Provider")
            .initialSize(1)
            .nodeTemplate(soletenant_tmpl.id())
            .shareSettings(NodeGroupShareSettingsArgs.builder()
                .shareType("SPECIFIC_PROJECTS")
                .projectMaps(NodeGroupShareSettingsProjectMapArgs.builder()
                    .id(guestProject.projectId())
                    .projectId(guestProject.projectId())
                    .build())
                .build())
            .build());
    }
}
resources:
  guestProject:
    type: gcp:organizations:Project
    name: guest_project
    properties:
      projectId: project-id
      name: project-name
      orgId: '123456789'
      deletionPolicy: DELETE
  soletenant-tmpl:
    type: gcp:compute:NodeTemplate
    properties:
      name: soletenant-tmpl
      region: us-central1
      nodeType: n1-node-96-624
  nodes:
    type: gcp:compute:NodeGroup
    properties:
      name: soletenant-group
      zone: us-central1-f
      description: example google_compute_node_group for Terraform Google Provider
      initialSize: 1
      nodeTemplate: ${["soletenant-tmpl"].id}
      shareSettings:
        shareType: SPECIFIC_PROJECTS
        projectMaps:
          - id: ${guestProject.projectId}
            projectId: ${guestProject.projectId}
Create NodeGroup Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new NodeGroup(name: string, args: NodeGroupArgs, opts?: CustomResourceOptions);

@overload
def NodeGroup(resource_name: str,
              args: NodeGroupArgs,
              opts: Optional[ResourceOptions] = None)
@overload
def NodeGroup(resource_name: str,
              opts: Optional[ResourceOptions] = None,
              node_template: Optional[str] = None,
              autoscaling_policy: Optional[NodeGroupAutoscalingPolicyArgs] = None,
              description: Optional[str] = None,
              initial_size: Optional[int] = None,
              maintenance_interval: Optional[str] = None,
              maintenance_policy: Optional[str] = None,
              maintenance_window: Optional[NodeGroupMaintenanceWindowArgs] = None,
              name: Optional[str] = None,
              project: Optional[str] = None,
              share_settings: Optional[NodeGroupShareSettingsArgs] = None,
              zone: Optional[str] = None)

func NewNodeGroup(ctx *Context, name string, args NodeGroupArgs, opts ...ResourceOption) (*NodeGroup, error)

public NodeGroup(string name, NodeGroupArgs args, CustomResourceOptions? opts = null)
public NodeGroup(String name, NodeGroupArgs args)
public NodeGroup(String name, NodeGroupArgs args, CustomResourceOptions options)
type: gcp:compute:NodeGroup
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args NodeGroupArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args NodeGroupArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args NodeGroupArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args NodeGroupArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args NodeGroupArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var nodeGroupResource = new Gcp.Compute.NodeGroup("nodeGroupResource", new()
{
    NodeTemplate = "string",
    AutoscalingPolicy = new Gcp.Compute.Inputs.NodeGroupAutoscalingPolicyArgs
    {
        MaxNodes = 0,
        MinNodes = 0,
        Mode = "string",
    },
    Description = "string",
    InitialSize = 0,
    MaintenanceInterval = "string",
    MaintenancePolicy = "string",
    MaintenanceWindow = new Gcp.Compute.Inputs.NodeGroupMaintenanceWindowArgs
    {
        StartTime = "string",
    },
    Name = "string",
    Project = "string",
    ShareSettings = new Gcp.Compute.Inputs.NodeGroupShareSettingsArgs
    {
        ShareType = "string",
        ProjectMaps = new[]
        {
            new Gcp.Compute.Inputs.NodeGroupShareSettingsProjectMapArgs
            {
                Id = "string",
                ProjectId = "string",
            },
        },
    },
    Zone = "string",
});
example, err := compute.NewNodeGroup(ctx, "nodeGroupResource", &compute.NodeGroupArgs{
	NodeTemplate: pulumi.String("string"),
	AutoscalingPolicy: &compute.NodeGroupAutoscalingPolicyArgs{
		MaxNodes: pulumi.Int(0),
		MinNodes: pulumi.Int(0),
		Mode:     pulumi.String("string"),
	},
	Description:         pulumi.String("string"),
	InitialSize:         pulumi.Int(0),
	MaintenanceInterval: pulumi.String("string"),
	MaintenancePolicy:   pulumi.String("string"),
	MaintenanceWindow: &compute.NodeGroupMaintenanceWindowArgs{
		StartTime: pulumi.String("string"),
	},
	Name:    pulumi.String("string"),
	Project: pulumi.String("string"),
	ShareSettings: &compute.NodeGroupShareSettingsArgs{
		ShareType: pulumi.String("string"),
		ProjectMaps: compute.NodeGroupShareSettingsProjectMapArray{
			&compute.NodeGroupShareSettingsProjectMapArgs{
				Id:        pulumi.String("string"),
				ProjectId: pulumi.String("string"),
			},
		},
	},
	Zone: pulumi.String("string"),
})
var nodeGroupResource = new NodeGroup("nodeGroupResource", NodeGroupArgs.builder()
    .nodeTemplate("string")
    .autoscalingPolicy(NodeGroupAutoscalingPolicyArgs.builder()
        .maxNodes(0)
        .minNodes(0)
        .mode("string")
        .build())
    .description("string")
    .initialSize(0)
    .maintenanceInterval("string")
    .maintenancePolicy("string")
    .maintenanceWindow(NodeGroupMaintenanceWindowArgs.builder()
        .startTime("string")
        .build())
    .name("string")
    .project("string")
    .shareSettings(NodeGroupShareSettingsArgs.builder()
        .shareType("string")
        .projectMaps(NodeGroupShareSettingsProjectMapArgs.builder()
            .id("string")
            .projectId("string")
            .build())
        .build())
    .zone("string")
    .build());
node_group_resource = gcp.compute.NodeGroup("nodeGroupResource",
    node_template="string",
    autoscaling_policy={
        "max_nodes": 0,
        "min_nodes": 0,
        "mode": "string",
    },
    description="string",
    initial_size=0,
    maintenance_interval="string",
    maintenance_policy="string",
    maintenance_window={
        "start_time": "string",
    },
    name="string",
    project="string",
    share_settings={
        "share_type": "string",
        "project_maps": [{
            "id": "string",
            "project_id": "string",
        }],
    },
    zone="string")
const nodeGroupResource = new gcp.compute.NodeGroup("nodeGroupResource", {
    nodeTemplate: "string",
    autoscalingPolicy: {
        maxNodes: 0,
        minNodes: 0,
        mode: "string",
    },
    description: "string",
    initialSize: 0,
    maintenanceInterval: "string",
    maintenancePolicy: "string",
    maintenanceWindow: {
        startTime: "string",
    },
    name: "string",
    project: "string",
    shareSettings: {
        shareType: "string",
        projectMaps: [{
            id: "string",
            projectId: "string",
        }],
    },
    zone: "string",
});
type: gcp:compute:NodeGroup
properties:
    autoscalingPolicy:
        maxNodes: 0
        minNodes: 0
        mode: string
    description: string
    initialSize: 0
    maintenanceInterval: string
    maintenancePolicy: string
    maintenanceWindow:
        startTime: string
    name: string
    nodeTemplate: string
    project: string
    shareSettings:
        projectMaps:
            - id: string
              projectId: string
        shareType: string
    zone: string
NodeGroup Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The NodeGroup resource accepts the following input properties:
- NodeTemplate string
- The URL of the node template to which this node group belongs.
- AutoscalingPolicy NodeGroup Autoscaling Policy 
- If you use sole-tenant nodes for your workloads, you can use the node
group autoscaler to automatically manage the sizes of your node groups.
One of initial_size or autoscaling_policy must be configured on resource creation. Structure is documented below.
- Description string
- An optional textual description of the resource.
- InitialSize int
- The initial number of nodes in the node group. One of initial_size or autoscaling_policy must be configured on resource creation.
- MaintenanceInterval string
- Specifies the frequency of planned maintenance events. Set to one of the following:
  - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available.
  - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs.
  Possible values are: AS_NEEDED, RECURRENT.
- MaintenancePolicy string
- Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.
- MaintenanceWindow NodeGroup Maintenance Window 
- Contains properties for the timeframe of maintenance. Structure is documented below.
- Name string
- Name of the resource.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- ShareSettings NodeGroup Share Settings
- Share settings for the node group. Structure is documented below.
- Zone string
- Zone where this node group is located
- NodeTemplate string
- The URL of the node template to which this node group belongs.
- AutoscalingPolicy NodeGroup Autoscaling Policy Args 
- If you use sole-tenant nodes for your workloads, you can use the node
group autoscaler to automatically manage the sizes of your node groups.
One of initial_size or autoscaling_policy must be configured on resource creation. Structure is documented below.
- Description string
- An optional textual description of the resource.
- InitialSize int
- The initial number of nodes in the node group. One of initial_size or autoscaling_policy must be configured on resource creation.
- MaintenanceInterval string
- Specifies the frequency of planned maintenance events. Set to one of the following:
  - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available.
  - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs.
  Possible values are: AS_NEEDED, RECURRENT.
- MaintenancePolicy string
- Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.
- MaintenanceWindow NodeGroup Maintenance Window Args 
- Contains properties for the timeframe of maintenance. Structure is documented below.
- Name string
- Name of the resource.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- ShareSettings NodeGroup Share Settings Args
- Share settings for the node group. Structure is documented below.
- Zone string
- Zone where this node group is located
- nodeTemplate String
- The URL of the node template to which this node group belongs.
- autoscalingPolicy NodeGroup Autoscaling Policy 
- If you use sole-tenant nodes for your workloads, you can use the node
group autoscaler to automatically manage the sizes of your node groups.
One of initial_size or autoscaling_policy must be configured on resource creation. Structure is documented below.
- description String
- An optional textual description of the resource.
- initialSize Integer
- The initial number of nodes in the node group. One of initial_size or autoscaling_policy must be configured on resource creation.
- maintenanceInterval String
- Specifies the frequency of planned maintenance events. Set to one of the following:
  - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available.
  - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs.
  Possible values are: AS_NEEDED, RECURRENT.
- maintenancePolicy String
- Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.
- maintenanceWindow NodeGroup Maintenance Window 
- Contains properties for the timeframe of maintenance. Structure is documented below.
- name String
- Name of the resource.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- shareSettings NodeGroup Share Settings
- Share settings for the node group. Structure is documented below.
- zone String
- Zone where this node group is located
- nodeTemplate string
- The URL of the node template to which this node group belongs.
- autoscalingPolicy NodeGroup Autoscaling Policy 
- If you use sole-tenant nodes for your workloads, you can use the node
group autoscaler to automatically manage the sizes of your node groups.
One of initial_size or autoscaling_policy must be configured on resource creation. Structure is documented below.
- description string
- An optional textual description of the resource.
- initialSize number
- The initial number of nodes in the node group. One of initial_size or autoscaling_policy must be configured on resource creation.
- maintenanceInterval string
- Specifies the frequency of planned maintenance events. Set to one of the following:
  - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available.
  - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs.
  Possible values are: AS_NEEDED, RECURRENT.
- maintenancePolicy string
- Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.
- maintenanceWindow NodeGroup Maintenance Window 
- Contains properties for the timeframe of maintenance. Structure is documented below.
- name string
- Name of the resource.
- project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- shareSettings NodeGroup Share Settings
- Share settings for the node group. Structure is documented below.
- zone string
- Zone where this node group is located
- node_template str
- The URL of the node template to which this node group belongs.
- autoscaling_policy NodeGroup Autoscaling Policy Args 
- If you use sole-tenant nodes for your workloads, you can use the node
group autoscaler to automatically manage the sizes of your node groups.
One of initial_size or autoscaling_policy must be configured on resource creation. Structure is documented below.
- description str
- An optional textual description of the resource.
- initial_size int
- The initial number of nodes in the node group. One of initial_size or autoscaling_policy must be configured on resource creation.
- maintenance_interval str
- Specifies the frequency of planned maintenance events. Set to one of the following:
  - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available.
  - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs.
  Possible values are: AS_NEEDED, RECURRENT.
- maintenance_policy str
- Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.
- maintenance_window NodeGroup Maintenance Window Args 
- Contains properties for the timeframe of maintenance. Structure is documented below.
- name str
- Name of the resource.
- project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- share_settings NodeGroup Share Settings Args
- Share settings for the node group. Structure is documented below.
- zone str
- Zone where this node group is located
- nodeTemplate String
- The URL of the node template to which this node group belongs.
- autoscalingPolicy Property Map
- If you use sole-tenant nodes for your workloads, you can use the node
group autoscaler to automatically manage the sizes of your node groups.
One of initial_size or autoscaling_policy must be configured on resource creation. Structure is documented below.
- description String
- An optional textual description of the resource.
- initialSize Number
- The initial number of nodes in the node group. One of initial_size or autoscaling_policy must be configured on resource creation.
- maintenanceInterval String
- Specifies the frequency of planned maintenance events. Set to one of the following:
  - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available.
  - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs.
  Possible values are: AS_NEEDED, RECURRENT.
- maintenancePolicy String
- Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.
- maintenanceWindow Property Map
- Contains properties for the timeframe of maintenance. Structure is documented below.
- name String
- Name of the resource.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- shareSettings Property Map
- Share settings for the node group. Structure is documented below.
- zone String
- Zone where this node group is located
Outputs
All input properties are implicitly available as output properties. Additionally, the NodeGroup resource produces the following output properties:
- CreationTimestamp string
- Creation timestamp in RFC3339 text format.
- Id string
- The provider-assigned unique ID for this managed resource.
- SelfLink string
- The URI of the created resource.
- Size int
- The total number of nodes in the node group.
- CreationTimestamp string
- Creation timestamp in RFC3339 text format.
- Id string
- The provider-assigned unique ID for this managed resource.
- SelfLink string
- The URI of the created resource.
- Size int
- The total number of nodes in the node group.
- creationTimestamp String
- Creation timestamp in RFC3339 text format.
- id String
- The provider-assigned unique ID for this managed resource.
- selfLink String
- The URI of the created resource.
- size Integer
- The total number of nodes in the node group.
- creationTimestamp string
- Creation timestamp in RFC3339 text format.
- id string
- The provider-assigned unique ID for this managed resource.
- selfLink string
- The URI of the created resource.
- size number
- The total number of nodes in the node group.
- creation_timestamp str
- Creation timestamp in RFC3339 text format.
- id str
- The provider-assigned unique ID for this managed resource.
- self_link str
- The URI of the created resource.
- size int
- The total number of nodes in the node group.
- creationTimestamp String
- Creation timestamp in RFC3339 text format.
- id String
- The provider-assigned unique ID for this managed resource.
- selfLink String
- The URI of the created resource.
- size Number
- The total number of nodes in the node group.
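Output properties can be exported from a stack or fed into other resources. A minimal TypeScript sketch, assuming the nodes group from the examples above:

// Export the computed size and URI of the node group (assumes "nodes" from the examples above).
export const nodeGroupSize = nodes.size;
export const nodeGroupSelfLink = nodes.selfLink;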
Look up Existing NodeGroup Resource
Get an existing NodeGroup resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: NodeGroupState, opts?: CustomResourceOptions): NodeGroup

@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        autoscaling_policy: Optional[NodeGroupAutoscalingPolicyArgs] = None,
        creation_timestamp: Optional[str] = None,
        description: Optional[str] = None,
        initial_size: Optional[int] = None,
        maintenance_interval: Optional[str] = None,
        maintenance_policy: Optional[str] = None,
        maintenance_window: Optional[NodeGroupMaintenanceWindowArgs] = None,
        name: Optional[str] = None,
        node_template: Optional[str] = None,
        project: Optional[str] = None,
        self_link: Optional[str] = None,
        share_settings: Optional[NodeGroupShareSettingsArgs] = None,
        size: Optional[int] = None,
        zone: Optional[str] = None) -> NodeGroup

func GetNodeGroup(ctx *Context, name string, id IDInput, state *NodeGroupState, opts ...ResourceOption) (*NodeGroup, error)

public static NodeGroup Get(string name, Input<string> id, NodeGroupState? state, CustomResourceOptions? opts = null)

public static NodeGroup get(String name, Output<String> id, NodeGroupState state, CustomResourceOptions options)

resources:
  _:
    type: gcp:compute:NodeGroup
    get:
      id: ${id}

- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- AutoscalingPolicy NodeGroup Autoscaling Policy 
- If you use sole-tenant nodes for your workloads, you can use the node
group autoscaler to automatically manage the sizes of your node groups.
One of initial_size or autoscaling_policy must be configured on resource creation. Structure is documented below.
- CreationTimestamp string
- Creation timestamp in RFC3339 text format.
- Description string
- An optional textual description of the resource.
- InitialSize int
- The initial number of nodes in the node group. One of initial_size or autoscaling_policy must be configured on resource creation.
- MaintenanceInterval string
- Specifies the frequency of planned maintenance events. Set to one of the following:
  - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available.
  - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs.
  Possible values are: AS_NEEDED, RECURRENT.
- MaintenancePolicy string
- Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.
- MaintenanceWindow NodeGroup Maintenance Window 
- Contains properties for the timeframe of maintenance. Structure is documented below.
- Name string
- Name of the resource.
- NodeTemplate string
- The URL of the node template to which this node group belongs.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- SelfLink string
- The URI of the created resource.
- ShareSettings NodeGroup Share Settings
- Share settings for the node group. Structure is documented below.
- Size int
- The total number of nodes in the node group.
- Zone string
- Zone where this node group is located
- AutoscalingPolicy NodeGroup Autoscaling Policy Args 
- If you use sole-tenant nodes for your workloads, you can use the node
group autoscaler to automatically manage the sizes of your node groups.
One of initial_size or autoscaling_policy must be configured on resource creation. Structure is documented below.
- CreationTimestamp string
- Creation timestamp in RFC3339 text format.
- Description string
- An optional textual description of the resource.
- InitialSize int
- The initial number of nodes in the node group. One of initial_size or autoscaling_policy must be configured on resource creation.
- MaintenanceInterval string
- Specifies the frequency of planned maintenance events. Set to one of the following:
  - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available.
  - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs.
  Possible values are: AS_NEEDED, RECURRENT.
- MaintenancePolicy string
- Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.
- MaintenanceWindow NodeGroup Maintenance Window Args 
- Contains properties for the timeframe of maintenance. Structure is documented below.
- Name string
- Name of the resource.
- NodeTemplate string
- The URL of the node template to which this node group belongs.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- SelfLink string
- The URI of the created resource.
- ShareSettings NodeGroup Share Settings Args
- Share settings for the node group. Structure is documented below.
- Size int
- The total number of nodes in the node group.
- Zone string
- Zone where this node group is located
- autoscalingPolicy NodeGroup Autoscaling Policy 
- If you use sole-tenant nodes for your workloads, you can use the node
group autoscaler to automatically manage the sizes of your node groups.
One of initial_size or autoscaling_policy must be configured on resource creation. Structure is documented below.
- creationTimestamp String
- Creation timestamp in RFC3339 text format.
- description String
- An optional textual description of the resource.
- initialSize Integer
- The initial number of nodes in the node group. One of initial_size or autoscaling_policy must be configured on resource creation.
- maintenanceInterval String
- Specifies the frequency of planned maintenance events. Set to one of the following:
  - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available.
  - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs.
  Possible values are: AS_NEEDED, RECURRENT.
- maintenancePolicy String
- Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.
- maintenanceWindow NodeGroup Maintenance Window 
- Contains properties for the timeframe of maintenance. Structure is documented below.
- name String
- Name of the resource.
- nodeTemplate String
- The URL of the node template to which this node group belongs.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- selfLink String
- The URI of the created resource.
- shareSettings NodeGroup Share Settings
- Share settings for the node group. Structure is documented below.
- size Integer
- The total number of nodes in the node group.
- zone String
- Zone where this node group is located
- autoscalingPolicy NodeGroup Autoscaling Policy 
- If you use sole-tenant nodes for your workloads, you can use the node
group autoscaler to automatically manage the sizes of your node groups.
One of initial_size or autoscaling_policy must be configured on resource creation. Structure is documented below.
- creationTimestamp string
- Creation timestamp in RFC3339 text format.
- description string
- An optional textual description of the resource.
- initialSize number
- The initial number of nodes in the node group. One of initial_size or autoscaling_policy must be configured on resource creation.
- maintenanceInterval string
- Specifies the frequency of planned maintenance events. Set to one of the following:
  - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available.
  - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs.
  Possible values are: AS_NEEDED, RECURRENT.
- maintenancePolicy string
- Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.
- maintenanceWindow NodeGroup Maintenance Window 
- Contains properties for the timeframe of maintenance. Structure is documented below.
- name string
- Name of the resource.
- nodeTemplate string
- The URL of the node template to which this node group belongs.
- project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- selfLink string
- The URI of the created resource.
- shareSettings NodeGroup Share Settings
- Share settings for the node group. Structure is documented below.
- size number
- The total number of nodes in the node group.
- zone string
- Zone where this node group is located
- autoscaling_policy NodeGroup Autoscaling Policy Args 
- If you use sole-tenant nodes for your workloads, you can use the node
group autoscaler to automatically manage the sizes of your node groups.
One of initial_size or autoscaling_policy must be configured on resource creation. Structure is documented below.
- creation_timestamp str
- Creation timestamp in RFC3339 text format.
- description str
- An optional textual description of the resource.
- initial_size int
- The initial number of nodes in the node group. One of initial_size or autoscaling_policy must be configured on resource creation.
- maintenance_interval str
- Specifies the frequency of planned maintenance events. Set to one of the following:
  - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available.
  - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs.
  Possible values are: AS_NEEDED, RECURRENT.
- maintenance_policy str
- Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.
- maintenance_window NodeGroup Maintenance Window Args 
- Contains properties for the timeframe of maintenance. Structure is documented below.
- name str
- Name of the resource.
- node_template str
- The URL of the node template to which this node group belongs.
- project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- self_link str
- The URI of the created resource.
- share_settings NodeGroup Share Settings Args
- Share settings for the node group. Structure is documented below.
- size int
- The total number of nodes in the node group.
- zone str
- Zone where this node group is located
- autoscalingPolicy Property Map
- If you use sole-tenant nodes for your workloads, you can use the node
group autoscaler to automatically manage the sizes of your node groups.
One of initial_size or autoscaling_policy must be configured on resource creation. Structure is documented below.
- creationTimestamp String
- Creation timestamp in RFC3339 text format.
- description String
- An optional textual description of the resource.
- initialSize Number
- The initial number of nodes in the node group. One of initial_size or autoscaling_policy must be configured on resource creation.
- maintenanceInterval String
- Specifies the frequency of planned maintenance events. Set to one of the following:
  - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available.
  - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs.
  Possible values are: AS_NEEDED, RECURRENT.
- maintenancePolicy String
- Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.
- maintenanceWindow Property Map
- Contains properties for the timeframe of maintenance. Structure is documented below.
- name String
- Name of the resource.
- nodeTemplate String
- The URL of the node template to which this node group belongs.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- selfLink String
- The URI of the created resource.
- shareSettings Property Map
- Share settings for the node group. Structure is documented below.
- size Number
- The total number of nodes in the node group.
- zone String
- Zone where this node group is located
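As a concrete illustration, a minimal TypeScript sketch that adopts an existing node group without recreating it; the ID format follows the usual projects/{project}/zones/{zone}/nodeGroups/{name} path, and all values here are hypothetical:

import * as gcp from "@pulumi/gcp";

// Look up an existing node group by its fully qualified ID (hypothetical values).
const existing = gcp.compute.NodeGroup.get("existing-nodes",
    "projects/my-project/zones/us-central1-a/nodeGroups/soletenant-group");

export const existingSize = existing.size;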
Supporting Types
NodeGroupAutoscalingPolicy, NodeGroupAutoscalingPolicyArgs        
- MaxNodes int
- Maximum size of the node group. Set to a value less than or equal to 100 and greater than or equal to min-nodes.
- MinNodes int
- Minimum size of the node group. Must be less than or equal to max-nodes. The default value is 0.
- Mode string
- The autoscaling mode. Set to one of the following:
- OFF: Disables the autoscaler.
- ON: Enables scaling in and scaling out.
- ONLY_SCALE_OUT: Enables only scaling out. You must use this mode if your node groups are configured to restart their hosted VMs on minimal servers.
Possible values are: OFF, ON, ONLY_SCALE_OUT.
 
- MaxNodes int
- Maximum size of the node group. Set to a value less than or equal to 100 and greater than or equal to min-nodes.
- MinNodes int
- Minimum size of the node group. Must be less than or equal to max-nodes. The default value is 0.
- Mode string
- The autoscaling mode. Set to one of the following:
- OFF: Disables the autoscaler.
- ON: Enables scaling in and scaling out.
- ONLY_SCALE_OUT: Enables only scaling out. You must use this mode if your node groups are configured to restart their hosted VMs on minimal servers.
Possible values are: OFF, ON, ONLY_SCALE_OUT.
 
- maxNodes Integer
- Maximum size of the node group. Set to a value less than or equal to 100 and greater than or equal to min-nodes.
- minNodes Integer
- Minimum size of the node group. Must be less than or equal to max-nodes. The default value is 0.
- mode String
- The autoscaling mode. Set to one of the following:
- OFF: Disables the autoscaler.
- ON: Enables scaling in and scaling out.
- ONLY_SCALE_OUT: Enables only scaling out. You must use this mode if your node groups are configured to restart their hosted VMs on minimal servers.
Possible values are: OFF, ON, ONLY_SCALE_OUT.
 
- maxNodes number
- Maximum size of the node group. Set to a value less than or equal to 100 and greater than or equal to min-nodes.
- minNodes number
- Minimum size of the node group. Must be less than or equal to max-nodes. The default value is 0.
- mode string
- The autoscaling mode. Set to one of the following:
- OFF: Disables the autoscaler.
- ON: Enables scaling in and scaling out.
- ONLY_SCALE_OUT: Enables only scaling out. You must use this mode if your node groups are configured to restart their hosted VMs on minimal servers.
Possible values are: OFF, ON, ONLY_SCALE_OUT.
 
- max_nodes int
- Maximum size of the node group. Set to a value less than or equal to 100 and greater than or equal to min-nodes.
- min_nodes int
- Minimum size of the node group. Must be less than or equal to max-nodes. The default value is 0.
- mode str
- The autoscaling mode. Set to one of the following:
- OFF: Disables the autoscaler.
- ON: Enables scaling in and scaling out.
- ONLY_SCALE_OUT: Enables only scaling out. You must use this mode if your node groups are configured to restart their hosted VMs on minimal servers.
Possible values are: OFF, ON, ONLY_SCALE_OUT.
 
- maxNodes Number
- Maximum size of the node group. Set to a value less than or equal to 100 and greater than or equal to min-nodes.
- minNodes Number
- Minimum size of the node group. Must be less than or equal to max-nodes. The default value is 0.
- mode String
- The autoscaling mode. Set to one of the following:
- OFF: Disables the autoscaler.
- ON: Enables scaling in and scaling out.
- ONLY_SCALE_OUT: Enables only scaling out. You must use this mode if your node groups are configured to restart their hosted VMs on minimal servers.
Possible values are: OFF, ON, ONLY_SCALE_OUT.
 
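To see how these fields fit together, the following is a minimal sketch of a node group that relies on autoscaling_policy instead of initial_size. The template name, zone, and node-count bounds are illustrative assumptions, not values from this page.
import * as gcp from "@pulumi/gcp";

// Hypothetical sole-tenant node template; any existing template works here.
const tmpl = new gcp.compute.NodeTemplate("autoscale-tmpl", {
    name: "autoscale-tmpl",
    region: "us-central1",
    nodeType: "n1-node-96-624",
});

// With autoscalingPolicy set, initialSize can be omitted: the group starts
// at minNodes and, under ONLY_SCALE_OUT, only ever adds nodes up to maxNodes.
const autoscaled = new gcp.compute.NodeGroup("autoscaled", {
    name: "autoscaled-group",
    zone: "us-central1-a",
    nodeTemplate: tmpl.id,
    autoscalingPolicy: {
        mode: "ONLY_SCALE_OUT",
        minNodes: 1,  // must be <= maxNodes; defaults to 0 if unset
        maxNodes: 10, // must be <= 100 and >= minNodes
    },
});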
NodeGroupMaintenanceWindow, NodeGroupMaintenanceWindowArgs        
- StartTime string
- Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.
- StartTime string
- Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.
- startTime String
- Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.
- startTime string
- Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.
- start_time str
- Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.
- startTime String
- Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.
NodeGroupShareSettings, NodeGroupShareSettingsArgs        
- ShareType string
- Node group sharing type.
Possible values are: ORGANIZATION, SPECIFIC_PROJECTS, LOCAL.
- ProjectMaps List<NodeGroupShareSettingsProjectMap>
- A map of project id and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS. Structure is documented below.
- ShareType string
- Node group sharing type.
Possible values are: ORGANIZATION, SPECIFIC_PROJECTS, LOCAL.
- ProjectMaps []NodeGroupShareSettingsProjectMap
- A map of project id and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS. Structure is documented below.
- shareType String
- Node group sharing type.
Possible values are: ORGANIZATION, SPECIFIC_PROJECTS, LOCAL.
- projectMaps List<NodeGroupShareSettingsProjectMap>
- A map of project id and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS. Structure is documented below.
- shareType string
- Node group sharing type.
Possible values are: ORGANIZATION, SPECIFIC_PROJECTS, LOCAL.
- projectMaps NodeGroupShareSettingsProjectMap[]
- A map of project id and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS. Structure is documented below.
- share_type str
- Node group sharing type.
Possible values are: ORGANIZATION, SPECIFIC_PROJECTS, LOCAL.
- project_maps Sequence[NodeGroupShareSettingsProjectMap]
- A map of project id and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS. Structure is documented below.
- shareType String
- Node group sharing type.
Possible values are: ORGANIZATION, SPECIFIC_PROJECTS, LOCAL.
- projectMaps List<Property Map>
- A map of project id and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS. Structure is documented below.
NodeGroupShareSettingsProjectMap, NodeGroupShareSettingsProjectMapArgs            
- id str
- The identifier for this object. Format specified above.
- project_id str
- The project id/number should be the same as the key of this project config in the project map.
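Putting share settings together, this sketch shares a node group with one specific consumer project. The project IDs and template URL are placeholders; note that each projectMaps entry repeats the consumer project id as both the map key (id) and projectId.
import * as gcp from "@pulumi/gcp";

const shared = new gcp.compute.NodeGroup("shared", {
    name: "shared-group",
    zone: "us-central1-a",
    initialSize: 1,
    // Assumed pre-existing node template URL (placeholder project/template).
    nodeTemplate: "projects/my-project/regions/us-central1/nodeTemplates/my-tmpl",
    shareSettings: {
        shareType: "SPECIFIC_PROJECTS", // projectMaps is only valid with this share type
        projectMaps: [{
            id: "consumer-project-id",        // key of this entry in the project map
            projectId: "consumer-project-id", // must match the key above
        }],
    },
});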
Import
NodeGroup can be imported using any of these accepted formats:
- projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}
- {{project}}/{{zone}}/{{name}}
- {{zone}}/{{name}}
- {{name}}
When using the pulumi import command, NodeGroup can be imported using one of the formats above. For example:
$ pulumi import gcp:compute/nodeGroup:NodeGroup default projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}
$ pulumi import gcp:compute/nodeGroup:NodeGroup default {{project}}/{{zone}}/{{name}}
$ pulumi import gcp:compute/nodeGroup:NodeGroup default {{zone}}/{{name}}
$ pulumi import gcp:compute/nodeGroup:NodeGroup default {{name}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the google-beta Terraform Provider.