We recommend using Azure Native.
azure.synapse.SparkPool
Explore with Pulumi AI
Manages a Synapse Spark Pool.
Example Usage
import * as pulumi from "@pulumi/pulumi";
import * as azure from "@pulumi/azure";
// Resource group that holds every resource in this example.
const example = new azure.core.ResourceGroup("example", {
    name: "example-resources",
    location: "West Europe",
});
// StorageV2 account with hierarchical namespace enabled, which is required
// before a Data Lake Gen2 filesystem can be created in it.
const exampleAccount = new azure.storage.Account("example", {
    name: "examplestorageacc",
    resourceGroupName: example.name,
    location: example.location,
    accountTier: "Standard",
    accountReplicationType: "LRS",
    accountKind: "StorageV2",
    isHnsEnabled: true,
});
// Data Lake Gen2 filesystem used as the Synapse workspace's primary storage.
const exampleDataLakeGen2Filesystem = new azure.storage.DataLakeGen2Filesystem("example", {
    name: "example",
    storageAccountId: exampleAccount.id,
});
// Synapse workspace that the Spark pool below is attached to.
const exampleWorkspace = new azure.synapse.Workspace("example", {
    name: "example",
    resourceGroupName: example.name,
    location: example.location,
    storageDataLakeGen2FilesystemId: exampleDataLakeGen2Filesystem.id,
    sqlAdministratorLogin: "sqladminuser",
    sqlAdministratorLoginPassword: "H@Sh1CoR3!", // example value only — use a Pulumi config secret in real code
    identity: {
        type: "SystemAssigned",
    },
});
// Spark pool that auto-scales between 3 and 50 nodes and auto-pauses
// after 15 idle minutes. library_requirement/spark_config upload a
// requirements.txt and a Spark configuration file to the pool.
const exampleSparkPool = new azure.synapse.SparkPool("example", {
    name: "example",
    synapseWorkspaceId: exampleWorkspace.id,
    nodeSizeFamily: "MemoryOptimized",
    nodeSize: "Small",
    cacheSize: 100,
    autoScale: {
        maxNodeCount: 50,
        minNodeCount: 3,
    },
    autoPause: {
        delayInMinutes: 15,
    },
    libraryRequirement: {
        content: `appnope==0.1.0
beautifulsoup4==4.6.3
`,
        filename: "requirements.txt",
    },
    sparkConfig: {
        content: "spark.shuffle.spill                true\n",
        filename: "config.txt",
    },
    sparkVersion: "3.2",
    tags: {
        ENV: "Production",
    },
});
import pulumi
import pulumi_azure as azure
# Resource group that holds every resource in this example.
example = azure.core.ResourceGroup("example",
    name="example-resources",
    location="West Europe")
# StorageV2 account with hierarchical namespace enabled, which is required
# before a Data Lake Gen2 filesystem can be created in it.
example_account = azure.storage.Account("example",
    name="examplestorageacc",
    resource_group_name=example.name,
    location=example.location,
    account_tier="Standard",
    account_replication_type="LRS",
    account_kind="StorageV2",
    is_hns_enabled=True)
# Data Lake Gen2 filesystem used as the Synapse workspace's primary storage.
example_data_lake_gen2_filesystem = azure.storage.DataLakeGen2Filesystem("example",
    name="example",
    storage_account_id=example_account.id)
# Synapse workspace that the Spark pool below is attached to.
example_workspace = azure.synapse.Workspace("example",
    name="example",
    resource_group_name=example.name,
    location=example.location,
    storage_data_lake_gen2_filesystem_id=example_data_lake_gen2_filesystem.id,
    sql_administrator_login="sqladminuser",
    # Example value only — use a Pulumi config secret in real code.
    sql_administrator_login_password="H@Sh1CoR3!",
    identity={
        "type": "SystemAssigned",
    })
# Spark pool that auto-scales between 3 and 50 nodes and auto-pauses after
# 15 idle minutes. library_requirement/spark_config upload a requirements.txt
# and a Spark configuration file to the pool.
example_spark_pool = azure.synapse.SparkPool("example",
    name="example",
    synapse_workspace_id=example_workspace.id,
    node_size_family="MemoryOptimized",
    node_size="Small",
    cache_size=100,
    auto_scale={
        "max_node_count": 50,
        "min_node_count": 3,
    },
    auto_pause={
        "delay_in_minutes": 15,
    },
    library_requirement={
        "content": """appnope==0.1.0
beautifulsoup4==4.6.3
""",
        "filename": "requirements.txt",
    },
    spark_config={
        "content": "spark.shuffle.spill                true\n",
        "filename": "config.txt",
    },
    spark_version="3.2",
    tags={
        "ENV": "Production",
    })
package main
import (
	"github.com/pulumi/pulumi-azure/sdk/v6/go/azure/core"
	"github.com/pulumi/pulumi-azure/sdk/v6/go/azure/storage"
	"github.com/pulumi/pulumi-azure/sdk/v6/go/azure/synapse"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// main provisions a Synapse Spark Pool together with its prerequisites:
// a resource group, a Data Lake Gen2-capable storage account and
// filesystem, and a Synapse workspace.
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Resource group that holds every resource in this example.
		example, err := core.NewResourceGroup(ctx, "example", &core.ResourceGroupArgs{
			Name:     pulumi.String("example-resources"),
			Location: pulumi.String("West Europe"),
		})
		if err != nil {
			return err
		}
		// StorageV2 account with hierarchical namespace enabled, which is
		// required before a Data Lake Gen2 filesystem can be created in it.
		exampleAccount, err := storage.NewAccount(ctx, "example", &storage.AccountArgs{
			Name:                   pulumi.String("examplestorageacc"),
			ResourceGroupName:      example.Name,
			Location:               example.Location,
			AccountTier:            pulumi.String("Standard"),
			AccountReplicationType: pulumi.String("LRS"),
			AccountKind:            pulumi.String("StorageV2"),
			IsHnsEnabled:           pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		// Data Lake Gen2 filesystem used as the workspace's primary storage.
		exampleDataLakeGen2Filesystem, err := storage.NewDataLakeGen2Filesystem(ctx, "example", &storage.DataLakeGen2FilesystemArgs{
			Name:             pulumi.String("example"),
			StorageAccountId: exampleAccount.ID(),
		})
		if err != nil {
			return err
		}
		// Synapse workspace that the Spark pool below is attached to.
		exampleWorkspace, err := synapse.NewWorkspace(ctx, "example", &synapse.WorkspaceArgs{
			Name:                            pulumi.String("example"),
			ResourceGroupName:               example.Name,
			Location:                        example.Location,
			StorageDataLakeGen2FilesystemId: exampleDataLakeGen2Filesystem.ID(),
			SqlAdministratorLogin:           pulumi.String("sqladminuser"),
			// Example value only — use a Pulumi config secret in real code.
			SqlAdministratorLoginPassword: pulumi.String("H@Sh1CoR3!"),
			Identity: &synapse.WorkspaceIdentityArgs{
				Type: pulumi.String("SystemAssigned"),
			},
		})
		if err != nil {
			return err
		}
		// Spark pool that auto-scales between 3 and 50 nodes and
		// auto-pauses after 15 idle minutes. LibraryRequirement and
		// SparkConfig upload a requirements.txt and a Spark configuration
		// file to the pool.
		_, err = synapse.NewSparkPool(ctx, "example", &synapse.SparkPoolArgs{
			Name:               pulumi.String("example"),
			SynapseWorkspaceId: exampleWorkspace.ID(),
			NodeSizeFamily:     pulumi.String("MemoryOptimized"),
			NodeSize:           pulumi.String("Small"),
			CacheSize:          pulumi.Int(100),
			AutoScale: &synapse.SparkPoolAutoScaleArgs{
				MaxNodeCount: pulumi.Int(50),
				MinNodeCount: pulumi.Int(3),
			},
			AutoPause: &synapse.SparkPoolAutoPauseArgs{
				DelayInMinutes: pulumi.Int(15),
			},
			LibraryRequirement: &synapse.SparkPoolLibraryRequirementArgs{
				Content:  pulumi.String("appnope==0.1.0\nbeautifulsoup4==4.6.3\n"),
				Filename: pulumi.String("requirements.txt"),
			},
			SparkConfig: &synapse.SparkPoolSparkConfigArgs{
				Content:  pulumi.String("spark.shuffle.spill                true\n"),
				Filename: pulumi.String("config.txt"),
			},
			SparkVersion: pulumi.String("3.2"),
			Tags: pulumi.StringMap{
				"ENV": pulumi.String("Production"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Azure = Pulumi.Azure;
return await Deployment.RunAsync(() => 
{
    // Resource group that holds every resource in this example.
    var example = new Azure.Core.ResourceGroup("example", new()
    {
        Name = "example-resources",
        Location = "West Europe",
    });
    // StorageV2 account with hierarchical namespace enabled, which is
    // required before a Data Lake Gen2 filesystem can be created in it.
    var exampleAccount = new Azure.Storage.Account("example", new()
    {
        Name = "examplestorageacc",
        ResourceGroupName = example.Name,
        Location = example.Location,
        AccountTier = "Standard",
        AccountReplicationType = "LRS",
        AccountKind = "StorageV2",
        IsHnsEnabled = true,
    });
    // Data Lake Gen2 filesystem used as the workspace's primary storage.
    var exampleDataLakeGen2Filesystem = new Azure.Storage.DataLakeGen2Filesystem("example", new()
    {
        Name = "example",
        StorageAccountId = exampleAccount.Id,
    });
    // Synapse workspace that the Spark pool below is attached to.
    var exampleWorkspace = new Azure.Synapse.Workspace("example", new()
    {
        Name = "example",
        ResourceGroupName = example.Name,
        Location = example.Location,
        StorageDataLakeGen2FilesystemId = exampleDataLakeGen2Filesystem.Id,
        SqlAdministratorLogin = "sqladminuser",
        // Example value only — use a Pulumi config secret in real code.
        SqlAdministratorLoginPassword = "H@Sh1CoR3!",
        Identity = new Azure.Synapse.Inputs.WorkspaceIdentityArgs
        {
            Type = "SystemAssigned",
        },
    });
    // Spark pool that auto-scales between 3 and 50 nodes and auto-pauses
    // after 15 idle minutes. LibraryRequirement/SparkConfig upload a
    // requirements.txt and a Spark configuration file to the pool.
    var exampleSparkPool = new Azure.Synapse.SparkPool("example", new()
    {
        Name = "example",
        SynapseWorkspaceId = exampleWorkspace.Id,
        NodeSizeFamily = "MemoryOptimized",
        NodeSize = "Small",
        CacheSize = 100,
        AutoScale = new Azure.Synapse.Inputs.SparkPoolAutoScaleArgs
        {
            MaxNodeCount = 50,
            MinNodeCount = 3,
        },
        AutoPause = new Azure.Synapse.Inputs.SparkPoolAutoPauseArgs
        {
            DelayInMinutes = 15,
        },
        LibraryRequirement = new Azure.Synapse.Inputs.SparkPoolLibraryRequirementArgs
        {
            Content = @"appnope==0.1.0
beautifulsoup4==4.6.3
",
            Filename = "requirements.txt",
        },
        SparkConfig = new Azure.Synapse.Inputs.SparkPoolSparkConfigArgs
        {
            Content = @"spark.shuffle.spill                true
",
            Filename = "config.txt",
        },
        SparkVersion = "3.2",
        Tags = 
        {
            { "ENV", "Production" },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azure.core.ResourceGroup;
import com.pulumi.azure.core.ResourceGroupArgs;
import com.pulumi.azure.storage.Account;
import com.pulumi.azure.storage.AccountArgs;
import com.pulumi.azure.storage.DataLakeGen2Filesystem;
import com.pulumi.azure.storage.DataLakeGen2FilesystemArgs;
import com.pulumi.azure.synapse.Workspace;
import com.pulumi.azure.synapse.WorkspaceArgs;
import com.pulumi.azure.synapse.inputs.WorkspaceIdentityArgs;
import com.pulumi.azure.synapse.SparkPool;
import com.pulumi.azure.synapse.SparkPoolArgs;
import com.pulumi.azure.synapse.inputs.SparkPoolAutoScaleArgs;
import com.pulumi.azure.synapse.inputs.SparkPoolAutoPauseArgs;
import com.pulumi.azure.synapse.inputs.SparkPoolLibraryRequirementArgs;
import com.pulumi.azure.synapse.inputs.SparkPoolSparkConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
/**
 * Provisions a Synapse Spark Pool together with its prerequisites:
 * a resource group, a Data Lake Gen2-capable storage account and
 * filesystem, and a Synapse workspace.
 */
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        // Resource group that holds every resource in this example.
        var example = new ResourceGroup("example", ResourceGroupArgs.builder()
            .name("example-resources")
            .location("West Europe")
            .build());
        // StorageV2 account with hierarchical namespace enabled, which is
        // required before a Data Lake Gen2 filesystem can be created in it.
        var exampleAccount = new Account("exampleAccount", AccountArgs.builder()
            .name("examplestorageacc")
            .resourceGroupName(example.name())
            .location(example.location())
            .accountTier("Standard")
            .accountReplicationType("LRS")
            .accountKind("StorageV2")
            // isHnsEnabled takes a Boolean, not the string "true".
            .isHnsEnabled(true)
            .build());
        // Data Lake Gen2 filesystem used as the workspace's primary storage.
        var exampleDataLakeGen2Filesystem = new DataLakeGen2Filesystem("exampleDataLakeGen2Filesystem", DataLakeGen2FilesystemArgs.builder()
            .name("example")
            .storageAccountId(exampleAccount.id())
            .build());
        // Synapse workspace that the Spark pool below is attached to.
        var exampleWorkspace = new Workspace("exampleWorkspace", WorkspaceArgs.builder()
            .name("example")
            .resourceGroupName(example.name())
            .location(example.location())
            .storageDataLakeGen2FilesystemId(exampleDataLakeGen2Filesystem.id())
            .sqlAdministratorLogin("sqladminuser")
            // Example value only — use a Pulumi config secret in real code.
            .sqlAdministratorLoginPassword("H@Sh1CoR3!")
            .identity(WorkspaceIdentityArgs.builder()
                .type("SystemAssigned")
                .build())
            .build());
        // Spark pool that auto-scales between 3 and 50 nodes and
        // auto-pauses after 15 idle minutes. libraryRequirement/sparkConfig
        // upload a requirements.txt and a Spark configuration file.
        var exampleSparkPool = new SparkPool("exampleSparkPool", SparkPoolArgs.builder()
            .name("example")
            .synapseWorkspaceId(exampleWorkspace.id())
            .nodeSizeFamily("MemoryOptimized")
            .nodeSize("Small")
            .cacheSize(100)
            .autoScale(SparkPoolAutoScaleArgs.builder()
                .maxNodeCount(50)
                .minNodeCount(3)
                .build())
            .autoPause(SparkPoolAutoPauseArgs.builder()
                .delayInMinutes(15)
                .build())
            .libraryRequirement(SparkPoolLibraryRequirementArgs.builder()
                .content("""
appnope==0.1.0
beautifulsoup4==4.6.3
                """)
                .filename("requirements.txt")
                .build())
            .sparkConfig(SparkPoolSparkConfigArgs.builder()
                .content("""
spark.shuffle.spill                true
                """)
                .filename("config.txt")
                .build())
            // sparkVersion is a string property ("3.2"), not a double.
            .sparkVersion("3.2")
            .tags(Map.of("ENV", "Production"))
            .build());
    }
}
resources:
  # Resource group that holds every resource in this example.
  example:
    type: azure:core:ResourceGroup
    properties:
      name: example-resources
      location: West Europe
  # StorageV2 account with hierarchical namespace enabled, which is
  # required before a Data Lake Gen2 filesystem can be created in it.
  exampleAccount:
    type: azure:storage:Account
    name: example
    properties:
      name: examplestorageacc
      resourceGroupName: ${example.name}
      location: ${example.location}
      accountTier: Standard
      accountReplicationType: LRS
      accountKind: StorageV2
      # Boolean property — must not be the quoted string 'true'.
      isHnsEnabled: true
  # Data Lake Gen2 filesystem used as the workspace's primary storage.
  exampleDataLakeGen2Filesystem:
    type: azure:storage:DataLakeGen2Filesystem
    name: example
    properties:
      name: example
      storageAccountId: ${exampleAccount.id}
  # Synapse workspace that the Spark pool below is attached to.
  exampleWorkspace:
    type: azure:synapse:Workspace
    name: example
    properties:
      name: example
      resourceGroupName: ${example.name}
      location: ${example.location}
      storageDataLakeGen2FilesystemId: ${exampleDataLakeGen2Filesystem.id}
      sqlAdministratorLogin: sqladminuser
      # Example value only — use a Pulumi config secret in real code.
      sqlAdministratorLoginPassword: H@Sh1CoR3!
      identity:
        type: SystemAssigned
  # Spark pool that auto-scales between 3 and 50 nodes and auto-pauses
  # after 15 idle minutes.
  exampleSparkPool:
    type: azure:synapse:SparkPool
    name: example
    properties:
      name: example
      synapseWorkspaceId: ${exampleWorkspace.id}
      nodeSizeFamily: MemoryOptimized
      nodeSize: Small
      cacheSize: 100
      autoScale:
        maxNodeCount: 50
        minNodeCount: 3
      autoPause:
        delayInMinutes: 15
      libraryRequirement:
        content: |
          appnope==0.1.0
          beautifulsoup4==4.6.3
        filename: requirements.txt
      sparkConfig:
        content: |
          spark.shuffle.spill                true
        filename: config.txt
      # String property — quote it so YAML does not parse it as the float 3.2.
      sparkVersion: '3.2'
      tags:
        ENV: Production
Create SparkPool Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new SparkPool(name: string, args: SparkPoolArgs, opts?: CustomResourceOptions);
@overload
def SparkPool(resource_name: str,
              args: SparkPoolArgs,
              opts: Optional[ResourceOptions] = None)
@overload
def SparkPool(resource_name: str,
              opts: Optional[ResourceOptions] = None,
              node_size: Optional[str] = None,
              synapse_workspace_id: Optional[str] = None,
              spark_version: Optional[str] = None,
              node_size_family: Optional[str] = None,
              name: Optional[str] = None,
              compute_isolation_enabled: Optional[bool] = None,
              max_executors: Optional[int] = None,
              min_executors: Optional[int] = None,
              auto_pause: Optional[SparkPoolAutoPauseArgs] = None,
              node_count: Optional[int] = None,
              dynamic_executor_allocation_enabled: Optional[bool] = None,
              library_requirement: Optional[SparkPoolLibraryRequirementArgs] = None,
              session_level_packages_enabled: Optional[bool] = None,
              spark_config: Optional[SparkPoolSparkConfigArgs] = None,
              spark_events_folder: Optional[str] = None,
              spark_log_folder: Optional[str] = None,
              cache_size: Optional[int] = None,
              auto_scale: Optional[SparkPoolAutoScaleArgs] = None,
              tags: Optional[Mapping[str, str]] = None)
func NewSparkPool(ctx *Context, name string, args SparkPoolArgs, opts ...ResourceOption) (*SparkPool, error)
public SparkPool(string name, SparkPoolArgs args, CustomResourceOptions? opts = null)
public SparkPool(String name, SparkPoolArgs args)
public SparkPool(String name, SparkPoolArgs args, CustomResourceOptions options)
type: azure:synapse:SparkPool
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args SparkPoolArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args SparkPoolArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args SparkPoolArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args SparkPoolArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args SparkPoolArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var sparkPoolResource = new Azure.Synapse.SparkPool("sparkPoolResource", new()
{
    NodeSize = "string",
    SynapseWorkspaceId = "string",
    SparkVersion = "string",
    NodeSizeFamily = "string",
    Name = "string",
    ComputeIsolationEnabled = false,
    MaxExecutors = 0,
    MinExecutors = 0,
    AutoPause = new Azure.Synapse.Inputs.SparkPoolAutoPauseArgs
    {
        DelayInMinutes = 0,
    },
    NodeCount = 0,
    DynamicExecutorAllocationEnabled = false,
    LibraryRequirement = new Azure.Synapse.Inputs.SparkPoolLibraryRequirementArgs
    {
        Content = "string",
        Filename = "string",
    },
    SessionLevelPackagesEnabled = false,
    SparkConfig = new Azure.Synapse.Inputs.SparkPoolSparkConfigArgs
    {
        Content = "string",
        Filename = "string",
    },
    SparkEventsFolder = "string",
    SparkLogFolder = "string",
    CacheSize = 0,
    AutoScale = new Azure.Synapse.Inputs.SparkPoolAutoScaleArgs
    {
        MaxNodeCount = 0,
        MinNodeCount = 0,
    },
    Tags = 
    {
        { "string", "string" },
    },
});
example, err := synapse.NewSparkPool(ctx, "sparkPoolResource", &synapse.SparkPoolArgs{
	NodeSize:                pulumi.String("string"),
	SynapseWorkspaceId:      pulumi.String("string"),
	SparkVersion:            pulumi.String("string"),
	NodeSizeFamily:          pulumi.String("string"),
	Name:                    pulumi.String("string"),
	ComputeIsolationEnabled: pulumi.Bool(false),
	MaxExecutors:            pulumi.Int(0),
	MinExecutors:            pulumi.Int(0),
	AutoPause: &synapse.SparkPoolAutoPauseArgs{
		DelayInMinutes: pulumi.Int(0),
	},
	NodeCount:                        pulumi.Int(0),
	DynamicExecutorAllocationEnabled: pulumi.Bool(false),
	LibraryRequirement: &synapse.SparkPoolLibraryRequirementArgs{
		Content:  pulumi.String("string"),
		Filename: pulumi.String("string"),
	},
	SessionLevelPackagesEnabled: pulumi.Bool(false),
	SparkConfig: &synapse.SparkPoolSparkConfigArgs{
		Content:  pulumi.String("string"),
		Filename: pulumi.String("string"),
	},
	SparkEventsFolder: pulumi.String("string"),
	SparkLogFolder:    pulumi.String("string"),
	CacheSize:         pulumi.Int(0),
	AutoScale: &synapse.SparkPoolAutoScaleArgs{
		MaxNodeCount: pulumi.Int(0),
		MinNodeCount: pulumi.Int(0),
	},
	Tags: pulumi.StringMap{
		"string": pulumi.String("string"),
	},
})
var sparkPoolResource = new SparkPool("sparkPoolResource", SparkPoolArgs.builder()
    .nodeSize("string")
    .synapseWorkspaceId("string")
    .sparkVersion("string")
    .nodeSizeFamily("string")
    .name("string")
    .computeIsolationEnabled(false)
    .maxExecutors(0)
    .minExecutors(0)
    .autoPause(SparkPoolAutoPauseArgs.builder()
        .delayInMinutes(0)
        .build())
    .nodeCount(0)
    .dynamicExecutorAllocationEnabled(false)
    .libraryRequirement(SparkPoolLibraryRequirementArgs.builder()
        .content("string")
        .filename("string")
        .build())
    .sessionLevelPackagesEnabled(false)
    .sparkConfig(SparkPoolSparkConfigArgs.builder()
        .content("string")
        .filename("string")
        .build())
    .sparkEventsFolder("string")
    .sparkLogFolder("string")
    .cacheSize(0)
    .autoScale(SparkPoolAutoScaleArgs.builder()
        .maxNodeCount(0)
        .minNodeCount(0)
        .build())
    .tags(Map.of("string", "string"))
    .build());
spark_pool_resource = azure.synapse.SparkPool("sparkPoolResource",
    node_size="string",
    synapse_workspace_id="string",
    spark_version="string",
    node_size_family="string",
    name="string",
    compute_isolation_enabled=False,
    max_executors=0,
    min_executors=0,
    auto_pause={
        "delay_in_minutes": 0,
    },
    node_count=0,
    dynamic_executor_allocation_enabled=False,
    library_requirement={
        "content": "string",
        "filename": "string",
    },
    session_level_packages_enabled=False,
    spark_config={
        "content": "string",
        "filename": "string",
    },
    spark_events_folder="string",
    spark_log_folder="string",
    cache_size=0,
    auto_scale={
        "max_node_count": 0,
        "min_node_count": 0,
    },
    tags={
        "string": "string",
    })
const sparkPoolResource = new azure.synapse.SparkPool("sparkPoolResource", {
    nodeSize: "string",
    synapseWorkspaceId: "string",
    sparkVersion: "string",
    nodeSizeFamily: "string",
    name: "string",
    computeIsolationEnabled: false,
    maxExecutors: 0,
    minExecutors: 0,
    autoPause: {
        delayInMinutes: 0,
    },
    nodeCount: 0,
    dynamicExecutorAllocationEnabled: false,
    libraryRequirement: {
        content: "string",
        filename: "string",
    },
    sessionLevelPackagesEnabled: false,
    sparkConfig: {
        content: "string",
        filename: "string",
    },
    sparkEventsFolder: "string",
    sparkLogFolder: "string",
    cacheSize: 0,
    autoScale: {
        maxNodeCount: 0,
        minNodeCount: 0,
    },
    tags: {
        string: "string",
    },
});
type: azure:synapse:SparkPool
properties:
    autoPause:
        delayInMinutes: 0
    autoScale:
        maxNodeCount: 0
        minNodeCount: 0
    cacheSize: 0
    computeIsolationEnabled: false
    dynamicExecutorAllocationEnabled: false
    libraryRequirement:
        content: string
        filename: string
    maxExecutors: 0
    minExecutors: 0
    name: string
    nodeCount: 0
    nodeSize: string
    nodeSizeFamily: string
    sessionLevelPackagesEnabled: false
    sparkConfig:
        content: string
        filename: string
    sparkEventsFolder: string
    sparkLogFolder: string
    sparkVersion: string
    synapseWorkspaceId: string
    tags:
        string: string
SparkPool Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The SparkPool resource accepts the following input properties:
- NodeSize string
- The level of node in the Spark Pool. Possible values are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge.
- NodeSizeFamily string
- The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None.
- SparkVersion string
- The Apache Spark version. Possible values are 3.2, 3.3, and 3.4.
- SynapseWorkspaceId string
- The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
- AutoPause SparkPoolAutoPause
- An auto_pause block as defined below.
- AutoScale SparkPoolAutoScale
- An auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified.
- CacheSize int
- The cache size in the Spark Pool.
- ComputeIsolationEnabled bool
- Indicates whether compute isolation is enabled or not. Defaults to false.
- DynamicExecutorAllocationEnabled bool
- LibraryRequirement SparkPoolLibraryRequirement
- MaxExecutors int
- MinExecutors int
- Name string
- The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
- NodeCount int
- The number of nodes in the Spark Pool. Exactly one of node_count or auto_scale must be specified.
- SessionLevelPackagesEnabled bool
- SparkConfig SparkPoolSparkConfig
- SparkEventsFolder string
- SparkLogFolder string
- Tags Dictionary<string, string>
- NodeSize string
- The level of node in the Spark Pool. Possible values are Small,Medium,Large,None,XLarge,XXLargeandXXXLarge.
- NodeSize stringFamily 
- The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA,HardwareAcceleratedGPU,MemoryOptimized, andNone.
- SparkVersion string
- The Apache Spark version. Possible values are 3.2,3.3, and3.4.
- SynapseWorkspace stringId 
- The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
- AutoPause SparkPool Auto Pause Args 
- An auto_pauseblock as defined below.
- AutoScale SparkPool Auto Scale Args 
- An auto_scaleblock as defined below. Exactly one ofnode_countorauto_scalemust be specified.
- CacheSize int
- The cache size in the Spark Pool.
- ComputeIsolation boolEnabled 
- Indicates whether compute isolation is enabled or not. Defaults to false.
- DynamicExecutor boolAllocation Enabled 
- LibraryRequirement SparkPool Library Requirement Args 
- MaxExecutors int
- MinExecutors int
- Name string
- The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
- NodeCount int
- The number of nodes in the Spark Pool. Exactly one of node_countorauto_scalemust be specified.
- SessionLevel boolPackages Enabled 
- SparkConfig SparkPool Spark Config Args 
- SparkEvents stringFolder 
- SparkLog stringFolder 
- map[string]string
- nodeSize String
- The level of node in the Spark Pool. Possible values are Small,Medium,Large,None,XLarge,XXLargeandXXXLarge.
- nodeSize StringFamily 
- The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA,HardwareAcceleratedGPU,MemoryOptimized, andNone.
- sparkVersion String
- The Apache Spark version. Possible values are 3.2,3.3, and3.4.
- synapseWorkspace StringId 
- The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
- autoPause SparkPool Auto Pause 
- An auto_pauseblock as defined below.
- autoScale SparkPool Auto Scale 
- An auto_scaleblock as defined below. Exactly one ofnode_countorauto_scalemust be specified.
- cacheSize Integer
- The cache size in the Spark Pool.
- computeIsolation BooleanEnabled 
- Indicates whether compute isolation is enabled or not. Defaults to false.
- dynamicExecutor BooleanAllocation Enabled 
- libraryRequirement SparkPool Library Requirement 
- maxExecutors Integer
- minExecutors Integer
- name String
- The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
- nodeCount Integer
- The number of nodes in the Spark Pool. Exactly one of node_countorauto_scalemust be specified.
- sessionLevel BooleanPackages Enabled 
- sparkConfig SparkPool Spark Config 
- sparkEvents StringFolder 
- sparkLog StringFolder 
- Map<String,String>
- nodeSize string
- The level of node in the Spark Pool. Possible values are Small,Medium,Large,None,XLarge,XXLargeandXXXLarge.
- nodeSize stringFamily 
- The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA,HardwareAcceleratedGPU,MemoryOptimized, andNone.
- sparkVersion string
- The Apache Spark version. Possible values are 3.2,3.3, and3.4.
- synapseWorkspace stringId 
- The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
- autoPause SparkPool Auto Pause 
- An auto_pauseblock as defined below.
- autoScale SparkPool Auto Scale 
- An auto_scaleblock as defined below. Exactly one ofnode_countorauto_scalemust be specified.
- cacheSize number
- The cache size in the Spark Pool.
- computeIsolation booleanEnabled 
- Indicates whether compute isolation is enabled or not. Defaults to false.
- dynamicExecutor booleanAllocation Enabled 
- libraryRequirement SparkPool Library Requirement 
- maxExecutors number
- minExecutors number
- name string
- The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
- nodeCount number
- The number of nodes in the Spark Pool. Exactly one of node_countorauto_scalemust be specified.
- sessionLevel booleanPackages Enabled 
- sparkConfig SparkPool Spark Config 
- sparkEvents stringFolder 
- sparkLog stringFolder 
- {[key: string]: string}
- node_size str
- The level of node in the Spark Pool. Possible values are Small,Medium,Large,None,XLarge,XXLargeandXXXLarge.
- node_size_ strfamily 
- The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA,HardwareAcceleratedGPU,MemoryOptimized, andNone.
- spark_version str
- The Apache Spark version. Possible values are 3.2,3.3, and3.4.
- synapse_workspace_ strid 
- The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
- auto_pause SparkPool Auto Pause Args 
- An auto_pauseblock as defined below.
- auto_scale SparkPool Auto Scale Args 
- An auto_scaleblock as defined below. Exactly one ofnode_countorauto_scalemust be specified.
- cache_size int
- The cache size in the Spark Pool.
- compute_isolation_ boolenabled 
- Indicates whether compute isolation is enabled or not. Defaults to false.
- dynamic_executor_ boolallocation_ enabled 
- library_requirement SparkPool Library Requirement Args 
- max_executors int
- min_executors int
- name str
- The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
- node_count int
- The number of nodes in the Spark Pool. Exactly one of node_countorauto_scalemust be specified.
- session_level_ boolpackages_ enabled 
- spark_config SparkPool Spark Config Args 
- spark_events_ strfolder 
- spark_log_ strfolder 
- Mapping[str, str]
- nodeSize String
- The level of node in the Spark Pool. Possible values are Small,Medium,Large,None,XLarge,XXLargeandXXXLarge.
- nodeSize StringFamily 
- The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA,HardwareAcceleratedGPU,MemoryOptimized, andNone.
- sparkVersion String
- The Apache Spark version. Possible values are 3.2,3.3, and3.4.
- synapseWorkspace StringId 
- The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
- autoPause Property Map
- An auto_pauseblock as defined below.
- autoScale Property Map
- An auto_scaleblock as defined below. Exactly one ofnode_countorauto_scalemust be specified.
- cacheSize Number
- The cache size in the Spark Pool.
- computeIsolation BooleanEnabled 
- Indicates whether compute isolation is enabled or not. Defaults to false.
- dynamicExecutor BooleanAllocation Enabled 
- libraryRequirement Property Map
- maxExecutors Number
- minExecutors Number
- name String
- The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
- nodeCount Number
- The number of nodes in the Spark Pool. Exactly one of node_countorauto_scalemust be specified.
- sessionLevel BooleanPackages Enabled 
- sparkConfig Property Map
- sparkEvents StringFolder 
- sparkLog StringFolder 
- Map<String>
Outputs
All input properties are implicitly available as output properties. Additionally, the SparkPool resource produces the following output properties:
- Id string
- The provider-assigned unique ID for this managed resource.
- Id string
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
- id string
- The provider-assigned unique ID for this managed resource.
- id str
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
Look up Existing SparkPool Resource
Get an existing SparkPool resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: SparkPoolState, opts?: CustomResourceOptions): SparkPool@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        auto_pause: Optional[SparkPoolAutoPauseArgs] = None,
        auto_scale: Optional[SparkPoolAutoScaleArgs] = None,
        cache_size: Optional[int] = None,
        compute_isolation_enabled: Optional[bool] = None,
        dynamic_executor_allocation_enabled: Optional[bool] = None,
        library_requirement: Optional[SparkPoolLibraryRequirementArgs] = None,
        max_executors: Optional[int] = None,
        min_executors: Optional[int] = None,
        name: Optional[str] = None,
        node_count: Optional[int] = None,
        node_size: Optional[str] = None,
        node_size_family: Optional[str] = None,
        session_level_packages_enabled: Optional[bool] = None,
        spark_config: Optional[SparkPoolSparkConfigArgs] = None,
        spark_events_folder: Optional[str] = None,
        spark_log_folder: Optional[str] = None,
        spark_version: Optional[str] = None,
        synapse_workspace_id: Optional[str] = None,
        tags: Optional[Mapping[str, str]] = None) -> SparkPool
func GetSparkPool(ctx *Context, name string, id IDInput, state *SparkPoolState, opts ...ResourceOption) (*SparkPool, error)
public static SparkPool Get(string name, Input<string> id, SparkPoolState? state, CustomResourceOptions? opts = null)
public static SparkPool get(String name, Output<String> id, SparkPoolState state, CustomResourceOptions options)
resources:
  _:
    type: azure:synapse:SparkPool
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- AutoPause SparkPoolAutoPause
- An auto_pause block as defined below.
- AutoScale SparkPoolAutoScale
- An auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified.
- CacheSize int
- The cache size in the Spark Pool.
- ComputeIsolationEnabled bool
- Indicates whether compute isolation is enabled or not. Defaults to false.
- DynamicExecutorAllocationEnabled bool
- LibraryRequirement SparkPoolLibraryRequirement
- MaxExecutors int
- MinExecutors int
- Name string
- The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
- NodeCount int
- The number of nodes in the Spark Pool. Exactly one of node_count or auto_scale must be specified.
- NodeSize string
- The level of node in the Spark Pool. Possible values are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge.
- NodeSizeFamily string
- The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None.
- SessionLevelPackagesEnabled bool
- SparkConfig SparkPoolSparkConfig
- SparkEventsFolder string
- SparkLogFolder string
- SparkVersion string
- The Apache Spark version. Possible values are 3.2, 3.3, and 3.4.
- SynapseWorkspaceId string
- The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
- Tags Dictionary<string, string>
- AutoPause SparkPoolAutoPauseArgs
- An auto_pause block as defined below.
- AutoScale SparkPoolAutoScaleArgs
- An auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified.
- CacheSize int
- The cache size in the Spark Pool.
- ComputeIsolationEnabled bool
- Indicates whether compute isolation is enabled or not. Defaults to false.
- DynamicExecutorAllocationEnabled bool
- LibraryRequirement SparkPoolLibraryRequirementArgs
- MaxExecutors int
- MinExecutors int
- Name string
- The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
- NodeCount int
- The number of nodes in the Spark Pool. Exactly one of node_count or auto_scale must be specified.
- NodeSize string
- The level of node in the Spark Pool. Possible values are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge.
- NodeSizeFamily string
- The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None.
- SessionLevelPackagesEnabled bool
- SparkConfig SparkPoolSparkConfigArgs
- SparkEventsFolder string
- SparkLogFolder string
- SparkVersion string
- The Apache Spark version. Possible values are 3.2, 3.3, and 3.4.
- SynapseWorkspaceId string
- The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
- Tags map[string]string
- autoPause SparkPoolAutoPause
- An auto_pause block as defined below.
- autoScale SparkPoolAutoScale
- An auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified.
- cacheSize Integer
- The cache size in the Spark Pool.
- computeIsolationEnabled Boolean
- Indicates whether compute isolation is enabled or not. Defaults to false.
- dynamicExecutorAllocationEnabled Boolean
- libraryRequirement SparkPoolLibraryRequirement
- maxExecutors Integer
- minExecutors Integer
- name String
- The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
- nodeCount Integer
- The number of nodes in the Spark Pool. Exactly one of node_count or auto_scale must be specified.
- nodeSize String
- The level of node in the Spark Pool. Possible values are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge.
- nodeSizeFamily String
- The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None.
- sessionLevelPackagesEnabled Boolean
- sparkConfig SparkPoolSparkConfig
- sparkEventsFolder String
- sparkLogFolder String
- sparkVersion String
- The Apache Spark version. Possible values are 3.2, 3.3, and 3.4.
- synapseWorkspaceId String
- The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
- tags Map<String,String>
- autoPause SparkPoolAutoPause
- An auto_pause block as defined below.
- autoScale SparkPoolAutoScale
- An auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified.
- cacheSize number
- The cache size in the Spark Pool.
- computeIsolationEnabled boolean
- Indicates whether compute isolation is enabled or not. Defaults to false.
- dynamicExecutorAllocationEnabled boolean
- libraryRequirement SparkPoolLibraryRequirement
- maxExecutors number
- minExecutors number
- name string
- The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
- nodeCount number
- The number of nodes in the Spark Pool. Exactly one of node_count or auto_scale must be specified.
- nodeSize string
- The level of node in the Spark Pool. Possible values are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge.
- nodeSizeFamily string
- The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None.
- sessionLevelPackagesEnabled boolean
- sparkConfig SparkPoolSparkConfig
- sparkEventsFolder string
- sparkLogFolder string
- sparkVersion string
- The Apache Spark version. Possible values are 3.2, 3.3, and 3.4.
- synapseWorkspaceId string
- The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
- tags {[key: string]: string}
- auto_pause SparkPoolAutoPauseArgs
- An auto_pause block as defined below.
- auto_scale SparkPoolAutoScaleArgs
- An auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified.
- cache_size int
- The cache size in the Spark Pool.
- compute_isolation_enabled bool
- Indicates whether compute isolation is enabled or not. Defaults to false.
- dynamic_executor_allocation_enabled bool
- library_requirement SparkPoolLibraryRequirementArgs
- max_executors int
- min_executors int
- name str
- The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
- node_count int
- The number of nodes in the Spark Pool. Exactly one of node_count or auto_scale must be specified.
- node_size str
- The level of node in the Spark Pool. Possible values are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge.
- node_size_family str
- The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None.
- session_level_packages_enabled bool
- spark_config SparkPoolSparkConfigArgs
- spark_events_folder str
- spark_log_folder str
- spark_version str
- The Apache Spark version. Possible values are 3.2, 3.3, and 3.4.
- synapse_workspace_id str
- The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
- tags Mapping[str, str]
- autoPause Property Map
- An auto_pause block as defined below.
- autoScale Property Map
- An auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified.
- cacheSize Number
- The cache size in the Spark Pool.
- computeIsolationEnabled Boolean
- Indicates whether compute isolation is enabled or not. Defaults to false.
- dynamicExecutorAllocationEnabled Boolean
- libraryRequirement Property Map
- maxExecutors Number
- minExecutors Number
- name String
- The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
- nodeCount Number
- The number of nodes in the Spark Pool. Exactly one of node_count or auto_scale must be specified.
- nodeSize String
- The level of node in the Spark Pool. Possible values are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge.
- nodeSizeFamily String
- The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None.
- sessionLevelPackagesEnabled Boolean
- sparkConfig Property Map
- sparkEventsFolder String
- sparkLogFolder String
- sparkVersion String
- The Apache Spark version. Possible values are 3.2, 3.3, and 3.4.
- synapseWorkspaceId String
- The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
- tags Map<String>
Supporting Types
SparkPoolAutoPause, SparkPoolAutoPauseArgs
- DelayInMinutes int
- Number of minutes of idle time before the Spark Pool is automatically paused. Must be between 5 and 10080.
- DelayInMinutes int
- Number of minutes of idle time before the Spark Pool is automatically paused. Must be between 5 and 10080.
- delayInMinutes Integer
- Number of minutes of idle time before the Spark Pool is automatically paused. Must be between 5 and 10080.
- delayInMinutes number
- Number of minutes of idle time before the Spark Pool is automatically paused. Must be between 5 and 10080.
- delay_in_minutes int
- Number of minutes of idle time before the Spark Pool is automatically paused. Must be between 5 and 10080.
- delayInMinutes Number
- Number of minutes of idle time before the Spark Pool is automatically paused. Must be between 5 and 10080.
SparkPoolAutoScale, SparkPoolAutoScaleArgs
- MaxNodeCount int
- The maximum number of nodes the Spark Pool can support. Must be between 3 and 200.
- MinNodeCount int
- The minimum number of nodes the Spark Pool can support. Must be between 3 and 200.
- MaxNodeCount int
- The maximum number of nodes the Spark Pool can support. Must be between 3 and 200.
- MinNodeCount int
- The minimum number of nodes the Spark Pool can support. Must be between 3 and 200.
- maxNodeCount Integer
- The maximum number of nodes the Spark Pool can support. Must be between 3 and 200.
- minNodeCount Integer
- The minimum number of nodes the Spark Pool can support. Must be between 3 and 200.
- maxNodeCount number
- The maximum number of nodes the Spark Pool can support. Must be between 3 and 200.
- minNodeCount number
- The minimum number of nodes the Spark Pool can support. Must be between 3 and 200.
- max_node_count int
- The maximum number of nodes the Spark Pool can support. Must be between 3 and 200.
- min_node_count int
- The minimum number of nodes the Spark Pool can support. Must be between 3 and 200.
- maxNodeCount Number
- The maximum number of nodes the Spark Pool can support. Must be between 3 and 200.
- minNodeCount Number
- The minimum number of nodes the Spark Pool can support. Must be between 3 and 200.
SparkPoolLibraryRequirement, SparkPoolLibraryRequirementArgs        
SparkPoolSparkConfig, SparkPoolSparkConfigArgs        
Import
Synapse Spark Pool can be imported using the resource id, e.g.
$ pulumi import azure:synapse/sparkPool:SparkPool example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Synapse/workspaces/workspace1/bigDataPools/sparkPool1
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Azure Classic pulumi/pulumi-azure
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the azurerm Terraform Provider.