aws.sagemaker.Model
Explore with Pulumi AI
Provides a SageMaker model resource.
Example Usage
Basic usage:
// Example: create a SageMaker model backed by the AWS-managed prebuilt "kmeans" image.
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
// Trust policy that lets the SageMaker service assume the execution role.
const assumeRole = aws.iam.getPolicyDocument({
    statements: [{
        actions: ["sts:AssumeRole"],
        principals: [{
            type: "Service",
            identifiers: ["sagemaker.amazonaws.com"],
        }],
    }],
});
// Execution role SageMaker uses to pull the image and access model artifacts.
const exampleRole = new aws.iam.Role("example", {assumeRolePolicy: assumeRole.then(assumeRole => assumeRole.json)});
// Look up the ECR registry path of the prebuilt "kmeans" algorithm image.
const test = aws.sagemaker.getPrebuiltEcrImage({
    repositoryName: "kmeans",
});
const example = new aws.sagemaker.Model("example", {
    name: "my-model",
    executionRoleArn: exampleRole.arn,
    primaryContainer: {
        image: test.then(test => test.registryPath),
    },
});
# Example: create a SageMaker model backed by the AWS-managed prebuilt "kmeans" image.
import pulumi
import pulumi_aws as aws
# Trust policy that lets the SageMaker service assume the execution role.
assume_role = aws.iam.get_policy_document(statements=[{
    "actions": ["sts:AssumeRole"],
    "principals": [{
        "type": "Service",
        "identifiers": ["sagemaker.amazonaws.com"],
    }],
}])
# Execution role SageMaker uses to pull the image and access model artifacts.
example_role = aws.iam.Role("example", assume_role_policy=assume_role.json)
# Look up the ECR registry path of the prebuilt "kmeans" algorithm image.
test = aws.sagemaker.get_prebuilt_ecr_image(repository_name="kmeans")
example = aws.sagemaker.Model("example",
    name="my-model",
    execution_role_arn=example_role.arn,
    primary_container={
        "image": test.registry_path,
    })
// Example: create a SageMaker model backed by the AWS-managed prebuilt "kmeans" image.
package main
import (
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam"
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/sagemaker"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Trust policy that lets the SageMaker service assume the execution role.
		assumeRole, err := iam.GetPolicyDocument(ctx, &iam.GetPolicyDocumentArgs{
			Statements: []iam.GetPolicyDocumentStatement{
				{
					Actions: []string{
						"sts:AssumeRole",
					},
					Principals: []iam.GetPolicyDocumentStatementPrincipal{
						{
							Type: "Service",
							Identifiers: []string{
								"sagemaker.amazonaws.com",
							},
						},
					},
				},
			},
		}, nil)
		if err != nil {
			return err
		}
		// Execution role SageMaker uses to pull the image and access model artifacts.
		exampleRole, err := iam.NewRole(ctx, "example", &iam.RoleArgs{
			AssumeRolePolicy: pulumi.String(assumeRole.Json),
		})
		if err != nil {
			return err
		}
		// Look up the ECR registry path of the prebuilt "kmeans" algorithm image.
		test, err := sagemaker.GetPrebuiltEcrImage(ctx, &sagemaker.GetPrebuiltEcrImageArgs{
			RepositoryName: "kmeans",
		}, nil)
		if err != nil {
			return err
		}
		_, err = sagemaker.NewModel(ctx, "example", &sagemaker.ModelArgs{
			Name:             pulumi.String("my-model"),
			ExecutionRoleArn: exampleRole.Arn,
			PrimaryContainer: &sagemaker.ModelPrimaryContainerArgs{
				Image: pulumi.String(test.RegistryPath),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
// Example: create a SageMaker model backed by the AWS-managed prebuilt "kmeans" image.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() => 
{
    // Trust policy that lets the SageMaker service assume the execution role.
    var assumeRole = Aws.Iam.GetPolicyDocument.Invoke(new()
    {
        Statements = new[]
        {
            new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs
            {
                Actions = new[]
                {
                    "sts:AssumeRole",
                },
                Principals = new[]
                {
                    new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs
                    {
                        Type = "Service",
                        Identifiers = new[]
                        {
                            "sagemaker.amazonaws.com",
                        },
                    },
                },
            },
        },
    });
    // Execution role SageMaker uses to pull the image and access model artifacts.
    var exampleRole = new Aws.Iam.Role("example", new()
    {
        AssumeRolePolicy = assumeRole.Apply(getPolicyDocumentResult => getPolicyDocumentResult.Json),
    });
    // Look up the ECR registry path of the prebuilt "kmeans" algorithm image.
    var test = Aws.Sagemaker.GetPrebuiltEcrImage.Invoke(new()
    {
        RepositoryName = "kmeans",
    });
    var example = new Aws.Sagemaker.Model("example", new()
    {
        Name = "my-model",
        ExecutionRoleArn = exampleRole.Arn,
        PrimaryContainer = new Aws.Sagemaker.Inputs.ModelPrimaryContainerArgs
        {
            Image = test.Apply(getPrebuiltEcrImageResult => getPrebuiltEcrImageResult.RegistryPath),
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.iam.IamFunctions;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentArgs;
// These two input types are used below but were missing from the original example,
// which made it fail to compile.
import com.pulumi.aws.iam.inputs.GetPolicyDocumentStatementArgs;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentStatementPrincipalArgs;
import com.pulumi.aws.iam.Role;
import com.pulumi.aws.iam.RoleArgs;
import com.pulumi.aws.sagemaker.SagemakerFunctions;
import com.pulumi.aws.sagemaker.inputs.GetPrebuiltEcrImageArgs;
import com.pulumi.aws.sagemaker.Model;
import com.pulumi.aws.sagemaker.ModelArgs;
import com.pulumi.aws.sagemaker.inputs.ModelPrimaryContainerArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

/**
 * Example: create a SageMaker model backed by the AWS-managed prebuilt "kmeans" image.
 */
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        // Trust policy that lets the SageMaker service assume the execution role.
        final var assumeRole = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()
            .statements(GetPolicyDocumentStatementArgs.builder()
                .actions("sts:AssumeRole")
                .principals(GetPolicyDocumentStatementPrincipalArgs.builder()
                    .type("Service")
                    .identifiers("sagemaker.amazonaws.com")
                    .build())
                .build())
            .build());
        // Execution role SageMaker uses to pull the image and access model artifacts.
        var exampleRole = new Role("exampleRole", RoleArgs.builder()
            .assumeRolePolicy(assumeRole.applyValue(getPolicyDocumentResult -> getPolicyDocumentResult.json()))
            .build());
        // Look up the ECR registry path of the prebuilt "kmeans" algorithm image.
        final var test = SagemakerFunctions.getPrebuiltEcrImage(GetPrebuiltEcrImageArgs.builder()
            .repositoryName("kmeans")
            .build());
        var example = new Model("example", ModelArgs.builder()
            .name("my-model")
            .executionRoleArn(exampleRole.arn())
            .primaryContainer(ModelPrimaryContainerArgs.builder()
                .image(test.applyValue(getPrebuiltEcrImageResult -> getPrebuiltEcrImageResult.registryPath()))
                .build())
            .build());
    }
}
# Example: create a SageMaker model backed by the AWS-managed prebuilt "kmeans" image.
resources:
  example:
    type: aws:sagemaker:Model
    properties:
      name: my-model
      executionRoleArn: ${exampleRole.arn}
      primaryContainer:
        # ECR registry path of the prebuilt "kmeans" algorithm image (resolved below).
        image: ${test.registryPath}
  exampleRole:
    type: aws:iam:Role
    name: example
    properties:
      # Trust policy that lets the SageMaker service assume this role.
      assumeRolePolicy: ${assumeRole.json}
variables:
  assumeRole:
    fn::invoke:
      function: aws:iam:getPolicyDocument
      arguments:
        statements:
          - actions:
              - sts:AssumeRole
            principals:
              - type: Service
                identifiers:
                  - sagemaker.amazonaws.com
  test:
    fn::invoke:
      function: aws:sagemaker:getPrebuiltEcrImage
      arguments:
        repositoryName: kmeans
Inference Execution Config
- `mode` - (Required) How containers in a multi-container Model are run. The following values are valid: `Serial` and `Direct`.
Create Model Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Model(name: string, args: ModelArgs, opts?: CustomResourceOptions);

@overload
def Model(resource_name: str,
          args: ModelArgs,
          opts: Optional[ResourceOptions] = None)
@overload
def Model(resource_name: str,
          opts: Optional[ResourceOptions] = None,
          execution_role_arn: Optional[str] = None,
          containers: Optional[Sequence[ModelContainerArgs]] = None,
          enable_network_isolation: Optional[bool] = None,
          inference_execution_config: Optional[ModelInferenceExecutionConfigArgs] = None,
          name: Optional[str] = None,
          primary_container: Optional[ModelPrimaryContainerArgs] = None,
          tags: Optional[Mapping[str, str]] = None,
          vpc_config: Optional[ModelVpcConfigArgs] = None)

func NewModel(ctx *Context, name string, args ModelArgs, opts ...ResourceOption) (*Model, error)

public Model(string name, ModelArgs args, CustomResourceOptions? opts = null)

type: aws:sagemaker:Model
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args ModelArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args ModelArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args ModelArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args ModelArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args ModelArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
// Reference example: all Model input properties shown with placeholder values.
var examplemodelResourceResourceFromSagemakermodel = new Aws.Sagemaker.Model("examplemodelResourceResourceFromSagemakermodel", new()
{
    ExecutionRoleArn = "string",
    Containers = new[]
    {
        new Aws.Sagemaker.Inputs.ModelContainerArgs
        {
            ContainerHostname = "string",
            Environment = 
            {
                { "string", "string" },
            },
            Image = "string",
            ImageConfig = new Aws.Sagemaker.Inputs.ModelContainerImageConfigArgs
            {
                RepositoryAccessMode = "string",
                RepositoryAuthConfig = new Aws.Sagemaker.Inputs.ModelContainerImageConfigRepositoryAuthConfigArgs
                {
                    RepositoryCredentialsProviderArn = "string",
                },
            },
            InferenceSpecificationName = "string",
            Mode = "string",
            ModelDataSource = new Aws.Sagemaker.Inputs.ModelContainerModelDataSourceArgs
            {
                S3DataSources = new[]
                {
                    new Aws.Sagemaker.Inputs.ModelContainerModelDataSourceS3DataSourceArgs
                    {
                        CompressionType = "string",
                        S3DataType = "string",
                        S3Uri = "string",
                        ModelAccessConfig = new Aws.Sagemaker.Inputs.ModelContainerModelDataSourceS3DataSourceModelAccessConfigArgs
                        {
                            AcceptEula = false,
                        },
                    },
                },
            },
            ModelDataUrl = "string",
            ModelPackageName = "string",
            MultiModelConfig = new Aws.Sagemaker.Inputs.ModelContainerMultiModelConfigArgs
            {
                ModelCacheSetting = "string",
            },
        },
    },
    EnableNetworkIsolation = false,
    InferenceExecutionConfig = new Aws.Sagemaker.Inputs.ModelInferenceExecutionConfigArgs
    {
        Mode = "string",
    },
    Name = "string",
    PrimaryContainer = new Aws.Sagemaker.Inputs.ModelPrimaryContainerArgs
    {
        ContainerHostname = "string",
        Environment = 
        {
            { "string", "string" },
        },
        Image = "string",
        ImageConfig = new Aws.Sagemaker.Inputs.ModelPrimaryContainerImageConfigArgs
        {
            RepositoryAccessMode = "string",
            RepositoryAuthConfig = new Aws.Sagemaker.Inputs.ModelPrimaryContainerImageConfigRepositoryAuthConfigArgs
            {
                RepositoryCredentialsProviderArn = "string",
            },
        },
        InferenceSpecificationName = "string",
        Mode = "string",
        ModelDataSource = new Aws.Sagemaker.Inputs.ModelPrimaryContainerModelDataSourceArgs
        {
            S3DataSources = new[]
            {
                new Aws.Sagemaker.Inputs.ModelPrimaryContainerModelDataSourceS3DataSourceArgs
                {
                    CompressionType = "string",
                    S3DataType = "string",
                    S3Uri = "string",
                    ModelAccessConfig = new Aws.Sagemaker.Inputs.ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfigArgs
                    {
                        AcceptEula = false,
                    },
                },
            },
        },
        ModelDataUrl = "string",
        ModelPackageName = "string",
        MultiModelConfig = new Aws.Sagemaker.Inputs.ModelPrimaryContainerMultiModelConfigArgs
        {
            ModelCacheSetting = "string",
        },
    },
    Tags = 
    {
        { "string", "string" },
    },
    VpcConfig = new Aws.Sagemaker.Inputs.ModelVpcConfigArgs
    {
        SecurityGroupIds = new[]
        {
            "string",
        },
        Subnets = new[]
        {
            "string",
        },
    },
});
// Reference example: all Model input properties shown with placeholder values.
example, err := sagemaker.NewModel(ctx, "examplemodelResourceResourceFromSagemakermodel", &sagemaker.ModelArgs{
	ExecutionRoleArn: pulumi.String("string"),
	Containers: sagemaker.ModelContainerArray{
		&sagemaker.ModelContainerArgs{
			ContainerHostname: pulumi.String("string"),
			Environment: pulumi.StringMap{
				"string": pulumi.String("string"),
			},
			Image: pulumi.String("string"),
			ImageConfig: &sagemaker.ModelContainerImageConfigArgs{
				RepositoryAccessMode: pulumi.String("string"),
				RepositoryAuthConfig: &sagemaker.ModelContainerImageConfigRepositoryAuthConfigArgs{
					RepositoryCredentialsProviderArn: pulumi.String("string"),
				},
			},
			InferenceSpecificationName: pulumi.String("string"),
			Mode:                       pulumi.String("string"),
			ModelDataSource: &sagemaker.ModelContainerModelDataSourceArgs{
				S3DataSources: sagemaker.ModelContainerModelDataSourceS3DataSourceArray{
					&sagemaker.ModelContainerModelDataSourceS3DataSourceArgs{
						CompressionType: pulumi.String("string"),
						S3DataType:      pulumi.String("string"),
						S3Uri:           pulumi.String("string"),
						ModelAccessConfig: &sagemaker.ModelContainerModelDataSourceS3DataSourceModelAccessConfigArgs{
							AcceptEula: pulumi.Bool(false),
						},
					},
				},
			},
			ModelDataUrl:     pulumi.String("string"),
			ModelPackageName: pulumi.String("string"),
			MultiModelConfig: &sagemaker.ModelContainerMultiModelConfigArgs{
				ModelCacheSetting: pulumi.String("string"),
			},
		},
	},
	EnableNetworkIsolation: pulumi.Bool(false),
	InferenceExecutionConfig: &sagemaker.ModelInferenceExecutionConfigArgs{
		Mode: pulumi.String("string"),
	},
	Name: pulumi.String("string"),
	PrimaryContainer: &sagemaker.ModelPrimaryContainerArgs{
		ContainerHostname: pulumi.String("string"),
		Environment: pulumi.StringMap{
			"string": pulumi.String("string"),
		},
		Image: pulumi.String("string"),
		ImageConfig: &sagemaker.ModelPrimaryContainerImageConfigArgs{
			RepositoryAccessMode: pulumi.String("string"),
			RepositoryAuthConfig: &sagemaker.ModelPrimaryContainerImageConfigRepositoryAuthConfigArgs{
				RepositoryCredentialsProviderArn: pulumi.String("string"),
			},
		},
		InferenceSpecificationName: pulumi.String("string"),
		Mode:                       pulumi.String("string"),
		ModelDataSource: &sagemaker.ModelPrimaryContainerModelDataSourceArgs{
			S3DataSources: sagemaker.ModelPrimaryContainerModelDataSourceS3DataSourceArray{
				&sagemaker.ModelPrimaryContainerModelDataSourceS3DataSourceArgs{
					CompressionType: pulumi.String("string"),
					S3DataType:      pulumi.String("string"),
					S3Uri:           pulumi.String("string"),
					ModelAccessConfig: &sagemaker.ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfigArgs{
						AcceptEula: pulumi.Bool(false),
					},
				},
			},
		},
		ModelDataUrl:     pulumi.String("string"),
		ModelPackageName: pulumi.String("string"),
		MultiModelConfig: &sagemaker.ModelPrimaryContainerMultiModelConfigArgs{
			ModelCacheSetting: pulumi.String("string"),
		},
	},
	Tags: pulumi.StringMap{
		"string": pulumi.String("string"),
	},
	VpcConfig: &sagemaker.ModelVpcConfigArgs{
		SecurityGroupIds: pulumi.StringArray{
			pulumi.String("string"),
		},
		Subnets: pulumi.StringArray{
			pulumi.String("string"),
		},
	},
})
// Reference example: all Model input properties shown with placeholder values.
var examplemodelResourceResourceFromSagemakermodel = new Model("examplemodelResourceResourceFromSagemakermodel", ModelArgs.builder()
    .executionRoleArn("string")
    .containers(ModelContainerArgs.builder()
        .containerHostname("string")
        .environment(Map.of("string", "string"))
        .image("string")
        .imageConfig(ModelContainerImageConfigArgs.builder()
            .repositoryAccessMode("string")
            .repositoryAuthConfig(ModelContainerImageConfigRepositoryAuthConfigArgs.builder()
                .repositoryCredentialsProviderArn("string")
                .build())
            .build())
        .inferenceSpecificationName("string")
        .mode("string")
        .modelDataSource(ModelContainerModelDataSourceArgs.builder()
            .s3DataSources(ModelContainerModelDataSourceS3DataSourceArgs.builder()
                .compressionType("string")
                .s3DataType("string")
                .s3Uri("string")
                .modelAccessConfig(ModelContainerModelDataSourceS3DataSourceModelAccessConfigArgs.builder()
                    .acceptEula(false)
                    .build())
                .build())
            .build())
        .modelDataUrl("string")
        .modelPackageName("string")
        .multiModelConfig(ModelContainerMultiModelConfigArgs.builder()
            .modelCacheSetting("string")
            .build())
        .build())
    .enableNetworkIsolation(false)
    .inferenceExecutionConfig(ModelInferenceExecutionConfigArgs.builder()
        .mode("string")
        .build())
    .name("string")
    .primaryContainer(ModelPrimaryContainerArgs.builder()
        .containerHostname("string")
        .environment(Map.of("string", "string"))
        .image("string")
        .imageConfig(ModelPrimaryContainerImageConfigArgs.builder()
            .repositoryAccessMode("string")
            .repositoryAuthConfig(ModelPrimaryContainerImageConfigRepositoryAuthConfigArgs.builder()
                .repositoryCredentialsProviderArn("string")
                .build())
            .build())
        .inferenceSpecificationName("string")
        .mode("string")
        .modelDataSource(ModelPrimaryContainerModelDataSourceArgs.builder()
            .s3DataSources(ModelPrimaryContainerModelDataSourceS3DataSourceArgs.builder()
                .compressionType("string")
                .s3DataType("string")
                .s3Uri("string")
                .modelAccessConfig(ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfigArgs.builder()
                    .acceptEula(false)
                    .build())
                .build())
            .build())
        .modelDataUrl("string")
        .modelPackageName("string")
        .multiModelConfig(ModelPrimaryContainerMultiModelConfigArgs.builder()
            .modelCacheSetting("string")
            .build())
        .build())
    .tags(Map.of("string", "string"))
    .vpcConfig(ModelVpcConfigArgs.builder()
        .securityGroupIds("string")
        .subnets("string")
        .build())
    .build());
# Reference example: all Model input properties shown with placeholder values.
examplemodel_resource_resource_from_sagemakermodel = aws.sagemaker.Model("examplemodelResourceResourceFromSagemakermodel",
    execution_role_arn="string",
    containers=[{
        "container_hostname": "string",
        "environment": {
            "string": "string",
        },
        "image": "string",
        "image_config": {
            "repository_access_mode": "string",
            "repository_auth_config": {
                "repository_credentials_provider_arn": "string",
            },
        },
        "inference_specification_name": "string",
        "mode": "string",
        "model_data_source": {
            "s3_data_sources": [{
                "compression_type": "string",
                "s3_data_type": "string",
                "s3_uri": "string",
                "model_access_config": {
                    "accept_eula": False,
                },
            }],
        },
        "model_data_url": "string",
        "model_package_name": "string",
        "multi_model_config": {
            "model_cache_setting": "string",
        },
    }],
    enable_network_isolation=False,
    inference_execution_config={
        "mode": "string",
    },
    name="string",
    primary_container={
        "container_hostname": "string",
        "environment": {
            "string": "string",
        },
        "image": "string",
        "image_config": {
            "repository_access_mode": "string",
            "repository_auth_config": {
                "repository_credentials_provider_arn": "string",
            },
        },
        "inference_specification_name": "string",
        "mode": "string",
        "model_data_source": {
            "s3_data_sources": [{
                "compression_type": "string",
                "s3_data_type": "string",
                "s3_uri": "string",
                "model_access_config": {
                    "accept_eula": False,
                },
            }],
        },
        "model_data_url": "string",
        "model_package_name": "string",
        "multi_model_config": {
            "model_cache_setting": "string",
        },
    },
    tags={
        "string": "string",
    },
    vpc_config={
        "security_group_ids": ["string"],
        "subnets": ["string"],
    })
// Reference example: all Model input properties shown with placeholder values.
const examplemodelResourceResourceFromSagemakermodel = new aws.sagemaker.Model("examplemodelResourceResourceFromSagemakermodel", {
    executionRoleArn: "string",
    containers: [{
        containerHostname: "string",
        environment: {
            string: "string",
        },
        image: "string",
        imageConfig: {
            repositoryAccessMode: "string",
            repositoryAuthConfig: {
                repositoryCredentialsProviderArn: "string",
            },
        },
        inferenceSpecificationName: "string",
        mode: "string",
        modelDataSource: {
            s3DataSources: [{
                compressionType: "string",
                s3DataType: "string",
                s3Uri: "string",
                modelAccessConfig: {
                    acceptEula: false,
                },
            }],
        },
        modelDataUrl: "string",
        modelPackageName: "string",
        multiModelConfig: {
            modelCacheSetting: "string",
        },
    }],
    enableNetworkIsolation: false,
    inferenceExecutionConfig: {
        mode: "string",
    },
    name: "string",
    primaryContainer: {
        containerHostname: "string",
        environment: {
            string: "string",
        },
        image: "string",
        imageConfig: {
            repositoryAccessMode: "string",
            repositoryAuthConfig: {
                repositoryCredentialsProviderArn: "string",
            },
        },
        inferenceSpecificationName: "string",
        mode: "string",
        modelDataSource: {
            s3DataSources: [{
                compressionType: "string",
                s3DataType: "string",
                s3Uri: "string",
                modelAccessConfig: {
                    acceptEula: false,
                },
            }],
        },
        modelDataUrl: "string",
        modelPackageName: "string",
        multiModelConfig: {
            modelCacheSetting: "string",
        },
    },
    tags: {
        string: "string",
    },
    vpcConfig: {
        securityGroupIds: ["string"],
        subnets: ["string"],
    },
});
# Reference example: all Model input properties shown with placeholder values.
type: aws:sagemaker:Model
properties:
    containers:
        - containerHostname: string
          environment:
            string: string
          image: string
          imageConfig:
            repositoryAccessMode: string
            repositoryAuthConfig:
                repositoryCredentialsProviderArn: string
          inferenceSpecificationName: string
          mode: string
          modelDataSource:
            s3DataSources:
                - compressionType: string
                  modelAccessConfig:
                    acceptEula: false
                  s3DataType: string
                  s3Uri: string
          modelDataUrl: string
          modelPackageName: string
          multiModelConfig:
            modelCacheSetting: string
    enableNetworkIsolation: false
    executionRoleArn: string
    inferenceExecutionConfig:
        mode: string
    name: string
    primaryContainer:
        containerHostname: string
        environment:
            string: string
        image: string
        imageConfig:
            repositoryAccessMode: string
            repositoryAuthConfig:
                repositoryCredentialsProviderArn: string
        inferenceSpecificationName: string
        mode: string
        modelDataSource:
            s3DataSources:
                - compressionType: string
                  modelAccessConfig:
                    acceptEula: false
                  s3DataType: string
                  s3Uri: string
        modelDataUrl: string
        modelPackageName: string
        multiModelConfig:
            modelCacheSetting: string
    tags:
        string: string
    vpcConfig:
        securityGroupIds:
            - string
        subnets:
            - string
Model Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The Model resource accepts the following input properties:
- ExecutionRoleArn string
- A role that SageMaker can assume to access model artifacts and docker images for deployment.
- Containers List&lt;ModelContainer&gt;
- Specifies containers in the inference pipeline. If not specified, the `primary_container` argument is required. Fields are documented below.
- EnableNetworkIsolation bool
- Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
- InferenceExecutionConfig ModelInferenceExecutionConfig
- Specifies details of how containers in a multi-container endpoint are called. See Inference Execution Config.
- Name string
- The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
- PrimaryContainer ModelPrimaryContainer
- The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the `container` argument is required. Fields are documented below.
- Tags Dictionary&lt;string, string&gt;
- A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- VpcConfig ModelVpcConfig
- Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
- ExecutionRole stringArn 
- A role that SageMaker can assume to access model artifacts and docker images for deployment.
- Containers
[]ModelContainer Args 
- Specifies containers in the inference pipeline. If not specified, the primary_containerargument is required. Fields are documented below.
- EnableNetwork boolIsolation 
- Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
- InferenceExecution ModelConfig Inference Execution Config Args 
- Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
- Name string
- The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
- PrimaryContainer ModelPrimary Container Args 
- The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the containerargument is required. Fields are documented below.
- map[string]string
- A map of tags to assign to the resource. .If configured with a provider default_tagsconfiguration block present, tags with matching keys will overwrite those defined at the provider-level.
- VpcConfig ModelVpc Config Args 
- Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
- executionRole StringArn 
- A role that SageMaker can assume to access model artifacts and docker images for deployment.
- containers
List<ModelContainer> 
- Specifies containers in the inference pipeline. If not specified, the primary_containerargument is required. Fields are documented below.
- enableNetwork BooleanIsolation 
- Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
- inferenceExecution ModelConfig Inference Execution Config 
- Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
- name String
- The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
- primaryContainer ModelPrimary Container 
- The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the containerargument is required. Fields are documented below.
- Map<String,String>
- A map of tags to assign to the resource. .If configured with a provider default_tagsconfiguration block present, tags with matching keys will overwrite those defined at the provider-level.
- vpcConfig ModelVpc Config 
- Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
- executionRole stringArn 
- A role that SageMaker can assume to access model artifacts and docker images for deployment.
- containers
ModelContainer[] 
- Specifies containers in the inference pipeline. If not specified, the primary_containerargument is required. Fields are documented below.
- enableNetwork booleanIsolation 
- Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
- inferenceExecution ModelConfig Inference Execution Config 
- Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
- name string
- The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
- primaryContainer ModelPrimary Container 
- The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the containerargument is required. Fields are documented below.
- {[key: string]: string}
- A map of tags to assign to the resource. .If configured with a provider default_tagsconfiguration block present, tags with matching keys will overwrite those defined at the provider-level.
- vpcConfig ModelVpc Config 
- Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
- execution_role_ strarn 
- A role that SageMaker can assume to access model artifacts and docker images for deployment.
- containers
Sequence[ModelContainer Args] 
- Specifies containers in the inference pipeline. If not specified, the primary_containerargument is required. Fields are documented below.
- enable_network_ boolisolation 
- Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
- inference_execution_ Modelconfig Inference Execution Config Args 
- Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
- name str
- The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
- primary_container ModelPrimary Container Args 
- The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the containerargument is required. Fields are documented below.
- Mapping[str, str]
- A map of tags to assign to the resource. .If configured with a provider default_tagsconfiguration block present, tags with matching keys will overwrite those defined at the provider-level.
- vpc_config ModelVpc Config Args 
- Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
- executionRole StringArn 
- A role that SageMaker can assume to access model artifacts and docker images for deployment.
- containers List<Property Map>
- Specifies containers in the inference pipeline. If not specified, the primary_containerargument is required. Fields are documented below.
- enableNetwork BooleanIsolation 
- Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
- inferenceExecution Property MapConfig 
- Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
- name String
- The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
- primaryContainer Property Map
- The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the containerargument is required. Fields are documented below.
- Map<String>
- A map of tags to assign to the resource. .If configured with a provider default_tagsconfiguration block present, tags with matching keys will overwrite those defined at the provider-level.
- vpcConfig Property Map
- Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
Outputs
All input properties are implicitly available as output properties. Additionally, the Model resource produces the following output properties:
Look up Existing Model Resource
Get an existing Model resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: ModelState, opts?: CustomResourceOptions): Model@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        arn: Optional[str] = None,
        containers: Optional[Sequence[ModelContainerArgs]] = None,
        enable_network_isolation: Optional[bool] = None,
        execution_role_arn: Optional[str] = None,
        inference_execution_config: Optional[ModelInferenceExecutionConfigArgs] = None,
        name: Optional[str] = None,
        primary_container: Optional[ModelPrimaryContainerArgs] = None,
        tags: Optional[Mapping[str, str]] = None,
        tags_all: Optional[Mapping[str, str]] = None,
        vpc_config: Optional[ModelVpcConfigArgs] = None) -> Model
func GetModel(ctx *Context, name string, id IDInput, state *ModelState, opts ...ResourceOption) (*Model, error)
public static Model Get(string name, Input<string> id, ModelState? state, CustomResourceOptions? opts = null)
public static Model get(String name, Output<String> id, ModelState state, CustomResourceOptions options)
resources:
  _:
    type: aws:sagemaker:Model
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Arn string
- The Amazon Resource Name (ARN) assigned by AWS to this model.
- Containers List<ModelContainer>
- Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
- EnableNetworkIsolation bool
- Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
- ExecutionRoleArn string
- A role that SageMaker can assume to access model artifacts and docker images for deployment.
- InferenceExecutionConfig ModelInferenceExecutionConfig
- Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
- Name string
- The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
- PrimaryContainer ModelPrimaryContainer
- The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.
- Dictionary<string, string>
- A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- Dictionary<string, string>
- A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- VpcConfig ModelVpcConfig
- Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
- Arn string
- The Amazon Resource Name (ARN) assigned by AWS to this model.
- Containers []ModelContainerArgs
- Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
- EnableNetworkIsolation bool
- Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
- ExecutionRoleArn string
- A role that SageMaker can assume to access model artifacts and docker images for deployment.
- InferenceExecutionConfig ModelInferenceExecutionConfigArgs
- Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
- Name string
- The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
- PrimaryContainer ModelPrimaryContainerArgs
- The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.
- map[string]string
- A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- map[string]string
- A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- VpcConfig ModelVpcConfigArgs
- Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
- arn String
- The Amazon Resource Name (ARN) assigned by AWS to this model.
- containers List<ModelContainer>
- Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
- enableNetworkIsolation Boolean
- Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
- executionRoleArn String
- A role that SageMaker can assume to access model artifacts and docker images for deployment.
- inferenceExecutionConfig ModelInferenceExecutionConfig
- Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
- name String
- The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
- primaryContainer ModelPrimaryContainer
- The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.
- Map<String,String>
- A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- Map<String,String>
- A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- vpcConfig ModelVpcConfig
- Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
- arn string
- The Amazon Resource Name (ARN) assigned by AWS to this model.
- containers ModelContainer[]
- Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
- enableNetworkIsolation boolean
- Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
- executionRoleArn string
- A role that SageMaker can assume to access model artifacts and docker images for deployment.
- inferenceExecutionConfig ModelInferenceExecutionConfig
- Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
- name string
- The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
- primaryContainer ModelPrimaryContainer
- The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.
- {[key: string]: string}
- A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- {[key: string]: string}
- A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- vpcConfig ModelVpcConfig
- Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
- arn str
- The Amazon Resource Name (ARN) assigned by AWS to this model.
- containers Sequence[ModelContainerArgs]
- Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
- enable_network_isolation bool
- Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
- execution_role_arn str
- A role that SageMaker can assume to access model artifacts and docker images for deployment.
- inference_execution_config ModelInferenceExecutionConfigArgs
- Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
- name str
- The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
- primary_container ModelPrimaryContainerArgs
- The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.
- Mapping[str, str]
- A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- Mapping[str, str]
- A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- vpc_config ModelVpcConfigArgs
- Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
- arn String
- The Amazon Resource Name (ARN) assigned by AWS to this model.
- containers List<Property Map>
- Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
- enableNetworkIsolation Boolean
- Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
- executionRoleArn String
- A role that SageMaker can assume to access model artifacts and docker images for deployment.
- inferenceExecutionConfig Property Map
- Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
- name String
- The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
- primaryContainer Property Map
- The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.
- Map<String>
- A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- Map<String>
- A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- vpcConfig Property Map
- Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
Supporting Types
ModelContainer, ModelContainerArgs    
- ContainerHostname string
- The DNS host name for the container.
- Environment Dictionary<string, string>
- Environment variables for the Docker container. A list of key value pairs.
- Image string
- The registry path where the inference code image is stored in Amazon ECR.
- ImageConfig ModelContainerImageConfig
- Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
- InferenceSpecificationName string
- The inference specification name in the model package version.
- Mode string
- The container hosts value SingleModel/MultiModel. The default value is SingleModel.
- ModelDataSource ModelContainerModelDataSource
- The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
- ModelDataUrl string
- The URL for the S3 location where model artifacts are stored.
- ModelPackageName string
- The Amazon Resource Name (ARN) of the model package to use to create the model.
- MultiModelConfig ModelContainerMultiModelConfig
- Specifies additional configuration for multi-model endpoints. see Multi Model Config.
- ContainerHostname string
- The DNS host name for the container.
- Environment map[string]string
- Environment variables for the Docker container. A list of key value pairs.
- Image string
- The registry path where the inference code image is stored in Amazon ECR.
- ImageConfig ModelContainer Image Config 
- Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
- InferenceSpecification stringName 
- The inference specification name in the model package version.
- Mode string
- The container hosts value SingleModel/MultiModel. The default value isSingleModel.
- ModelData ModelSource Container Model Data Source 
- The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
- ModelData stringUrl 
- The URL for the S3 location where model artifacts are stored.
- ModelPackage stringName 
- The Amazon Resource Name (ARN) of the model package to use to create the model.
- MultiModel ModelConfig Container Multi Model Config 
- Specifies additional configuration for multi-model endpoints. see Multi Model Config.
- containerHostname String
- The DNS host name for the container.
- environment Map<String,String>
- Environment variables for the Docker container. A list of key value pairs.
- image String
- The registry path where the inference code image is stored in Amazon ECR.
- imageConfig ModelContainer Image Config 
- Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
- inferenceSpecification StringName 
- The inference specification name in the model package version.
- mode String
- The container hosts value SingleModel/MultiModel. The default value isSingleModel.
- modelData ModelSource Container Model Data Source 
- The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
- modelData StringUrl 
- The URL for the S3 location where model artifacts are stored.
- modelPackage StringName 
- The Amazon Resource Name (ARN) of the model package to use to create the model.
- multiModel ModelConfig Container Multi Model Config 
- Specifies additional configuration for multi-model endpoints. see Multi Model Config.
- containerHostname string
- The DNS host name for the container.
- environment {[key: string]: string}
- Environment variables for the Docker container. A list of key value pairs.
- image string
- The registry path where the inference code image is stored in Amazon ECR.
- imageConfig ModelContainer Image Config 
- Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
- inferenceSpecification stringName 
- The inference specification name in the model package version.
- mode string
- The container hosts value SingleModel/MultiModel. The default value isSingleModel.
- modelData ModelSource Container Model Data Source 
- The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
- modelData stringUrl 
- The URL for the S3 location where model artifacts are stored.
- modelPackage stringName 
- The Amazon Resource Name (ARN) of the model package to use to create the model.
- multiModel ModelConfig Container Multi Model Config 
- Specifies additional configuration for multi-model endpoints. see Multi Model Config.
- container_hostname str
- The DNS host name for the container.
- environment Mapping[str, str]
- Environment variables for the Docker container. A list of key value pairs.
- image str
- The registry path where the inference code image is stored in Amazon ECR.
- image_config ModelContainerImageConfig
- Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
- inference_specification_name str
- The inference specification name in the model package version.
- mode str
- The container hosts value SingleModel/MultiModel. The default value is SingleModel.
- model_data_source ModelContainerModelDataSource
- The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
- model_data_url str
- The URL for the S3 location where model artifacts are stored.
- model_package_name str
- The Amazon Resource Name (ARN) of the model package to use to create the model.
- multi_model_config ModelContainerMultiModelConfig
- Specifies additional configuration for multi-model endpoints. see Multi Model Config.
- containerHostname String
- The DNS host name for the container.
- environment Map<String>
- Environment variables for the Docker container. A list of key value pairs.
- image String
- The registry path where the inference code image is stored in Amazon ECR.
- imageConfig Property Map
- Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
- inferenceSpecification StringName 
- The inference specification name in the model package version.
- mode String
- The container hosts value SingleModel/MultiModel. The default value isSingleModel.
- modelData Property MapSource 
- The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
- modelData StringUrl 
- The URL for the S3 location where model artifacts are stored.
- modelPackage StringName 
- The Amazon Resource Name (ARN) of the model package to use to create the model.
- multiModel Property MapConfig 
- Specifies additional configuration for multi-model endpoints. see Multi Model Config.
ModelContainerImageConfig, ModelContainerImageConfigArgs        
- RepositoryAccessMode string
- Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.
- RepositoryAuthConfig ModelContainerImageConfigRepositoryAuthConfig
- Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
- RepositoryAccess stringMode 
- Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: PlatformandVpc.
- RepositoryAuth ModelConfig Container Image Config Repository Auth Config 
- Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
- repositoryAccess StringMode 
- Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: PlatformandVpc.
- repositoryAuth ModelConfig Container Image Config Repository Auth Config 
- Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
- repositoryAccess stringMode 
- Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: PlatformandVpc.
- repositoryAuth ModelConfig Container Image Config Repository Auth Config 
- Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
- repository_access_ strmode 
- Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: PlatformandVpc.
- repository_auth_ Modelconfig Container Image Config Repository Auth Config 
- Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
- repositoryAccess StringMode 
- Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: PlatformandVpc.
- repositoryAuth Property MapConfig 
- Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
ModelContainerImageConfigRepositoryAuthConfig, ModelContainerImageConfigRepositoryAuthConfigArgs              
- RepositoryCredentialsProviderArn string
- The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
- RepositoryCredentials stringProvider Arn 
- The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
- repositoryCredentials StringProvider Arn 
- The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
- repositoryCredentials stringProvider Arn 
- The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
- repository_credentials_ strprovider_ arn 
- The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
- repositoryCredentials StringProvider Arn 
- The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
ModelContainerModelDataSource, ModelContainerModelDataSourceArgs          
- S3DataSources List<ModelContainerModelDataSourceS3DataSource>
- The S3 location of model data to deploy.
- S3DataSources []ModelContainer Model Data Source S3Data Source 
- The S3 location of model data to deploy.
- s3DataSources List<ModelContainer Model Data Source S3Data Source> 
- The S3 location of model data to deploy.
- s3DataSources ModelContainer Model Data Source S3Data Source[] 
- The S3 location of model data to deploy.
- s3_data_ Sequence[Modelsources Container Model Data Source S3Data Source] 
- The S3 location of model data to deploy.
- s3DataSources List<Property Map>
- The S3 location of model data to deploy.
ModelContainerModelDataSourceS3DataSource, ModelContainerModelDataSourceS3DataSourceArgs              
- CompressionType string
- How the model data is prepared. Allowed values are: None and Gzip.
- S3DataType string
- The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
- S3Uri string
- The S3 path of model data to deploy.
- ModelAccessConfig ModelContainerModelDataSourceS3DataSourceModelAccessConfig
- Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the model_access_config configuration block. see Model Access Config.
- CompressionType string
- How the model data is prepared. Allowed values are: NoneandGzip.
- S3DataType string
- The type of model data to deploy. Allowed values are: S3ObjectandS3Prefix.
- S3Uri string
- The S3 path of model data to deploy.
- ModelAccess ModelConfig Container Model Data Source S3Data Source Model Access Config 
- Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the [model_access_configconfiguration block]. see Model Access Config.
- compressionType String
- How the model data is prepared. Allowed values are: NoneandGzip.
- s3DataType String
- The type of model data to deploy. Allowed values are: S3ObjectandS3Prefix.
- s3Uri String
- The S3 path of model data to deploy.
- modelAccess ModelConfig Container Model Data Source S3Data Source Model Access Config 
- Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the [model_access_configconfiguration block]. see Model Access Config.
- compressionType string
- How the model data is prepared. Allowed values are: NoneandGzip.
- s3DataType string
- The type of model data to deploy. Allowed values are: S3ObjectandS3Prefix.
- s3Uri string
- The S3 path of model data to deploy.
- modelAccess ModelConfig Container Model Data Source S3Data Source Model Access Config 
- Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the [model_access_configconfiguration block]. see Model Access Config.
- compression_type str
- How the model data is prepared. Allowed values are: NoneandGzip.
- s3_data_ strtype 
- The type of model data to deploy. Allowed values are: S3ObjectandS3Prefix.
- s3_uri str
- The S3 path of model data to deploy.
- model_access_ Modelconfig Container Model Data Source S3Data Source Model Access Config 
- Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the [model_access_configconfiguration block]. see Model Access Config.
- compressionType String
- How the model data is prepared. Allowed values are: NoneandGzip.
- s3DataType String
- The type of model data to deploy. Allowed values are: S3ObjectandS3Prefix.
- s3Uri String
- The S3 path of model data to deploy.
- modelAccess Property MapConfig 
- Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the [model_access_configconfiguration block]. see Model Access Config.
ModelContainerModelDataSourceS3DataSourceModelAccessConfig, ModelContainerModelDataSourceS3DataSourceModelAccessConfigArgs                    
- AcceptEula bool
- Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as true in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
- AcceptEula bool
- Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as truein order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
- acceptEula Boolean
- Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as truein order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
- acceptEula boolean
- Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as truein order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
- accept_eula bool
- Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as truein order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
- acceptEula Boolean
- Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as truein order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
ModelContainerMultiModelConfig, ModelContainerMultiModelConfigArgs          
- ModelCacheSetting string
- Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to `Disabled`. Allowed values are: `Enabled` and `Disabled`.
- ModelCacheSetting string
- Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to `Disabled`. Allowed values are: `Enabled` and `Disabled`.
- modelCacheSetting String
- Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to `Disabled`. Allowed values are: `Enabled` and `Disabled`.
- modelCacheSetting string
- Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to `Disabled`. Allowed values are: `Enabled` and `Disabled`.
- model_cache_setting str
- Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to `Disabled`. Allowed values are: `Enabled` and `Disabled`.
- modelCacheSetting String
- Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to `Disabled`. Allowed values are: `Enabled` and `Disabled`.
ModelInferenceExecutionConfig, ModelInferenceExecutionConfigArgs        
- Mode string
- The container hosts value `SingleModel`/`MultiModel`. The default value is `SingleModel`.
- Mode string
- The container hosts value `SingleModel`/`MultiModel`. The default value is `SingleModel`.
- mode String
- The container hosts value `SingleModel`/`MultiModel`. The default value is `SingleModel`.
- mode string
- The container hosts value `SingleModel`/`MultiModel`. The default value is `SingleModel`.
- mode str
- The container hosts value `SingleModel`/`MultiModel`. The default value is `SingleModel`.
- mode String
- The container hosts value `SingleModel`/`MultiModel`. The default value is `SingleModel`.
ModelPrimaryContainer, ModelPrimaryContainerArgs      
- ContainerHostname string
- The DNS host name for the container.
- Environment Dictionary<string, string>
- Environment variables for the Docker container. A list of key value pairs.
- Image string
- The registry path where the inference code image is stored in Amazon ECR.
- ImageConfig ModelPrimaryContainerImageConfig
- Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
- InferenceSpecificationName string
- The inference specification name in the model package version.
- Mode string
- The container hosts value `SingleModel`/`MultiModel`. The default value is `SingleModel`.
- ModelDataSource ModelPrimaryContainerModelDataSource
- The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
- ModelDataUrl string
- The URL for the S3 location where model artifacts are stored.
- ModelPackageName string
- The Amazon Resource Name (ARN) of the model package to use to create the model.
- MultiModelConfig ModelPrimaryContainerMultiModelConfig
- Specifies additional configuration for multi-model endpoints. see Multi Model Config.
- ContainerHostname string
- The DNS host name for the container.
- Environment map[string]string
- Environment variables for the Docker container. A list of key value pairs.
- Image string
- The registry path where the inference code image is stored in Amazon ECR.
- ImageConfig ModelPrimaryContainerImageConfig
- Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
- InferenceSpecificationName string
- The inference specification name in the model package version.
- Mode string
- The container hosts value `SingleModel`/`MultiModel`. The default value is `SingleModel`.
- ModelDataSource ModelPrimaryContainerModelDataSource
- The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
- ModelDataUrl string
- The URL for the S3 location where model artifacts are stored.
- ModelPackageName string
- The Amazon Resource Name (ARN) of the model package to use to create the model.
- MultiModelConfig ModelPrimaryContainerMultiModelConfig
- Specifies additional configuration for multi-model endpoints. see Multi Model Config.
- containerHostname String
- The DNS host name for the container.
- environment Map<String,String>
- Environment variables for the Docker container. A list of key value pairs.
- image String
- The registry path where the inference code image is stored in Amazon ECR.
- imageConfig ModelPrimaryContainerImageConfig
- Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
- inferenceSpecificationName String
- The inference specification name in the model package version.
- mode String
- The container hosts value `SingleModel`/`MultiModel`. The default value is `SingleModel`.
- modelDataSource ModelPrimaryContainerModelDataSource
- The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
- modelDataUrl String
- The URL for the S3 location where model artifacts are stored.
- modelPackageName String
- The Amazon Resource Name (ARN) of the model package to use to create the model.
- multiModelConfig ModelPrimaryContainerMultiModelConfig
- Specifies additional configuration for multi-model endpoints. see Multi Model Config.
- containerHostname string
- The DNS host name for the container.
- environment {[key: string]: string}
- Environment variables for the Docker container. A list of key value pairs.
- image string
- The registry path where the inference code image is stored in Amazon ECR.
- imageConfig ModelPrimaryContainerImageConfig
- Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
- inferenceSpecificationName string
- The inference specification name in the model package version.
- mode string
- The container hosts value `SingleModel`/`MultiModel`. The default value is `SingleModel`.
- modelDataSource ModelPrimaryContainerModelDataSource
- The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
- modelDataUrl string
- The URL for the S3 location where model artifacts are stored.
- modelPackageName string
- The Amazon Resource Name (ARN) of the model package to use to create the model.
- multiModelConfig ModelPrimaryContainerMultiModelConfig
- Specifies additional configuration for multi-model endpoints. see Multi Model Config.
- container_hostname str
- The DNS host name for the container.
- environment Mapping[str, str]
- Environment variables for the Docker container. A list of key value pairs.
- image str
- The registry path where the inference code image is stored in Amazon ECR.
- image_config ModelPrimaryContainerImageConfig
- Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
- inference_specification_name str
- The inference specification name in the model package version.
- mode str
- The container hosts value `SingleModel`/`MultiModel`. The default value is `SingleModel`.
- model_data_source ModelPrimaryContainerModelDataSource
- The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
- model_data_url str
- The URL for the S3 location where model artifacts are stored.
- model_package_name str
- The Amazon Resource Name (ARN) of the model package to use to create the model.
- multi_model_config ModelPrimaryContainerMultiModelConfig
- Specifies additional configuration for multi-model endpoints. see Multi Model Config.
- containerHostname String
- The DNS host name for the container.
- environment Map<String>
- Environment variables for the Docker container. A list of key value pairs.
- image String
- The registry path where the inference code image is stored in Amazon ECR.
- imageConfig Property Map
- Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
- inferenceSpecificationName String
- The inference specification name in the model package version.
- mode String
- The container hosts value `SingleModel`/`MultiModel`. The default value is `SingleModel`.
- modelDataSource Property Map
- The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
- modelDataUrl String
- The URL for the S3 location where model artifacts are stored.
- modelPackageName String
- The Amazon Resource Name (ARN) of the model package to use to create the model.
- multiModelConfig Property Map
- Specifies additional configuration for multi-model endpoints. see Multi Model Config.
ModelPrimaryContainerImageConfig, ModelPrimaryContainerImageConfigArgs          
- RepositoryAccessMode string
- Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: `Platform` and `Vpc`.
- RepositoryAuthConfig ModelPrimaryContainerImageConfigRepositoryAuthConfig
- Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
- RepositoryAccessMode string
- Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: `Platform` and `Vpc`.
- RepositoryAuthConfig ModelPrimaryContainerImageConfigRepositoryAuthConfig
- Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
- repositoryAccessMode String
- Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: `Platform` and `Vpc`.
- repositoryAuthConfig ModelPrimaryContainerImageConfigRepositoryAuthConfig
- Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
- repositoryAccessMode string
- Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: `Platform` and `Vpc`.
- repositoryAuthConfig ModelPrimaryContainerImageConfigRepositoryAuthConfig
- Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
- repository_access_mode str
- Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: `Platform` and `Vpc`.
- repository_auth_config ModelPrimaryContainerImageConfigRepositoryAuthConfig
- Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
- repositoryAccessMode String
- Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: `Platform` and `Vpc`.
- repositoryAuthConfig Property Map
- Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
ModelPrimaryContainerImageConfigRepositoryAuthConfig, ModelPrimaryContainerImageConfigRepositoryAuthConfigArgs                
- RepositoryCredentialsProviderArn string
- The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
- RepositoryCredentialsProviderArn string
- The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
- repositoryCredentialsProviderArn String
- The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
- repositoryCredentialsProviderArn string
- The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
- repository_credentials_provider_arn str
- The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
- repositoryCredentialsProviderArn String
- The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
ModelPrimaryContainerModelDataSource, ModelPrimaryContainerModelDataSourceArgs            
- S3DataSources List<ModelPrimaryContainerModelDataSourceS3DataSource>
- The S3 location of model data to deploy.
- S3DataSources []ModelPrimaryContainerModelDataSourceS3DataSource
- The S3 location of model data to deploy.
- s3DataSources List<ModelPrimaryContainerModelDataSourceS3DataSource>
- The S3 location of model data to deploy.
- s3DataSources ModelPrimaryContainerModelDataSourceS3DataSource[]
- The S3 location of model data to deploy.
- s3_data_sources Sequence[ModelPrimaryContainerModelDataSourceS3DataSource]
- The S3 location of model data to deploy.
- s3DataSources List<Property Map>
- The S3 location of model data to deploy.
ModelPrimaryContainerModelDataSourceS3DataSource, ModelPrimaryContainerModelDataSourceS3DataSourceArgs                
- CompressionType string
- How the model data is prepared. Allowed values are: `None` and `Gzip`.
- S3DataType string
- The type of model data to deploy. Allowed values are: `S3Object` and `S3Prefix`.
- S3Uri string
- The S3 path of model data to deploy.
- ModelAccessConfig ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfig
- Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the `model_access_config` configuration block. see Model Access Config.
- CompressionType string
- How the model data is prepared. Allowed values are: `None` and `Gzip`.
- S3DataType string
- The type of model data to deploy. Allowed values are: `S3Object` and `S3Prefix`.
- S3Uri string
- The S3 path of model data to deploy.
- ModelAccessConfig ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfig
- Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the `model_access_config` configuration block. see Model Access Config.
- compressionType String
- How the model data is prepared. Allowed values are: `None` and `Gzip`.
- s3DataType String
- The type of model data to deploy. Allowed values are: `S3Object` and `S3Prefix`.
- s3Uri String
- The S3 path of model data to deploy.
- modelAccessConfig ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfig
- Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the `model_access_config` configuration block. see Model Access Config.
- compressionType string
- How the model data is prepared. Allowed values are: `None` and `Gzip`.
- s3DataType string
- The type of model data to deploy. Allowed values are: `S3Object` and `S3Prefix`.
- s3Uri string
- The S3 path of model data to deploy.
- modelAccessConfig ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfig
- Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the `model_access_config` configuration block. see Model Access Config.
- compression_type str
- How the model data is prepared. Allowed values are: `None` and `Gzip`.
- s3_data_type str
- The type of model data to deploy. Allowed values are: `S3Object` and `S3Prefix`.
- s3_uri str
- The S3 path of model data to deploy.
- model_access_config ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfig
- Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the `model_access_config` configuration block. see Model Access Config.
- compressionType String
- How the model data is prepared. Allowed values are: `None` and `Gzip`.
- s3DataType String
- The type of model data to deploy. Allowed values are: `S3Object` and `S3Prefix`.
- s3Uri String
- The S3 path of model data to deploy.
- modelAccessConfig Property Map
- Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the `model_access_config` configuration block. see Model Access Config.
ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfig, ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfigArgs                      
- AcceptEula bool
- Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as `true` in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
- AcceptEula bool
- Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as `true` in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
- acceptEula Boolean
- Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as `true` in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
- acceptEula boolean
- Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as `true` in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
- accept_eula bool
- Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as `true` in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
- acceptEula Boolean
- Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as `true` in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
ModelPrimaryContainerMultiModelConfig, ModelPrimaryContainerMultiModelConfigArgs            
- ModelCacheSetting string
- Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to `Disabled`. Allowed values are: `Enabled` and `Disabled`.
- ModelCacheSetting string
- Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to `Disabled`. Allowed values are: `Enabled` and `Disabled`.
- modelCacheSetting String
- Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to `Disabled`. Allowed values are: `Enabled` and `Disabled`.
- modelCacheSetting string
- Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to `Disabled`. Allowed values are: `Enabled` and `Disabled`.
- model_cache_setting str
- Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to `Disabled`. Allowed values are: `Enabled` and `Disabled`.
- modelCacheSetting String
- Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to `Disabled`. Allowed values are: `Enabled` and `Disabled`.
ModelVpcConfig, ModelVpcConfigArgs      
- SecurityGroupIds List<string>
- Subnets List<string>
- SecurityGroupIds []string
- Subnets []string
- securityGroupIds List<String>
- subnets List<String>
- securityGroupIds string[]
- subnets string[]
- security_group_ids Sequence[str]
- subnets Sequence[str]
- securityGroupIds List<String>
- subnets List<String>
Import
Using pulumi import, import models using the name. For example:
$ pulumi import aws:sagemaker/model:Model test_model model-foo
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- AWS Classic pulumi/pulumi-aws
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the `aws` Terraform Provider.